hexsha (string, len 40) | size (int64, 2-1.05M) | ext (string, 9 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4-193) | max_stars_repo_name (string, len 6-109) | max_stars_repo_head_hexsha (string, len 40-78) | max_stars_repo_licenses (list, len 1-10) | max_stars_count (int64, 1-36.6k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4-193) | max_issues_repo_name (string, len 6-109) | max_issues_repo_head_hexsha (string, len 40-78) | max_issues_repo_licenses (list, len 1-10) | max_issues_count (int64, 1-29.8k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4-193) | max_forks_repo_name (string, len 6-109) | max_forks_repo_head_hexsha (string, len 40-78) | max_forks_repo_licenses (list, len 1-10) | max_forks_count (int64, 1-11.2k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 2-1.05M) | avg_line_length (float64, 1-404k) | max_line_length (int64, 1-1.03M) | alphanum_fraction (float64, 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
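The rows that follow conform to this schema, one Python source file per row: the metadata cells come first, then the file's content, then the avg_line_length, max_line_length and alphanum_fraction cells. As a minimal sketch of how such rows could be inspected (assuming they have been exported to a JSON Lines file named data.jsonl, a name and path not given here), pandas can load and filter them:

import pandas as pd

# Each column listed in the header above becomes a DataFrame column.
df = pd.read_json("data.jsonl", lines=True)
# Keep small, mostly-alphanumeric files and show which repos they came from.
small = df[(df["size"] < 10_000) & (df["alphanum_fraction"] > 0.6)]
print(small[["max_stars_repo_name", "max_stars_repo_path", "max_stars_count"]].head())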
f76128ae606cdffdd1b63eae17d5fe73952db8a8 | 6,268 | py | Python | configs/common/O3_ARM_v7a.py | pnkfb9/gem5_priority | fbf766277df78a470758cf7d798d12fb1e7c51c4 | ["BSD-3-Clause"] | null | null | null | configs/common/O3_ARM_v7a.py | pnkfb9/gem5_priority | fbf766277df78a470758cf7d798d12fb1e7c51c4 | ["BSD-3-Clause"] | null | null | null | configs/common/O3_ARM_v7a.py | pnkfb9/gem5_priority | fbf766277df78a470758cf7d798d12fb1e7c51c4 | ["BSD-3-Clause"] | null | null | null |
# Copyright (c) 2012 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
from m5.objects import *
# Simple ALU Instructions have a latency of 1
class O3_ARM_v7a_Simple_Int(FUDesc):
opList = [ OpDesc(opClass='IntAlu', opLat=1) ]
count = 2
# Complex ALU instructions have a variable latencies
class O3_ARM_v7a_Complex_Int(FUDesc):
opList = [ OpDesc(opClass='IntMult', opLat=3, issueLat=1),
OpDesc(opClass='IntDiv', opLat=12, issueLat=12),
OpDesc(opClass='IprAccess', opLat=3, issueLat=1) ]
count = 1
# Floating point and SIMD instructions
class O3_ARM_v7a_FP(FUDesc):
opList = [ OpDesc(opClass='SimdAdd', opLat=4),
OpDesc(opClass='SimdAddAcc', opLat=4),
OpDesc(opClass='SimdAlu', opLat=4),
OpDesc(opClass='SimdCmp', opLat=4),
OpDesc(opClass='SimdCvt', opLat=3),
OpDesc(opClass='SimdMisc', opLat=3),
OpDesc(opClass='SimdMult',opLat=5),
OpDesc(opClass='SimdMultAcc',opLat=5),
OpDesc(opClass='SimdShift',opLat=3),
OpDesc(opClass='SimdShiftAcc', opLat=3),
OpDesc(opClass='SimdSqrt', opLat=9),
OpDesc(opClass='SimdFloatAdd',opLat=5),
OpDesc(opClass='SimdFloatAlu',opLat=5),
OpDesc(opClass='SimdFloatCmp', opLat=3),
OpDesc(opClass='SimdFloatCvt', opLat=3),
OpDesc(opClass='SimdFloatDiv', opLat=3),
OpDesc(opClass='SimdFloatMisc', opLat=3),
OpDesc(opClass='SimdFloatMult', opLat=3),
OpDesc(opClass='SimdFloatMultAcc',opLat=1),
OpDesc(opClass='SimdFloatSqrt', opLat=9),
OpDesc(opClass='FloatAdd', opLat=5),
OpDesc(opClass='FloatCmp', opLat=5),
OpDesc(opClass='FloatCvt', opLat=5),
OpDesc(opClass='FloatDiv', opLat=9, issueLat=9),
OpDesc(opClass='FloatSqrt', opLat=33, issueLat=33),
OpDesc(opClass='FloatMult', opLat=4) ]
count = 2
# Load/Store Units
class O3_ARM_v7a_Load(FUDesc):
opList = [ OpDesc(opClass='MemRead',opLat=2) ]
count = 1
class O3_ARM_v7a_Store(FUDesc):
opList = [OpDesc(opClass='MemWrite',opLat=2) ]
count = 1
# Functional Units for this CPU
class O3_ARM_v7a_FUP(FUPool):
FUList = [O3_ARM_v7a_Simple_Int(), O3_ARM_v7a_Complex_Int(),
O3_ARM_v7a_Load(), O3_ARM_v7a_Store(), O3_ARM_v7a_FP()]
class O3_ARM_v7a_3(DerivO3CPU):
predType = "tournament"
localCtrBits = 2
localHistoryTableSize = 64
localHistoryBits = 6
globalPredictorSize = 8192
globalCtrBits = 2
globalHistoryBits = 13
choicePredictorSize = 8192
choiceCtrBits = 2
BTBEntries = 2048
BTBTagSize = 18
RASSize = 16
instShiftAmt = 2
LQEntries = 16
SQEntries = 16
LSQDepCheckShift = 0
LFSTSize = 1024
SSITSize = 1024
decodeToFetchDelay = 1
renameToFetchDelay = 1
iewToFetchDelay = 1
commitToFetchDelay = 1
renameToDecodeDelay = 1
iewToDecodeDelay = 1
commitToDecodeDelay = 1
iewToRenameDelay = 1
commitToRenameDelay = 1
commitToIEWDelay = 1
fetchWidth = 3
fetchToDecodeDelay = 3
decodeWidth = 3
decodeToRenameDelay = 2
renameWidth = 3
renameToIEWDelay = 1
issueToExecuteDelay = 1
dispatchWidth = 6
issueWidth = 8
wbWidth = 8
wbDepth = 1
fuPool = O3_ARM_v7a_FUP()
iewToCommitDelay = 1
renameToROBDelay = 1
commitWidth = 8
squashWidth = 8
trapLatency = 13
backComSize = 5
forwardComSize = 5
numPhysIntRegs = 128
numPhysFloatRegs = 128
numIQEntries = 32
numROBEntries = 40
switched_out = False
# Instruction Cache
class O3_ARM_v7a_ICache(BaseCache):
hit_latency = 1
response_latency = 1
block_size = 64
mshrs = 2
tgts_per_mshr = 8
size = '32kB'
assoc = 2
is_top_level = 'true'
# Data Cache
class O3_ARM_v7a_DCache(BaseCache):
hit_latency = 2
response_latency = 2
block_size = 64
mshrs = 6
tgts_per_mshr = 8
size = '32kB'
assoc = 2
write_buffers = 16
is_top_level = 'true'
# TLB Cache
# Use a cache as a L2 TLB
class O3_ARM_v7aWalkCache(BaseCache):
hit_latency = 4
response_latency = 4
block_size = 64
mshrs = 6
tgts_per_mshr = 8
size = '1kB'
assoc = 8
write_buffers = 16
is_top_level = 'true'
# L2 Cache
class O3_ARM_v7aL2(BaseCache):
hit_latency = 12
response_latency = 12
block_size = 64
mshrs = 16
tgts_per_mshr = 8
size = '1MB'
assoc = 16
write_buffers = 8
prefetch_on_access = 'true'
# Simple stride prefetcher
prefetcher = StridePrefetcher(degree=8, latency = 1)
| 31.817259 | 72 | 0.669592 |
f76132741c9272b8cd781e2cb8a147a4cfe95d6d | 7,465 | py | Python | libnamebench/base_ui.py | claudioandre-br/namebench | 5bb5df4b4996be6a0947810b800177481f4e7299 | ["Apache-2.0"] | 5 | 2015-08-19T13:22:38.000Z | 2022-02-07T00:22:44.000Z | libnamebench/base_ui.py | claudioandre/namebench | 5bb5df4b4996be6a0947810b800177481f4e7299 | ["Apache-2.0"] | null | null | null | libnamebench/base_ui.py | claudioandre/namebench | 5bb5df4b4996be6a0947810b800177481f4e7299 | ["Apache-2.0"] | 2 | 2021-02-09T15:15:47.000Z | 2021-08-28T08:53:52.000Z |
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base user-interface workflow, to be inherited by UI modules."""
import tempfile
import benchmark
import better_webbrowser
import config
import data_sources
import geoip
import nameserver_list
import reporter
import site_connector
import util
__author__ = '[email protected] (Thomas Stromberg)'
class BaseUI(object):
"""Common methods for all UI implementations."""
def __init__(self):
self.SetupDataStructures()
def SetupDataStructures(self):
"""Instead of requiring users to inherit __init__(), this sets up structures."""
self.reporter = None
self.nameservers = None
self.bmark = None
self.report_path = None
self.csv_path = None
self.geodata = None
self.country = None
self.sources = {}
self.url = None
self.share_state = None
self.test_records = []
def UpdateStatus(self, msg, **kwargs):
"""Update the little status message on the bottom of the window."""
if hasattr(self, 'status_callback') and self.status_callback:
self.status_callback(msg, **kwargs)
else:
print msg
def DebugMsg(self, message):
self.UpdateStatus(message, debug=True)
def LoadDataSources(self):
self.data_src = data_sources.DataSources(status_callback=self.UpdateStatus)
def PrepareTestRecords(self):
"""Figure out what data source a user wants, and create test_records."""
if self.options.input_source:
src_type = self.options.input_source
else:
src_type = self.data_src.GetBestSourceDetails()[0]
self.options.input_source = src_type
self.test_records = self.data_src.GetTestsFromSource(
src_type,
self.options.query_count,
select_mode=self.options.select_mode
)
def PrepareNameServers(self):
"""Setup self.nameservers to have a list of healthy fast servers."""
self.nameservers = nameserver_list.NameServers(
self.supplied_ns,
global_servers=self.global_ns,
regional_servers=self.regional_ns,
include_internal=self.include_internal,
num_servers=self.options.num_servers,
timeout=self.options.timeout,
ping_timeout=self.options.ping_timeout,
health_timeout=self.options.health_timeout,
ipv6_only=self.options.ipv6_only,
status_callback=self.UpdateStatus
)
if self.options.invalidate_cache:
self.nameservers.InvalidateSecondaryCache()
self.nameservers.cache_dir = tempfile.gettempdir()
# Don't waste time checking the health of the only nameserver in the list.
if len(self.nameservers) > 1:
self.nameservers.thread_count = int(self.options.health_thread_count)
self.nameservers.cache_dir = tempfile.gettempdir()
self.UpdateStatus('Checking latest sanity reference')
(primary_checks, secondary_checks, censor_tests) = config.GetLatestSanityChecks()
if not self.options.enable_censorship_checks:
censor_tests = []
else:
self.UpdateStatus('Censorship checks enabled: %s found.' % len(censor_tests))
self.nameservers.CheckHealth(primary_checks, secondary_checks, censor_tests=censor_tests)
def PrepareBenchmark(self):
"""Setup the benchmark object with the appropriate dataset."""
if len(self.nameservers) == 1:
thread_count = 1
else:
thread_count = self.options.benchmark_thread_count
self.bmark = benchmark.Benchmark(self.nameservers,
query_count=self.options.query_count,
run_count=self.options.run_count,
thread_count=thread_count,
status_callback=self.UpdateStatus)
def RunBenchmark(self):
"""Run the benchmark."""
results = self.bmark.Run(self.test_records)
index = []
if self.options.upload_results in (1, True):
connector = site_connector.SiteConnector(self.options, status_callback=self.UpdateStatus)
index_hosts = connector.GetIndexHosts()
if index_hosts:
index = self.bmark.RunIndex(index_hosts)
else:
index = []
self.DiscoverLocation()
if len(self.nameservers) > 1:
self.nameservers.RunPortBehaviorThreads()
self.reporter = reporter.ReportGenerator(self.options, self.nameservers,
results, index=index, geodata=self.geodata)
def DiscoverLocation(self):
if not getattr(self, 'geodata', None):
self.geodata = geoip.GetGeoData()
self.country = self.geodata.get('country_name', None)
return self.geodata
def RunAndOpenReports(self):
"""Run the benchmark and open up the report on completion."""
self.RunBenchmark()
best = self.reporter.BestOverallNameServer()
self.CreateReports()
if self.options.template == 'html':
self.DisplayHtmlReport()
if self.url:
self.UpdateStatus('Complete! Your results: %s' % self.url)
else:
self.UpdateStatus('Complete! %s [%s] is the best.' % (best.name, best.ip))
def CreateReports(self):
"""Create CSV & HTML reports for the latest run."""
if self.options.output_file:
self.report_path = self.options.output_file
else:
self.report_path = util.GenerateOutputFilename(self.options.template)
if self.options.csv_file:
self.csv_path = self.options.csv_file
else:
self.csv_path = util.GenerateOutputFilename('csv')
if self.options.upload_results in (1, True):
# This is for debugging and transparency only.
self.json_path = util.GenerateOutputFilename('js')
self.UpdateStatus('Saving anonymized JSON to %s' % self.json_path)
json_data = self.reporter.CreateJsonData()
f = open(self.json_path, 'w')
f.write(json_data)
f.close()
self.UpdateStatus('Uploading results to %s' % self.options.site_url)
connector = site_connector.SiteConnector(self.options, status_callback=self.UpdateStatus)
self.url, self.share_state = connector.UploadJsonResults(
json_data,
hide_results=self.options.hide_results
)
if self.url:
self.UpdateStatus('Your sharing URL: %s (%s)' % (self.url, self.share_state))
self.UpdateStatus('Saving report to %s' % self.report_path)
f = open(self.report_path, 'w')
self.reporter.CreateReport(format=self.options.template,
output_fp=f,
csv_path=self.csv_path,
sharing_url=self.url,
sharing_state=self.share_state)
f.close()
self.UpdateStatus('Saving detailed results to %s' % self.csv_path)
self.reporter.SaveResultsToCsv(self.csv_path)
def DisplayHtmlReport(self):
self.UpdateStatus('Opening %s' % self.report_path)
better_webbrowser.output = self.DebugMsg
better_webbrowser.open(self.report_path)
| 35.379147 | 95 | 0.685332 |
f76150c07dc958addebcb1092fa4038e7c90bea3 | 2,327 | py | Python | day_13/day_13.py | furgerf/advent-of-code-2019 | f2c6ad9d401c91a7b04bb699d233a7d6ec9da2ac | ["Apache-2.0"] | null | null | null | day_13/day_13.py | furgerf/advent-of-code-2019 | f2c6ad9d401c91a7b04bb699d233a7d6ec9da2ac | ["Apache-2.0"] | null | null | null | day_13/day_13.py | furgerf/advent-of-code-2019 | f2c6ad9d401c91a7b04bb699d233a7d6ec9da2ac | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from enum import Enum, unique
import numpy as np
from day import Day
from intcode import Intcode
class Day13(Day):
@unique
class TileType(Enum):
NOTHING = 0
WALL = 1
BLOCK = 2
PADDLE = 3
BALL = 4
class GameMap:
def __init__(self):
self._game_map = None
self._paddle_x = None
self._ball_x = None
self._current_score = None
@property
def paddle_x(self):
return self._paddle_x
@property
def ball_x(self):
return self._ball_x
@property
def current_score(self):
return self._current_score
@property
def number_of_blocks(self):
return len(np.where(self._game_map == Day13.TileType.BLOCK.value)[0])
def _initialize_map(self, updates):
max_x = max(u[0] for u in updates)
max_y = max(u[1] for u in updates)
self._game_map = np.zeros(shape=(max_x+1, max_y+1))
def update_map(self, update_list):
updates = list(zip(*[update_list[i::3] for i in range(3)]))
if self._game_map is None:
self._initialize_map(updates)
for update in updates:
if update[0] == -1 and update[1] == 0:
self._current_score = update[2]
continue
self._game_map[update[0], update[1]] = Day13.TileType(update[2]).value
if update[2] == Day13.TileType.BALL.value:
self._ball_x = update[0]
if update[2] == Day13.TileType.PADDLE.value:
self._paddle_x = update[0]
def __init__(self):
super(Day13, self).__init__(13)
def parse_data(self):
return self.parse_intcode_data()
def part_1(self):
intcode = Intcode(self.data[:])
intcode.compute()
game_map = Day13.GameMap()
game_map.update_map(intcode.outputs)
return game_map.number_of_blocks
@property
def part_1_solution(self):
return 258
def part_2(self):
own_data = self.data[:]
own_data[0] = 2
intcode = Intcode(own_data)
game_map = Day13.GameMap()
while not intcode.partial_compute():
game_map.update_map(intcode.outputs)
intcode.clear_output()
intcode.add_input(np.sign(game_map.ball_x - game_map.paddle_x))
game_map.update_map(intcode.outputs)
return game_map.current_score
@property
def part_2_solution(self):
return 12765
| 23.505051 | 78 | 0.646755 |
f7615205021bd871e363727e5c65fe55ee3068f6 | 3,668 | py | Python | tools/cardiff/compare_sets.py | lebauce/edeploy | a0fe832817a3a39575df50405044c7c4c5b515a5 | ["Apache-2.0"] | null | null | null | tools/cardiff/compare_sets.py | lebauce/edeploy | a0fe832817a3a39575df50405044c7c4c5b515a5 | ["Apache-2.0"] | null | null | null | tools/cardiff/compare_sets.py | lebauce/edeploy | a0fe832817a3a39575df50405044c7c4c5b515a5 | ["Apache-2.0"] | null | null | null |
import collections
import pprint
import os
import glob
class Machine:
def __init__(self, name, value):
self.name = name
self.value = value
def compare(sets):
machines = []
for current_set in sets:
my_string = repr(sets[current_set])
machines.append(Machine(current_set, my_string))
to_be_sorted = collections.defaultdict(list)
for machine in machines:
key = machine.value
value = machine.name
to_be_sorted[key].append(value)
return dict(to_be_sorted)
def get_hosts_list_from_result(result):
systems_list = []
for element in result:
current_set = set()
for system in result[element]:
current_set.add(system)
systems_list.append(current_set)
return systems_list
def print_systems_groups(systems_groups):
total_hosts = 0
for system in systems_groups:
total_hosts += len(system)
print "The %d systems can be grouped in %d groups of " \
"identical hardware" % (total_hosts, len(systems_groups))
for system in systems_groups:
print "Group %d (%d Systems)" % (
systems_groups.index(system), len(system))
print "-> " + ', '.join(system)
print
def print_groups(global_params, result, title):
print "##### %s #####" % title
groups_name = ""
for element in result:
group = result[element]
group_name = title.strip().replace(" ", "_")
if ("output_dir" in global_params.keys()):
group_name = "%s/%s" % (global_params["output_dir"], group_name)
for host in group:
group_name = "%s_%s" % (group_name, host.strip())
groups_name = "%s '%s.def'" % (groups_name, group_name)
print "%d identical systems :" % (len(group))
print group
pprint.pprint(sorted(eval(element)))
# But always save it to a file for diffing
if ("output_dir" in global_params.keys()):
with open("%s.def" % group_name, "w") as fout:
pprint.pprint(sorted(eval(element)), fout)
print
if ("output_dir" in global_params.keys()):
if (len(result) > 1):
output_file = "%s/%s.diff" % (global_params["output_dir"],
title.strip().replace(" ", "_"))
os.system("diff -ub --from-file %s > '%s'" %
(groups_name, output_file))
else:
# If no difference exists, we can kill the def files
for filename in glob.glob("%s/%s*.def" %
(global_params["output_dir"],
title.strip().replace(" ", "_"))):
os.remove(filename)
print "#####"*2 + "#"*len(title)
def compute_similar_hosts_list(systems_groups, new_groups):
for group in new_groups:
for systems_group in systems_groups:
intersection = set.intersection(systems_group, group)
if (len(intersection) < len(systems_group) and
len(intersection) > 0):
# print "%d vs %d" % (len(intersection), len(systems_group))
# We do have a partial match meaning we shall break
# the existing group in pieces
difference = set.difference(systems_group, group)
# The group we worked on doesn't exist anymore
# So let's delete it
systems_groups.remove(systems_group)
# Let's add the two sub groups generated by this split
systems_groups.append(intersection)
systems_groups.append(difference)
| 33.345455 | 76 | 0.5747 |
f76155671d609034cee340b00cb426fe45133ba1 | 933 | py | Python | src/visions/application/summaries/series/numerical_summary.py | ieaves/tenzing | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | ["MIT"] | null | null | null | src/visions/application/summaries/series/numerical_summary.py | ieaves/tenzing | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | ["MIT"] | null | null | null | src/visions/application/summaries/series/numerical_summary.py | ieaves/tenzing | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | ["MIT"] | null | null | null |
import numpy as np
import pandas as pd
def numerical_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
"""
aggregates = [
"mean",
"std",
"var",
"max",
"min",
"median",
"kurt",
"skew",
"sum",
"mad",
]
summary = series.agg(aggregates).to_dict()
quantiles = [0.05, 0.25, 0.5, 0.75, 0.95]
for percentile, value in series.quantile(quantiles).to_dict().items():
summary["quantile_{:d}".format(int(percentile * 100))] = value
summary["iqr"] = summary["quantile_75"] - summary["quantile_25"]
summary["range"] = summary["max"] - summary["min"]
summary["cv"] = summary["std"] / summary["mean"] if summary["mean"] else np.NaN
# TODO: only calculations for histogram, not the plotting
# summary['image'] = plotting.histogram(series)
return summary
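# Hypothetical usage (not part of the original module): for pd.Series([1, 2, 3, 4, 5])
# the returned dict contains the requested aggregates plus quantile_5 ... quantile_95 and
# the derived keys iqr (4.0 - 2.0 = 2.0), range (5 - 1 = 4) and cv (std / mean).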
| 23.923077 | 83 | 0.565916 |
f76172322cd3ea9dd933528d7073a7f7a6c43c8d | 1,545 | py | Python | venv/Lib/site-packages/PygameLord/Loads.py | apoorv-x12/SpaceInvadersClassicGame-Myfirst-game | 5a4ac187c19a7decfb900b392c3fb897f6613a2c | ["MIT"] | null | null | null | venv/Lib/site-packages/PygameLord/Loads.py | apoorv-x12/SpaceInvadersClassicGame-Myfirst-game | 5a4ac187c19a7decfb900b392c3fb897f6613a2c | ["MIT"] | null | null | null | venv/Lib/site-packages/PygameLord/Loads.py | apoorv-x12/SpaceInvadersClassicGame-Myfirst-game | 5a4ac187c19a7decfb900b392c3fb897f6613a2c | ["MIT"] | null | null | null |
#LordLynx
#Part of PygameLord
import pygame,os
from pygame.locals import*
pygame.init()
#Loading Objects
'''
Parse_Locations(file)
file: Your text file, use a .txt
# Lines beginning with '#' are ignored, as in Python; follow this example:
#Comment
./File/File
./File/Other File
...
'''
def Parse_Locations(file):
file = open(file, 'r')#read the file
lines = []
folders = []
for text_line in file:
lines.append(text_line) #pull the files info
file.close()#close it
moding = []
for i in lines:
s =i.strip('\n')#split the lines up
moding.append(s)
for i in moding:
if i != '\n' and i[0] != '#': #ignore new lines or coments '#'
folders.append(i)
return folders
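# Hypothetical example (not in the original file): for a locations.txt containing a
# '#' comment line followed by './Images' and './Sounds',
# Parse_Locations('locations.txt') returns ['./Images', './Sounds'].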
'''
Lord_Loaders(paths,files)
paths: The folders returned in the Parse_Locations function
files: The .files which you wish to use
Modified versions of this are in Sounds and Images
If the opportunity arises, copy and paste this code into your program and change the files like the Image and Sound loaders
'''
def Lord_Loader(paths,files):
Files = []
File_Set = {}
for path in paths:
file = os.listdir(path)
for Object in file: #loops through the parts
for fileEnd in files:
if Object.endswith(fileEnd):
Files.append(os.path.join(path, Object)) # collect matching files ('Images' was an undefined name)
for file in Files:#appends them
text = os.path.split(file)[-1]
text = text.split('.')
text =text[0]
File_Set[text] = file
return File_Set # 'Image_Set' was an undefined name left over from the copied loader
| 27.105263 | 122 | 0.629126 |
f76177e937d55fa02fc80c6deb06b1ca63da6a42 | 3,414 | py | Python | dali/utils/misc.py | JonathanRaiman/dali-cython-stub | e258469aeb1d4cb3e4cdf5c07e8948f461a038f1 | ["MIT"] | 7 | 2016-06-20T17:50:06.000Z | 2019-12-13T17:27:46.000Z | dali/utils/misc.py | JonathanRaiman/dali-cython | e258469aeb1d4cb3e4cdf5c07e8948f461a038f1 | ["MIT"] | 6 | 2015-08-04T07:25:38.000Z | 2015-08-13T22:06:22.000Z | dali/utils/misc.py | JonathanRaiman/dali-cython | e258469aeb1d4cb3e4cdf5c07e8948f461a038f1 | ["MIT"] | 2 | 2016-07-04T21:38:14.000Z | 2016-08-31T02:53:19.000Z |
import dill as pickle
import inspect
import numpy as np
import types
from os import makedirs, listdir
from os.path import join, exists
import dali.core as D
class RunningAverage(object):
def __init__(self, alpha=0.95):
self.alpha = alpha
self.value = None
def update(self, measurement):
if self.value is None:
self.value = measurement
else:
self.value = (self.alpha * self.value +
(1.0 - self.alpha) * measurement)
def __float__(self):
return float(self.value)
def apply_recursively_on_type(x, f, target_type, list_callback=None):
if type(x) == target_type:
return f(x)
elif type(x) == list or isinstance(x, types.GeneratorType):
ret = [ apply_recursively_on_type(el, f, target_type, list_callback) for el in x]
if list_callback and all(type(el) == target_type for el in x):
ret = list_callback(ret)
return ret
elif type(x) == dict:
res = {}
for k,v in x.items():
res[k] = apply_recursively_on_type(v, f, target_type, list_callback)
return res
else:
return x
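# Hypothetical usage (not part of the original module): doubling every int in a nested
# structure, e.g. apply_recursively_on_type({'a': [1, 2]}, lambda v: v * 2, int),
# returns {'a': [2, 4]}; list_callback, when given, post-processes any list whose
# elements all had the target type.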
def integer_ceil(a, b):
return (a + b - 1) // b
def subsample(seq, maximum_length):
if seq == []:
return seq
return seq[::integer_ceil(len(seq), maximum_length)]
def median_smoothing(signal, window=10):
res = []
for i in range(window, len(signal)):
actual_window = signal[i-window:i]
res.append(np.median(actual_window))
return res
def pickle_from_scope(directory, variables, caller_globals=None, caller_locals=None):
if not exists(directory):
makedirs(directory)
if caller_globals is None or caller_locals is None:
stack = inspect.stack()
if caller_globals is None:
caller_globals = stack[1][0].f_globals
if caller_locals is None:
caller_locals = stack[1][0].f_locals
del stack
for var in variables:
with open(join(directory, var + ".pkz"), "wb") as f:
value = caller_locals.get(var) or caller_globals.get(var)
assert value is not None
pickle.dump(value, f)
def unpickle_as_dict(directory, whitelist=None, extension='.pkz'):
assert exists(directory)
res = {}
for file_name in listdir(directory):
if file_name.endswith(extension):
var_name = file_name[:-len(extension)]
if whitelist is None or var_name in whitelist:
with open(join(directory, file_name), "rb") as f:
res[var_name] = pickle.load(f)
return res
def add_device_args(parser):
parser.add_argument("--device", type=str, default='gpu', choices=['gpu','cpu'], help="Whether model should run on GPU or CPU.")
parser.add_argument("--gpu_id", type=int, default=0, help="Which GPU to use (zero-indexed just like in CUDA APIs)")
def set_device_from_args(args, verbose=False):
D.config.default_device = args.device
if args.device == 'gpu':
D.config.default_gpu = args.gpu_id
if verbose:
print("Using %s" % (D.config.gpu_id_to_name(args.gpu_id)))
__all__ = [
"apply_recursively_on_type",
"integer_ceil",
"subsample",
"median_smoothing",
"pickle_from_scope",
"unpickle_as_dict",
"RunningAverage",
"add_device_args",
"set_device_from_args"
]
| 30.482143 | 134 | 0.630053 |
f7617bd0075ee83bab13c654c4221f855bd00ec1 | 2,560 | py | Python | homeassistant/components/weather/demo.py | mbs-technologie/home-assistant | 71fc446425cbb1c0d4670c261ce8ea3bfd83a73d | ["MIT"] | 13 | 2017-02-01T13:25:34.000Z | 2022-01-26T01:30:39.000Z | homeassistant/components/weather/demo.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | ["MIT"] | 9 | 2017-07-26T18:05:32.000Z | 2021-12-05T14:16:34.000Z | homeassistant/components/weather/demo.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | ["MIT"] | 21 | 2017-07-26T17:09:40.000Z | 2022-03-27T22:37:22.000Z |
"""
Demo platform that offers fake meteorological data.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
from homeassistant.components.weather import WeatherEntity
from homeassistant.const import (TEMP_CELSIUS, TEMP_FAHRENHEIT)
CONDITION_CLASSES = {
'cloudy': [],
'fog': [],
'hail': [],
'lightning': [],
'lightning-rainy': [],
'partlycloudy': [],
'pouring': [],
'rainy': ['shower rain'],
'snowy': [],
'snowy-rainy': [],
'sunny': ['sunshine'],
'windy': [],
'windy-variant': [],
'exceptional': [],
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Demo weather."""
add_devices([
DemoWeather('South', 'Sunshine', 21, 92, 1099, 0.5, TEMP_CELSIUS),
DemoWeather('North', 'Shower rain', -12, 54, 987, 4.8, TEMP_FAHRENHEIT)
])
class DemoWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, name, condition, temperature, humidity, pressure,
wind_speed, temperature_unit):
"""Initialize the Demo weather."""
self._name = name
self._condition = condition
self._temperature = temperature
self._temperature_unit = temperature_unit
self._humidity = humidity
self._pressure = pressure
self._wind_speed = wind_speed
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format('Demo Weather', self._name)
@property
def should_poll(self):
"""No polling needed for a demo weather condition."""
return False
@property
def temperature(self):
"""Return the temperature."""
return self._temperature
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._temperature_unit
@property
def humidity(self):
"""Return the humidity."""
return self._humidity
@property
def wind_speed(self):
"""Return the wind speed."""
return self._wind_speed
@property
def pressure(self):
"""Return the wind speed."""
return self._pressure
@property
def condition(self):
"""Return the weather condition."""
return [k for k, v in CONDITION_CLASSES.items() if
self._condition.lower() in v][0]
@property
def attribution(self):
"""Return the attribution."""
return 'Powered by Home Assistant'
| 26.947368 | 79 | 0.615234 |
f76180ed5d4a6ad1dc2a760fed07faea11ca2ac1 | 82 | py | Python | describe/describe/core/__init__.py | MadsAW/machine-learning-on-materials | 6101c7e3d12be54b12391c78442294198a39cc9b | ["MIT"] | 2 | 2018-10-10T09:32:34.000Z | 2019-03-28T08:42:31.000Z | describe/describe/core/__init__.py | MadsAW/machine-learning-on-materials | 6101c7e3d12be54b12391c78442294198a39cc9b | ["MIT"] | null | null | null | describe/describe/core/__init__.py | MadsAW/machine-learning-on-materials | 6101c7e3d12be54b12391c78442294198a39cc9b | ["MIT"] | null | null | null |
from describe.core.system import System
from describe.core.lattice import Lattice
| 27.333333 | 41 | 0.853659 |
f761b3a8b89d1160652e80d381a79bfac007cf06 | 160 | py | Python | bw2temporalis/tests/__init__.py | brightway-lca/temporalis | 00a49931b6f93f11d1889b5e09c4a0820079114d | ["BSD-3-Clause"] | null | null | null | bw2temporalis/tests/__init__.py | brightway-lca/temporalis | 00a49931b6f93f11d1889b5e09c4a0820079114d | ["BSD-3-Clause"] | 1 | 2020-10-08T18:27:29.000Z | 2020-10-08T18:27:29.000Z | bw2temporalis/tests/__init__.py | brightway-lca/temporalis | 00a49931b6f93f11d1889b5e09c4a0820079114d | ["BSD-3-Clause"] | null | null | null |
from .dlca import DynamicLCATestCase
from .ia import DynamicIATestCase
from .td import TemporalDistributionTestCase
from .climate import ClimateMetricsTestCase
| 32 | 44 | 0.875 |
f761d48f366b3d81c7fd4edbb34fab5c9e225946 | 652 | py | Python | trac/wiki/__init__.py | mikiec84/trac | d51a7119b9fcb9061d7fe135c7d648fa671555dd | ["BSD-3-Clause"] | null | null | null | trac/wiki/__init__.py | mikiec84/trac | d51a7119b9fcb9061d7fe135c7d648fa671555dd | ["BSD-3-Clause"] | null | null | null | trac/wiki/__init__.py | mikiec84/trac | d51a7119b9fcb9061d7fe135c7d648fa671555dd | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/.
from trac.wiki.api import *
from trac.wiki.formatter import *
from trac.wiki.intertrac import *
from trac.wiki.model import *
from trac.wiki.parser import *
| 34.315789 | 67 | 0.76227 |
f761e2fdccea1b338af9d65fb45fe255f71d8844 | 1,464 | py | Python | dish/interpreter.py | dullbananas/dish | 01f1439bc38861c0890412becccc9135c05e621b | ["MIT"] | 13 | 2019-11-25T00:52:58.000Z | 2021-01-04T14:43:12.000Z | dish/interpreter.py | dullbananas/dish | 01f1439bc38861c0890412becccc9135c05e621b | ["MIT"] | null | null | null | dish/interpreter.py | dullbananas/dish | 01f1439bc38861c0890412becccc9135c05e621b | ["MIT"] | 1 | 2020-01-23T19:59:22.000Z | 2020-01-23T19:59:22.000Z |
import os
import time
import click
from . import procs
class Interpreter:
def __init__(self, ctx, verbose):
self.ctx = ctx
self.verbose = verbose
self.lines = []
self.in_comment = False
def feed(self, line):
if len(self.lines) > 0:
# End of multi-line comment
if self.lines[0].startswith('#==') and line.endswith('==#'):
self.lines = []
self.in_comment = False
return False
return True
start_time = time.time()
# Handle exit command or EOF
if line == 'exit':
self.ctx.exit()
# Blank lines
elif line.strip() == '':
pass
# Print debug information
elif line == 'debug':
click.echo('Configuration values:')
for key, val in self.ctx.obj.config.items():
click.echo(f' {key} = {repr(val)}')
# cd
elif line.startswith('cd '):
try:
dirname = line[3:].strip()
os.chdir(os.path.expanduser(dirname))
except OSError as e:
click.echo(e, err=True)
# Start of multiline comments
elif line.startswith('#=='):
self.lines.append(line)
self.in_comment = True
self.ctx.obj.previous_cmd_duration = 0
return True
# Single-line comments
elif line.strip()[0] == '#':
pass
# Normal commands
else:
try:
with self.ctx:
procs.run_line(line, echo_args=self.verbose)
except FileNotFoundError as e:
click.echo(f'Command not found: {e.filename}', err=True)
self.lines = []
self.ctx.obj.previous_cmd_duration = time.time() - start_time
return False
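# Hypothetical walkthrough (not in the original file): feeding '#== start of a note'
# returns True and switches the interpreter into comment mode; feeding 'end of note ==#'
# afterwards closes the block and returns False without running anything as a command.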
| 21.529412 | 63 | 0.646858 |
f7622a0e8cb54686307717dfc205c3ba03a27138 | 2,193 | py | Python | urbandict-search/app.py | madhukar01/examples | 7f4a2ee3394a2483bb9c848a674abd93c0f34443 | ["Apache-2.0"] | null | null | null | urbandict-search/app.py | madhukar01/examples | 7f4a2ee3394a2483bb9c848a674abd93c0f34443 | ["Apache-2.0"] | null | null | null | urbandict-search/app.py | madhukar01/examples | 7f4a2ee3394a2483bb9c848a674abd93c0f34443 | ["Apache-2.0"] | null | null | null |
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import click
import os
import string
import random
from jina.flow import Flow
RANDOM_SEED = 10 # 5
os.environ['PARALLEL'] = str(2)
os.environ['SHARDS'] = str(2)
def get_random_ws(workspace_path, length=8):
random.seed(RANDOM_SEED)
letters = string.ascii_lowercase
dn = ''.join(random.choice(letters) for i in range(length))
return os.path.join(workspace_path, dn)
def print_topk(resp, word):
for d in resp.search.docs:
print(f'Ta-Dah🔮, here are what we found for: {word}')
for idx, match in enumerate(d.matches):
score = match.score.value
if score <= 0.0:
continue
word_def = match.chunks[0].text
word = match.meta_info.decode()
print('> {:>2d}({:.2f}). {}: "{}"'.format(idx, score, word, word_def.strip()))
@click.command()
@click.option('--task', '-t')
@click.option('--num_docs', '-n', default=50)
@click.option('--top_k', '-k', default=5)
def main(task, num_docs, top_k):
workspace_path = '/tmp/jina/urbandict'
os.environ['TMP_WORKSPACE'] = get_random_ws(workspace_path)
print(f'{os.environ["TMP_WORKSPACE"]}')
data_fn = os.environ.get('WASHED_DATA_DIR', os.path.join(workspace_path, 'urbandict-word-defs.csv'))
if task == 'index':
f = Flow().load_config('flow-index.yml')
with f:
f.index_lines(filepath=data_fn, size=num_docs, batch_size=16)
elif task == 'query':
f = Flow().load_config('flow-query.yml')
with f:
while True:
text = input('word definition: ')
if not text:
break
ppr = lambda x: print_topk(x, text)
f.search_lines(lines=[text, ], output_fn=ppr, topk=top_k)
elif task == 'query_restful':
f = Flow().load_config('flow-query.yml')
f.use_rest_gateway()
with f:
f.block()
else:
raise NotImplementedError(
f'unknown task: {task}. A valid task is `index` or `query` or `query_restful`.')
if __name__ == '__main__':
main()
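# Hypothetical invocations (not in the original file), based on the click options above:
#   python app.py -t index -n 500      # index 500 documents from the word-definition CSV
#   python app.py -t query -k 5        # interactive search returning the top-5 matches
#   python app.py -t query_restful     # serve the query flow through the REST gateway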
| 31.782609 | 104 | 0.599635 |
f7622bdbd148128a912a21d30fc2a0f97001fcb8 | 10,710 | py | Python | utils/helpful_util.py | drcastillo/hicss2020 | 0a812257215c75054d8b891e23c933d6a8327c0d | ["Apache-2.0"] | null | null | null | utils/helpful_util.py | drcastillo/hicss2020 | 0a812257215c75054d8b891e23c933d6a8327c0d | ["Apache-2.0"] | null | null | null | utils/helpful_util.py | drcastillo/hicss2020 | 0a812257215c75054d8b891e23c933d6a8327c0d | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Reference:
#from __future__ import print_function
#from utils.heaton_utils import *
import numpy as np
import warnings
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
import glob
#pip install counter
from collections import Counter
import pickle
import sklearn
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
from keras.models import load_model
from keras.models import model_from_json
from sklearn.model_selection import train_test_split
import seaborn as sns
from IPython.display import display, HTML
from sklearn.metrics import classification_report
from utils.perturbation import load_models_lendingclub
from IPython.display import display_html, display, HTML
import lime.lime_tabular
import lime
class KerasModelUtil:
modelwts_extension = "h5"
json_extension = "json"
pickle_extension = "p"
def save(self, model_dir, model_name, model, label_class_map):
if model_dir.endswith('/') == False:
model_dir = model_dir + '/'
# put the file name into specific tokens
fn_base, sep, tail = model_name.partition('.')
if not sep:
sep = "."
json_fn = model_dir + fn_base + sep + self.json_extension
wt_ext = tail
if not wt_ext:
wt_ext = self.modelwts_extension
wt_fn = model_dir + fn_base + sep + wt_ext
pickle_fn = model_dir + fn_base + sep + self.pickle_extension
pickle.dump(label_class_map, open(pickle_fn, 'wb'))
# serialize model to JSON
model_json = model.to_json()
with open(json_fn, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(wt_fn)
def load(self, model_dir, model_name, input_shape=(None, 224, 224, 3)):
# Load the json model first
if model_dir.endswith('/') == False:
model_dir = model_dir + '/'
# put the file name into specific tokens
fn_base, sep, tail = model_name.partition('.')
if not sep:
sep = "."
json_fn = model_dir + fn_base + sep + self.json_extension
json_file = open(json_fn, 'r')
loaded_model_json = json_file.read()
json_file.close()
# form the model from the json and rebuild the layers
loaded_model = model_from_json(loaded_model_json)
loaded_model.build(input_shape=input_shape)
# Load the weights
wt_ext = tail
if not wt_ext:
wt_ext = self.modelwts_extension
wt_fn = model_dir + fn_base + sep + wt_ext
loaded_model.load_weights(wt_fn)
#print("Loaded model from disk")
# Load the labels and Class ids
pickle_fn = model_dir + fn_base + sep + self.pickle_extension
label_classids = pickle.load(open(pickle_fn, "rb"))
class_label_map = {v: k for k, v in label_classids.items()}
#print(label_classids)
#print(classids_labels)
return loaded_model, class_label_map
##################################################
# Keras callbacks for plotting training model
# accuracy and loss
##################################################
from IPython.display import clear_output
import math
import keras
#Can just import LiveLossPlot & add to model callbacks.
class TrainingPlot(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.acc = []
self.val_acc = []
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
self.acc.append(logs.get('acc'))
self.val_acc.append(logs.get('val_acc'))
self.i += 1
f, (ax1, ax2) = plt.subplots(1, 2, sharex=False)
clear_output(wait=True)
ax1.set_yscale('log')
ax1.plot(self.x, self.losses, label="training loss")
ax1.plot(self.x, self.val_losses, label="validation loss")
ax1.legend()
ax2.set_ylim(0, 1.0)
ax2.plot(self.x, self.acc, label="training accuracy")
ax2.plot(self.x, self.val_acc, label="validation accuracy")
ax2.legend()
plt.show()
##################################################
# Utility code for computing a Confusion Matrix
##################################################
import matplotlib.pyplot as plt #for plotting
import itertools as it
#Note, this code is taken straight from the SKLEARN website, a nice way of viewing confusion matrix.
def plot_confusion_matrix(cm,
classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
Note: class is a listlike parameter. Pass in list of classes, eg: ["No Loan", "Loan"]
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in it.product(range(cm.shape[0]), range(cm.shape[1])):
value = '{0:.2g}'.format(cm[i, j])
plt.text(j,
i,
value,
fontsize=10,
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
##################################################
# Utility code for measuring model performance given dataset size
##################################################
def plot_learning_curve(estimator,
title,
X,
y,
ylim=None,
cv=None,
n_jobs=-1,
train_sizes=np.linspace(.1, 1.0, 5)):
"""Generate a simple plot of the test and training learning curve"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes,
train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std,
alpha=0.1,
color="r")
plt.fill_between(train_sizes,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
color="g")
plt.plot(train_sizes,
train_scores_mean,
'o-',
color="r",
label="Training score")
plt.plot(train_sizes,
test_scores_mean,
'o-',
color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
def display_sklearn_feature_importance(data, set, features, n_features):
'''
Parameters:
data: data object; coomatrix w/ encoded features
n_features: number of features to visualize
set: str;
'lendingclub' - load lending club models
'uci' - load uci models
Returns:
Graph of basic feature importance measurements
'''
if 'uci' in set:
rfc, gbc, logit, keras_ann, sk_ann = load_models_uci()
else:
rfc, gbc, logit, keras_ann, sk_ann = load_models_lendingclub()
feature_importance = pd.DataFrame({
"feature":
features,
"RF_Feature_Importance":
np.round(rfc.feature_importances_, 4),
"GBC_Feature_Importance":
np.round(gbc.feature_importances_, 4),
"Logit_Coeff":
np.round(logit.coef_[0], 4),
"Max_Feature_Val":
pd.DataFrame(data.toarray(), columns=features).max(),
})
n = n_features
feature_importance['coeff_max'] = feature_importance[
'Logit_Coeff'] * feature_importance['Max_Feature_Val']
temp = feature_importance.nlargest(n, 'RF_Feature_Importance')
sns.barplot(temp['RF_Feature_Importance'], temp['feature'])
plt.title('Random Forest - Feature Importance Top {}'.format(n_features))
plt.show()
temp = feature_importance.nlargest(n, 'GBC_Feature_Importance')
sns.barplot(temp['GBC_Feature_Importance'], temp['feature'])
plt.title('Gradient Boosted Classifier - Feature Importance Top {}'.format(
n_features))
plt.show()
#We want to show the total possible feature impact here. Take the max of each feature in the training set by the logit coeff.
lookup = pd.DataFrame(data.toarray(), columns=features).max()
temp = feature_importance.nlargest(int(n / 2), 'coeff_max')
temp1 = feature_importance.nsmallest(int(n / 2), 'coeff_max')
temp = pd.concat([temp, temp1])
sns.barplot(temp['coeff_max'], temp['feature'])
plt.title('Logistic Regression - Coefficients Top&Bottom {}'.format(
int(n_features / 2)))
plt.show()
def get_best_score(x, y):
try:
return sklearn.metrics.accuracy_score(x, y.predict(encoded_test))
except:
return sklearn.metrics.accuracy_score(x, keras_ann.predict_classes(encoded_test.toarray()))
def display_side_by_side(*args):
html_str = ''
for df in args:
html_str += df.to_html()
display_html(html_str.replace('table', 'table style="display:inline"'),
raw=True)
def neg_pos_logit_coefficients(model, features):
logistic_regress_coeff = pd.DataFrame({
"features": features,
"Coef": model.coef_[0]
})
neg_coef = round(logistic_regress_coeff[
logistic_regress_coeff['Coef'] < 0].sort_values('Coef', ascending=True),2).head(15)
pos_coef = round(logistic_regress_coeff[
logistic_regress_coeff['Coef'] > 0].sort_values('Coef', ascending=False),2).head(15)
display_side_by_side(neg_coef, pos_coef)
| 32.259036 | 129 | 0.610458 |
f7629707b41706bf06514ffdec0a9c54a845d59c | 1,424 | py | Python | test/lmp/model/_lstm_1997/test_pred.py | ProFatXuanAll/char-RNN | 531f101b3d1ba20bafd28ca060aafe6f583d1efb | ["Beerware"] | null | null | null | test/lmp/model/_lstm_1997/test_pred.py | ProFatXuanAll/char-RNN | 531f101b3d1ba20bafd28ca060aafe6f583d1efb | ["Beerware"] | null | null | null | test/lmp/model/_lstm_1997/test_pred.py | ProFatXuanAll/char-RNN | 531f101b3d1ba20bafd28ca060aafe6f583d1efb | ["Beerware"] | null | null | null |
"""Test prediction.
Test target:
- :py:meth:`lmp.model._lstm_1997.LSTM1997.pred`.
"""
import torch
from lmp.model._lstm_1997 import LSTM1997
def test_prediction_result(lstm_1997: LSTM1997, batch_cur_tkids: torch.Tensor) -> None:
"""Return float tensor with correct shape and range."""
lstm_1997 = lstm_1997.eval()
seq_len = batch_cur_tkids.size(1)
batch_prev_states = None
for i in range(seq_len):
batch_next_tkids_pd, batch_prev_states = lstm_1997.pred(
batch_cur_tkids=batch_cur_tkids[..., i],
batch_prev_states=batch_prev_states,
)
# Output float tensor.
assert batch_next_tkids_pd.dtype == torch.float
# Shape: (batch_size, vocab_size).
assert batch_next_tkids_pd.size() == torch.Size([batch_cur_tkids.shape[0], lstm_1997.emb.num_embeddings])
# Probabilities are values within range [0, 1].
assert torch.all(0 <= batch_next_tkids_pd).item()
assert torch.all(batch_next_tkids_pd <= 1).item()
# Sum of the probabilities equals to 1.
accum = batch_next_tkids_pd.sum(dim=-1)
assert torch.allclose(accum, torch.ones_like(accum))
assert isinstance(batch_prev_states, list)
assert len(batch_prev_states) == 2
assert batch_prev_states[0].size() == torch.Size([batch_cur_tkids.size(0), lstm_1997.n_blk * lstm_1997.d_blk])
assert batch_prev_states[1].size() == torch.Size([batch_cur_tkids.size(0), lstm_1997.n_blk, lstm_1997.d_blk])
| 33.904762 | 114 | 0.728933 |
f762c093c822c644f705f207047d58f9e6b8ee1d | 581 | py | Python | geminipy/helpers.py | Vanclief/gemini-python | 8472e32b3db436d4a9ef5c1b9ea376109f23845c | ["MIT"] | null | null | null | geminipy/helpers.py | Vanclief/gemini-python | 8472e32b3db436d4a9ef5c1b9ea376109f23845c | ["MIT"] | null | null | null | geminipy/helpers.py | Vanclief/gemini-python | 8472e32b3db436d4a9ef5c1b9ea376109f23845c | ["MIT"] | null | null | null |
def dict_to_float(d):
"""
Converts all strings to floats from a dict
"""
if type(d) is dict:
for key, value in d.items():
if type(value) is str:
try:
d[key] = float(value)
except ValueError:
d[key] = str(value)
return d
def list_dict_to_float(l):
"""
Applies dict_to_float to all elements from a list
"""
for d in l:
try:
del d['timestampms']
except KeyError:
pass
d = dict_to_float(d)
return l
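# Hypothetical usage (not part of the original module):
# dict_to_float({'price': '3.14', 'side': 'buy'}) -> {'price': 3.14, 'side': 'buy'}
# list_dict_to_float also drops any 'timestampms' key from each dict before converting.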
| 19.366667 | 53 | 0.483649 |
f762d0915302a8196bbaa04f02cba379793292f9 | 2,994 | py | Python | docs/source/conf.py | YiweiLi4/tensorbay-python-sdk | 20fc8b37bad01c2d201a87b1436f1d4ff77d76df | ["MIT"] | null | null | null | docs/source/conf.py | YiweiLi4/tensorbay-python-sdk | 20fc8b37bad01c2d201a87b1436f1d4ff77d76df | ["MIT"] | null | null | null | docs/source/conf.py | YiweiLi4/tensorbay-python-sdk | 20fc8b37bad01c2d201a87b1436f1d4ff77d76df | ["MIT"] | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=missing-module-docstring,invalid-name
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
"""Configuration file for the Sphinx documentation builder."""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parents[2]))
# -- Project information -----------------------------------------------------
project = "TensorBay"
copyright = "2021, Graviti" # pylint: disable=redefined-builtin
author = "Graviti"
# The full version, including alpha/beta/rc tags
# release = "0.3.10"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc", # used for generating doc automatically
"sphinx.ext.viewcode", # used for imbedding source code automatically
"sphinx.ext.autosummary", # used for creating summary table automatically
"sphinx.ext.todo", # used for recording todo and todolist
"sphinx.ext.ifconfig", # used for configuration based on different condtitions
"sphinx.ext.intersphinx", # used for embedding doc links from other project such as python
"sphinx.ext.autosectionlabel", # used for referring sections in a rst file
"sphinx.ext.napoleon", # used for being compatible with Google and Numpy doc style
"sphinx.ext.coverage", # used for generating doc coverage report
]
# extensions_config
autosummary_generate = True
todo_include_todos = True
autosectionlabel_prefix_document = True
numfig = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_favicon = "images/favicon.svg"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| 38.384615 | 95 | 0.698397 |
f762d1f6c845fe6bfbbb15839b0a302b49b9a2ce | 7,737 | py | Python | webapp/proto.py | dssg/babies-public | 0a03e95992bfd7b7b4c2f11b8a5e2c3961f193c6 | ["MIT"] | 5 | 2018-09-13T02:26:23.000Z | 2019-10-21T12:38:16.000Z | webapp/proto.py | dssg/babies-public | 0a03e95992bfd7b7b4c2f11b8a5e2c3961f193c6 | ["MIT"] | null | null | null | webapp/proto.py | dssg/babies-public | 0a03e95992bfd7b7b4c2f11b8a5e2c3961f193c6 | ["MIT"] | 3 | 2015-10-14T19:51:42.000Z | 2022-03-29T07:12:39.000Z |
import pandas as pd
import numpy as np
import psycopg2
from sqlalchemy import create_engine
import json
import sys
from sklearn.externals import joblib
import os
def run_all():
# connect to postgres
params = json.load(open('/home/ipan/passwords/psql_psycopg2.password', 'r'))
try:
conn = psycopg2.connect(**params)
conn.autocommit
cur = conn.cursor()
except:
print('Unable to connect to database')
# import from babysaver
sys.path.insert(0, '/home/ipan/babies/')
from babysaver import features
from babysaver import models
from babysaver.models import WeightedQuestions
from sklearn.linear_model import LogisticRegression
from babysaver import evaluation
# specify dat configuration in a dictionary
config_add1 = {'Features': None,
'Include 707G?': 'Y',
'707G Questions': range(35,52),
'707G Start Date': '2014-07-01',
'707G End Date': None,
'Include 711?': 'N',
'711 Questions': None,
'711 Start Date': None,
'711 End Date': None,
'Include FCM?': 'Y',
'Include BBO?': 'Y',
'Include other?': 'Y',
'Outcome': 'ADVB1_OTC'}
# use config_writer to write dictionary to csv file
features.config_writer(config_add1, '/home/ipan/configs/config_add1.csv')
# then use that csv file to load in the data
data_dct = features.data_getter('/home/ipan/configs/config_add1.csv',
conn=conn,
unique_identifier='UNI_PART_ID_I',
impute='fill_mode',
interactions=False)
# specify hyperparameter lists
c_list = [1e-4, 1e-3, 0.01, 0.1, 1, 10, 100, 1e3, 1e4, 1e20]
penalties = ['l2']
class_wgts = [None, 'auto']
wgt_schemes = ['odds_ratio_relative', 'odds_ratio_absolute',
'marginal_effects', 'positive_coefs']
# specify classifier dictionaries
expand_wgt = {'clf': WeightedQuestions,
'param_dict': {'C': c_list,
'penalty': penalties,
'class_weight': class_wgts,
'weight_scheme': wgt_schemes,
'round_dec': [1]
}
}
simple_wgt = {'clf': WeightedQuestions,
'param_dict': {'C': c_list,
'penalty': penalties,
'class_weight': class_wgts,
'weight_scheme': wgt_schemes,
'round_dec': [0]
}
}
log_lib = {'clf': LogisticRegression,
'param_dict': {'C': c_list,
'penalty': penalties,
'class_weight': class_wgts
}
}
# specify list of k for precision at k
k_list = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3]
# train a bunch of classifiers for each type of classifier
# I wanted to find the best one of each, so I did each one separately
expand_evals, expand_pkls = models.machine_learner(data_dct,
clf_library=expand_wgt,
pkl_folder='e_pkls',
cv='kfold_cv',
k=k_list,
n_folds=10)
simple_evals, simple_pkls = models.machine_learner(data_dct,
clf_library=simple_wgt,
pkl_folder='s_pkls',
cv='kfold_cv',
k=k_list,
n_folds=10)
log_evals, log_pkls = models.machine_learner(data_dct,
clf_library=log_lib,
pkl_folder='log_pkls',
cv='kfold_cv',
k=k_list,
n_folds=10)
# concatenate all the dataframes into one dataframe using
# output of machine learner
expand_df = evaluation.dict_to_dataframe(expand_evals, expand_pkls)
simple_df = evaluation.dict_to_dataframe(simple_evals, simple_pkls)
log_df = evaluation.dict_to_dataframe(log_evals, log_pkls)
# metric(s) to sort classifiers by
sort_metrics = ['precision at 0.1 mean', 'precision at 0.15 mean']
# mapping between question number and text
map_file = '/home/ipan/707G_question_map.csv'
# get a dataframe with weights and question text
expand_wgts = evaluation.weight_mapper(data_dct, expand_df,
sort_metrics, map_file, '707G')
expand_wgts.columns = ['QID', 'Question', 'Expanded Weights']
simple_wgts = evaluation.weight_mapper(data_dct, simple_df,
sort_metrics, map_file, '707G')
simple_wgts.columns = ['QID', 'Question', 'Simple Weights']
log_wgts = evaluation.weight_mapper(data_dct, log_df, sort_metrics,
map_file, '707G')
all_wgts = log_wgts.join([expand_wgts['Expanded Weights'],
simple_wgts['Simple Weights']])
# load in models
log_df = log_df.sort(sort_metrics, ascending=False)
log_model = joblib.load(log_df['pickle_file'][0])
ew_model = joblib.load(expand_df.sort(sort_metrics, ascending=False)['pickle_file'][0])
sw_model = joblib.load(simple_df.sort(sort_metrics, ascending=False)['pickle_file'][0])
df = data_dct['dataframe']
feats = data_dct['features']
log_scores = log_model.predict_proba(df[feats])[:,1]
pd.DataFrame({'scores': log_scores}).to_csv('scores.csv', index=False)
# calculate overall rate of adverse births
baseline_rate = np.round(df[data_dct['outcome']].mean()*100,1)
# calculate scores
ew_scores = ew_model.predict_proba(df[feats])[:,1]
sw_scores = sw_model.predict_proba(df[feats])[:,1]
# get metrics for various values of k
expand_mets = evaluation.metrics_getter(data_dct, expand_df,
sort_metrics, map_file,
k_list, ew_scores)
simple_mets = evaluation.metrics_getter(data_dct, simple_df,
sort_metrics, map_file,
k_list, sw_scores)
log_mets = evaluation.metrics_getter(data_dct, log_df,
sort_metrics, map_file,
k_list, log_scores, scale=True)
if not os.path.exists('best_pkl/'):
os.makedirs('best_pkl/')
# pickle the best logistic regression model for webapp prediction tool
joblib.dump(log_model, 'best_pkl/best_model.pkl')
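# returned below, in order: HTML table of per-question weights for all three
# model types, HTML metric tables for the logistic, expanded-weight and
# simple-weight models, and the baseline adverse-birth rate (percent)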
return evaluation.weight_html(all_wgts), log_mets.to_html(), expand_mets.to_html(), simple_mets.to_html(), baseline_rate
| 44.211429 | 124 | 0.503425 |
f762d362f077648d6092ca421c6a397f0f3871dd
| 1,268 |
py
|
Python
|
data/spm/generate_corpus.py
|
tarohi24/docsim
|
4879bcc889c32470a27094eb183c2ce15ae2f1fb
|
[
"MIT"
] | 1 |
2020-04-29T13:52:29.000Z
|
2020-04-29T13:52:29.000Z
|
data/spm/generate_corpus.py
|
tarohi24/docsim
|
4879bcc889c32470a27094eb183c2ce15ae2f1fb
|
[
"MIT"
] | 136 |
2019-09-19T03:36:21.000Z
|
2019-11-28T08:50:45.000Z
|
data/spm/generate_corpus.py
|
tarohi24/docsim
|
4879bcc889c32470a27094eb183c2ce15ae2f1fb
|
[
"MIT"
] | null | null | null |
"""
Generate corpus for the specific category
"""
import argparse
from pathlib import Path
from typing import List
from tqdm import tqdm
from docsim.elas.search import EsResult, EsSearcher
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dataset',
type=str,
nargs=1)
parser.add_argument('output',
type=str,
nargs=1)
parser.add_argument('tags_file',
type=str,
nargs=1)
args = parser.parse_args()
dataset: str = args.dataset[0]
tags_file: str = args.tags_file[0]
output: str = args.output[0]
tags_path: Path = Path(__file__).parent.joinpath(tags_file)
with open(tags_path) as fin:
tags = fin.read().splitlines()
print(tags)
res: EsResult = EsSearcher(es_index=dataset)\
.initialize_query()\
.add_match_all()\
.add_query(terms=tags, field='tags')\
.add_source_fields(['text'])\
.scroll()
path: Path = Path(__file__).parent.joinpath(dataset).joinpath(f'{output}.txt')
with open(path, 'w') as fout:
for hit in tqdm(res.hits):
fout.write(hit.source['text'] + '\n')
| 28.177778 | 82 | 0.584385 |
f762ed8a51b6c1cbaedd599fc4e4202645413910
| 30 |
py
|
Python
|
doll/__init__.py
|
badge/doll
|
36d99d9dd8ab6ff2e155e8daa8dad28619e46ef7
|
[
"Apache-2.0"
] | 2 |
2016-12-17T23:10:50.000Z
|
2020-04-29T23:03:31.000Z
|
doll/__init__.py
|
badge/doll
|
36d99d9dd8ab6ff2e155e8daa8dad28619e46ef7
|
[
"Apache-2.0"
] | 3 |
2015-08-30T16:11:50.000Z
|
2015-09-01T19:59:50.000Z
|
doll/__init__.py
|
badge/doll
|
36d99d9dd8ab6ff2e155e8daa8dad28619e46ef7
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'Matthew Badger'
| 15 | 29 | 0.766667 |
f763059eb7944793f5098af41af78aa79fa2ad52
| 20,125 |
py
|
Python
|
pandaharvester/harvesterfifo/mysql_fifo.py
|
tsulaiav/harvester
|
ca3f78348019dd616738f2da7d50e81700a8e6b9
|
[
"Apache-2.0"
] | null | null | null |
pandaharvester/harvesterfifo/mysql_fifo.py
|
tsulaiav/harvester
|
ca3f78348019dd616738f2da7d50e81700a8e6b9
|
[
"Apache-2.0"
] | null | null | null |
pandaharvester/harvesterfifo/mysql_fifo.py
|
tsulaiav/harvester
|
ca3f78348019dd616738f2da7d50e81700a8e6b9
|
[
"Apache-2.0"
] | null | null | null |
import time
import functools
import warnings
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvesterconfig import harvester_config
warnings.simplefilter("ignore")
class MysqlFifo(PluginBase):
# constructor
def __init__(self, **kwarg):
self.reconnectTimeout = 300
if hasattr(harvester_config, 'fifo') and hasattr(harvester_config.fifo, 'reconnectTimeout'):
self.reconnectTimeout = harvester_config.fifo.reconnectTimeout
elif hasattr(harvester_config.db, 'reconnectTimeout'):
self.reconnectTimeout = harvester_config.db.reconnectTimeout
PluginBase.__init__(self, **kwarg)
self.tableName = '{title}_FIFO'.format(title=self.titleName)
# DB access attributes
if hasattr(self, 'db_host'):
db_host = self.db_host
else:
try:
db_host = harvester_config.fifo.db_host
except AttributeError:
db_host = '127.0.0.1'
if hasattr(self, 'db_port'):
db_port = self.db_port
else:
try:
db_port = harvester_config.fifo.db_port
except AttributeError:
db_port = 3306
if hasattr(self, 'db_user'):
db_user = self.db_user
else:
db_user = harvester_config.fifo.db_user
if hasattr(self, 'db_password'):
db_password = self.db_password
else:
db_password = harvester_config.fifo.db_password
if hasattr(self, 'db_schema'):
db_schema = self.db_schema
else:
db_schema = harvester_config.fifo.db_schema
# get connection, cursor and error types
try:
import MySQLdb
import MySQLdb.cursors
except ImportError:
try:
import mysql.connector
except ImportError:
raise Exception('No available MySQL DB API installed. Please pip install mysqlclient or mysql-connection-python')
else:
self.con = mysql.connector.connect(user=db_user, passwd=db_password, db=db_schema,
host=db_host, port=db_port, charset='utf8')
self.cur = self.con.cursor(buffered=True)
self.OperationalError = mysql.connector.errors.OperationalError
else:
class MyCursor (MySQLdb.cursors.Cursor):
def fetchone(self):
tmpRet = MySQLdb.cursors.Cursor.fetchone(self)
if tmpRet is None:
return None
return tmpRet
def fetchall(self):
tmpRets = MySQLdb.cursors.Cursor.fetchall(self)
return tmpRets
self.con = MySQLdb.connect(user=db_user, passwd=db_password,
db=db_schema, host=db_host, port=db_port,
cursorclass=MyCursor)
self.cur = self.con.cursor()
self.OperationalError = MySQLdb.OperationalError
# create table for fifo
try:
self._make_table()
# self._make_index()
self.commit()
except Exception as _e:
self.rollback()
raise _e
# decorator exception handler for type of DBs
def _handle_exception(method):
def _decorator(_method, *args, **kwargs):
@functools.wraps(_method)
def _wrapped_method(self, *args, **kwargs):
try:
return _method(self, *args, **kwargs)  # propagate the wrapped method's return value
except Exception as exc:
# Case to try renew connection
isOperationalError = False
if isinstance(exc, self.OperationalError):
isOperationalError = True
if isOperationalError:
try_timestamp = time.time()
n_retry = 1
while time.time() - try_timestamp < self.reconnectTimeout:
try:
self.__init__()
return
except Exception as _e:
exc = _e
sleep_time = core_utils.retry_period_sec(n_retry, increment=2, max_seconds=300, min_seconds=1)
if not sleep_time:
break
else:
time.sleep(sleep_time)
n_retry += 1
raise exc
else:
raise exc
return _wrapped_method
return _decorator(method)
# wrapper for execute
@_handle_exception
def execute(self, sql, params=None):
retVal = self.cur.execute(sql, params)
return retVal
# wrapper for executemany
@_handle_exception
def executemany(self, sql, params_list):
retVal = self.cur.executemany(sql, params_list)
return retVal
# commit
@_handle_exception
def commit(self):
self.con.commit()
# rollback
@_handle_exception
def rollback(self):
self.con.rollback()
# make table
def _make_table(self):
sql_make_table = (
'CREATE TABLE IF NOT EXISTS {table_name} '
'('
' id BIGINT NOT NULL AUTO_INCREMENT,'
' item LONGBLOB,'
' score DOUBLE,'
' temporary TINYINT DEFAULT 0,'
' PRIMARY KEY (id) '
')'
).format(table_name=self.tableName)
self.execute(sql_make_table)
# make index
def _make_index(self):
sql_make_index = (
'CREATE INDEX IF NOT EXISTS score_index ON {table_name} '
'(score)'
).format(table_name=self.tableName)
self.execute(sql_make_index)
def _push(self, item, score):
sql_push = (
'INSERT INTO {table_name} '
'(item, score) '
'VALUES (%s, %s) '
).format(table_name=self.tableName)
params = (item, score)
self.execute(sql_push, params)
def _push_by_id(self, id, item, score):
sql_push = (
'INSERT IGNORE INTO {table_name} '
'(id, item, score) '
'VALUES (%s, %s, %s) '
).format(table_name=self.tableName)
params = (id, item, score)
self.execute(sql_push, params)
n_row = self.cur.rowcount
if n_row == 1:
return True
else:
return False
def _pop(self, timeout=None, protective=False, mode='first'):
sql_pop_get_first = (
'SELECT id, item, score FROM {table_name} '
'WHERE temporary = 0 '
'ORDER BY score LIMIT 1 '
).format(table_name=self.tableName)
sql_pop_get_last = (
'SELECT id, item, score FROM {table_name} '
'WHERE temporary = 0 '
'ORDER BY score DESC LIMIT 1 '
).format(table_name=self.tableName)
sql_pop_to_temp = (
'UPDATE {table_name} SET temporary = 1 '
'WHERE id = %s AND temporary = 0 '
).format(table_name=self.tableName)
sql_pop_del = (
'DELETE FROM {table_name} '
'WHERE id = %s AND temporary = 0 '
).format(table_name=self.tableName)
mode_sql_map = {
'first': sql_pop_get_first,
'last': sql_pop_get_last,
}
sql_pop_get = mode_sql_map[mode]
keep_polling = True
got_object = False
_exc = None
wait = 0.1
max_wait = 2
tries = 0
id = None
last_attempt_timestamp = time.time()
while keep_polling:
try:
self.execute(sql_pop_get)
res = self.cur.fetchall()
if len(res) > 0:
id, item, score = res[0]
params = (id,)
if protective:
self.execute(sql_pop_to_temp, params)
else:
self.execute(sql_pop_del, params)
n_row = self.cur.rowcount
self.commit()
if n_row >= 1:
got_object = True
except Exception as _e:
self.rollback()
_exc = _e
else:
if got_object:
keep_polling = False
return (id, item, score)
now_timestamp = time.time()
if timeout is None or (now_timestamp - last_attempt_timestamp) >= timeout:
keep_polling = False
if _exc is not None:
raise _exc
tries += 1
time.sleep(wait)
wait = min(max_wait, tries/10.0 + wait)
return None
def _peek(self, mode='first', id=None, skip_item=False):
if skip_item:
columns_str = 'id, score'
else:
columns_str = 'id, item, score'
sql_peek_first = (
'SELECT {columns} FROM {table_name} '
'WHERE temporary = 0 '
'ORDER BY score LIMIT 1 '
).format(columns=columns_str, table_name=self.tableName)
sql_peek_last = (
'SELECT {columns} FROM {table_name} '
'WHERE temporary = 0 '
'ORDER BY score DESC LIMIT 1 '
).format(columns=columns_str, table_name=self.tableName)
sql_peek_by_id = (
'SELECT {columns} FROM {table_name} '
'WHERE id = %s AND temporary = 0 '
).format(columns=columns_str, table_name=self.tableName)
sql_peek_by_id_temp = (
'SELECT {columns} FROM {table_name} '
'WHERE id = %s AND temporary = 1 '
).format(columns=columns_str, table_name=self.tableName)
mode_sql_map = {
'first': sql_peek_first,
'last': sql_peek_last,
'id': sql_peek_by_id,
'idtemp': sql_peek_by_id_temp,
}
sql_peek = mode_sql_map[mode]
if mode in ('id', 'idtemp'):
params = (id,)
self.execute(sql_peek, params)
else:
self.execute(sql_peek)
res = self.cur.fetchall()
self.commit()
if len(res) > 0:
if skip_item:
id, score = res[0]
item = None
else:
id, item, score = res[0]
return (id, item, score)
else:
return None
def _update(self, id, item=None, score=None, temporary=None, cond_score=None):
cond_score_str_map = {
'gt': 'AND score < %s',
'ge': 'AND score <= %s',
'lt': 'AND score > %s',
'le': 'AND score >= %s',
}
cond_score_str = cond_score_str_map.get(cond_score, '')
attr_set_list = []
params = []
if item is not None:
attr_set_list.append('item = %s')
params.append(item)
if score is not None:
attr_set_list.append('score = %s')
params.append(score)
if temporary is not None:
attr_set_list.append('temporary = %s')
params.append(temporary)
attr_set_str = ' , '.join(attr_set_list)
if not attr_set_str:
return False
sql_update = (
'UPDATE IGNORE {table_name} SET '
'{attr_set_str} '
'WHERE id = %s '
'{cond_score_str} '
).format(table_name=self.tableName, attr_set_str=attr_set_str, cond_score_str=cond_score_str)
params.append(id)
if cond_score_str:
params.append(score)
self.execute(sql_update, params)
n_row = self.cur.rowcount
if n_row == 1:
return True
else:
return False
# number of objects in queue
def size(self):
sql_size = (
'SELECT COUNT(id) FROM {table_name}'
).format(table_name=self.tableName)
self.execute(sql_size)
res = self.cur.fetchall()
if len(res) > 0:
return res[0][0]
return None
# enqueue with priority score
def put(self, item, score):
try:
self._push(item, score)
self.commit()
except Exception as _e:
self.rollback()
raise _e
# enqueue by id
def putbyid(self, id, item, score):
try:
retVal = self._push_by_id(id, item, score)
self.commit()
except Exception as _e:
self.rollback()
raise _e
else:
return retVal
# dequeue the first object
def get(self, timeout=None, protective=False):
return self._pop(timeout=timeout, protective=protective)
# dequeue the last object
def getlast(self, timeout=None, protective=False):
return self._pop(timeout=timeout, protective=protective, mode='last')
# dequeue list of objects with some conditions
def getmany(self, mode='first', minscore=None, maxscore=None, count=None,
protective=False, temporary=False):
temporary_str = 'temporary = 1' if temporary else 'temporary = 0'
minscore_str = '' if minscore is None else 'AND score >= {0}'.format(float(minscore))
maxscore_str = '' if maxscore is None else 'AND score <= {0}'.format(float(maxscore))
count_str = '' if count is None else 'LIMIT {0}'.format(int(count))
mode_rank_map = {
'first': '',
'last': 'DESC',
}
sql_get_many = (
'SELECT id, item, score FROM {table_name} '
'WHERE '
'{temporary_str} '
'{minscore_str} '
'{maxscore_str} '
'ORDER BY score {rank} '
'{count_str} '
).format(table_name=self.tableName, temporary_str=temporary_str,
minscore_str=minscore_str, maxscore_str=maxscore_str,
rank=mode_rank_map[mode], count_str=count_str)
sql_pop_to_temp = (
'UPDATE {table_name} SET temporary = 1 '
'WHERE id = %s AND temporary = 0 '
).format(table_name=self.tableName)
sql_pop_del = (
'DELETE FROM {table_name} '
'WHERE id = %s AND temporary = {temporary} '
).format(table_name=self.tableName, temporary=(1 if temporary else 0))
ret_list = []
try:
self.execute(sql_get_many)
res = self.cur.fetchall()
for _rec in res:
got_object = False
id, item, score = _rec
params = (id,)
if protective:
self.execute(sql_pop_to_temp, params)
else:
self.execute(sql_pop_del, params)
n_row = self.cur.rowcount
self.commit()
if n_row >= 1:
got_object = True
if got_object:
ret_list.append(_rec)
except Exception as _e:
self.rollback()
_exc = _e
return ret_list
# get tuple of (id, item, score) of the first object without dequeuing it
def peek(self, skip_item=False):
return self._peek(skip_item=skip_item)
# get tuple of (id, item, score) of the last object without dequeuing it
def peeklast(self, skip_item=False):
return self._peek(mode='last', skip_item=skip_item)
# get tuple of (id, item, score) of object by id without dequeuing it
def peekbyid(self, id, temporary=False, skip_item=False):
if temporary:
return self._peek(mode='idtemp', id=id, skip_item=skip_item)
else:
return self._peek(mode='id', id=id, skip_item=skip_item)
# get list of object tuples without dequeuing it
def peekmany(self, mode='first', minscore=None, maxscore=None, count=None, skip_item=False):
minscore_str = '' if minscore is None else 'AND score >= {0}'.format(float(minscore))
maxscore_str = '' if maxscore is None else 'AND score <= {0}'.format(float(maxscore))
count_str = '' if count is None else 'LIMIT {0}'.format(int(count))
mode_rank_map = {
'first': '',
'last': 'DESC',
}
if skip_item:
columns_str = 'id, score'
else:
columns_str = 'id, item, score'
sql_peek_many = (
'SELECT {columns} FROM {table_name} '
'WHERE temporary = 0 '
'{minscore_str} '
'{maxscore_str} '
'ORDER BY score {rank} '
'{count_str} '
).format(columns=columns_str, table_name=self.tableName,
minscore_str=minscore_str, maxscore_str=maxscore_str,
rank=mode_rank_map[mode], count_str=count_str)
self.execute(sql_peek_many)
res = self.cur.fetchall()
self.commit()
ret_list = []
for _rec in res:
if skip_item:
id, score = _rec
item = None
else:
id, item, score = _rec
ret_list.append((id, item, score))
return ret_list
# drop all objects in queue and index and reset the table
def clear(self):
sql_clear_index = (
'DROP INDEX IF EXISTS score_index ON {table_name} '
).format(table_name=self.tableName)
sql_clear_table = (
'DROP TABLE IF EXISTS {table_name} '
).format(table_name=self.tableName)
# self.execute(sql_clear_index)
self.execute(sql_clear_table)
self.__init__()
# delete objects by list of id
def delete(self, ids):
sql_delete_template = 'DELETE FROM {table_name} WHERE id in ({placeholders} ) '
if isinstance(ids, (list, tuple)):
placeholders_str = ','.join([' %s'] * len(ids))
sql_delete = sql_delete_template.format(
table_name=self.tableName, placeholders=placeholders_str)
self.execute(sql_delete, ids)
n_row = self.cur.rowcount
self.commit()
return n_row
else:
raise TypeError('ids should be list or tuple')
# Move objects in temporary space to the queue
def restore(self, ids):
if ids is None:
sql_restore = (
'UPDATE {table_name} SET temporary = 0 WHERE temporary != 0 '
).format(table_name=self.tableName)
elif isinstance(ids, (list, tuple)):
placeholders_str = ','.join([' %s'] * len(ids))
sql_restore = (
'UPDATE {table_name} SET temporary = 0 '
'WHERE temporary != 0 AND id in ({placeholders} ) '
).format(table_name=self.tableName, placeholders=placeholders_str)
else:
raise TypeError('ids should be list or tuple or None')
try:
    # bind the id list when restoring a specific subset of objects
    if isinstance(ids, (list, tuple)):
        self.execute(sql_restore, ids)
    else:
        self.execute(sql_restore)
    self.commit()
except Exception as _e:
self.rollback()
raise _e
# update a object by its id with some conditions
def update(self, id, item=None, score=None, temporary=None, cond_score=None):
try:
retVal = self._update(id, item, score, temporary, cond_score)
self.commit()
except Exception as _e:
self.rollback()
raise _e
else:
return retVal
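# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original plugin). It assumes that
# PluginBase maps the keyword arguments below to attributes, as the
# constructor above implies, and that the MySQL credentials are valid;
# every value shown is hypothetical.
if __name__ == '__main__':
    fifo = MysqlFifo(titleName='demo', db_host='127.0.0.1', db_port=3306,
                     db_user='harvester', db_password='secret',
                     db_schema='harvester_fifo')
    fifo.put(item=b'payload', score=time.time())      # enqueue one object
    print('queue size:', fifo.size())
    popped = fifo.get(timeout=5, protective=True)     # (id, item, score) or None
    if popped is not None:
        obj_id, item, score = popped
        fifo.restore([obj_id])                        # undo the protective pop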
| 37.546642 | 129 | 0.523627 |
f7631ddc2a3d34dcf72feb3fd51cf1d99205e959
| 4,540 |
py
|
Python
|
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
|
wbear2/ambari
|
a1891193984da47015cd5483b5b95e040677d7df
|
[
"Apache-2.0"
] | 5 |
2018-06-03T05:19:40.000Z
|
2021-04-16T17:10:49.000Z
|
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
|
wbear2/ambari
|
a1891193984da47015cd5483b5b95e040677d7df
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
|
wbear2/ambari
|
a1891193984da47015cd5483b5b95e040677d7df
|
[
"Apache-2.0"
] | 6 |
2019-05-07T13:24:39.000Z
|
2021-02-15T14:12:37.000Z
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import datetime, sys, socket
import resource_management.libraries.functions
@patch.object(resource_management.libraries.functions, "get_unique_id_and_date", new = MagicMock(return_value=''))
@patch("socket.socket", new = MagicMock())
class TestServiceCheck(RMFTestCase):
@patch("sys.exit")
def test_service_check_default(self, sys_exit_mock):
self.executeScript("2.0.6/services/HIVE/package/scripts/service_check.py",
classname="HiveServiceCheck",
command="service_check",
config_file="default.json"
)
self.assertResourceCalled('File', '/tmp/hcatSmoke.sh',
content = StaticFile('hcatSmoke.sh'),
mode = 0755,
)
self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/hcatSmoke.sh hcatsmoke prepare',
logoutput = True,
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
)
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
logoutput = True,
user = 'hdfs',
conf_dir = '/etc/hadoop/conf',
keytab=UnknownConfigurationMock(),
kinit_path_local='/usr/bin/kinit',
security_enabled=False
)
self.assertResourceCalled('Execute', ' /tmp/hcatSmoke.sh hcatsmoke cleanup',
logoutput = True,
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
)
self.assertNoMoreResources()
@patch("sys.exit")
def test_service_check_secured(self, sys_exit_mock):
self.executeScript("2.0.6/services/HIVE/package/scripts/service_check.py",
classname="HiveServiceCheck",
command="service_check",
config_file="secured.json"
)
self.assertResourceCalled('File', '/tmp/hcatSmoke.sh',
content = StaticFile('hcatSmoke.sh'),
mode = 0755,
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/hcatSmoke.sh hcatsmoke prepare',
logoutput = True,
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
)
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
logoutput = True,
user = 'hdfs',
conf_dir = '/etc/hadoop/conf',
keytab='/etc/security/keytabs/hdfs.headless.keytab',
kinit_path_local='/usr/bin/kinit',
security_enabled=True
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /tmp/hcatSmoke.sh hcatsmoke cleanup',
logoutput = True,
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
)
self.assertNoMoreResources()
| 45.858586 | 194 | 0.571366 |
f76328d8935b4c347cc2d77471aadde54c8947dd
| 16,711 |
py
|
Python
|
db_factory/manager.py
|
ankit-shrivastava/db-factory
|
75ff83e94a5bdbc324f95c5b5ff82a28fb489486
|
[
"MIT"
] | 1 |
2021-01-04T13:39:55.000Z
|
2021-01-04T13:39:55.000Z
|
database_factory/manager.py
|
shrivastava-v-ankit/db-factory
|
d83de125cf392969036a5da9581db3b38e5fa047
|
[
"MIT"
] | null | null | null |
database_factory/manager.py
|
shrivastava-v-ankit/db-factory
|
d83de125cf392969036a5da9581db3b38e5fa047
|
[
"MIT"
] | 1 |
2021-03-16T03:04:13.000Z
|
2021-03-16T03:04:13.000Z
|
#!/usr/bin/env python
"""
File holds the module of Migration database manager and decide to connect with
multiple databases using the configuration parameters.
URI of database handled automatically for multiple databases using SQLAlchemy
"""
import os
import logging
import traceback
from urllib.parse import quote_plus as urlquote
from pandas import DataFrame
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy import MetaData
from .common.common import Common
from .operations import Operations
logger = logging.getLogger(__name__)
SUPPORTED_ENGINE = ["postgres", "mysql", "mariadb",
"snowflake", "bigquery", "sqlite"]
SUPPORTED_SECRET_MANAGER_CLOUD = ["aws", "gcp"]
class DatabaseManager(object):
"""
Class handle the Database Manager using SQLAlchemy Dialects for different databases.
********
Methods:
--------
__init__: Initaization functions
fetch_from_secret: Method to fetch the values from Cloud Secret
Manager Service.
create_uri: Method uses the initalization parameter and
create the uri for the provided engine with
proper driver.
create_session: Method to create the SQLAlchemy session for the
initalized the engine type.
execute_sql: Function to execute DML or DDL queries and return
with rows if rows exist.
execute_df: Function to execute Pandas DataFrame object.
get_df: Function to execute DML select queries and return
as Pandas DataFrame.
object
"""
def __init__(self,
engine_type: str,
database: str,
sqlite_db_path: str = os.environ["HOME"],
username: str = None,
password: str = None,
schema: str = "public",
host: str = None,
port: str = None,
snowflake_role: str = None,
snowflake_warehouse: str = None,
snowflake_account: str = None,
secret_id: str = None,
secrete_manager_cloud: str = "aws",
aws_region: str = "us-east-1"
):
"""
Initialization function to initialize the object
***********
Attributes:
-----------
engine_type: (Required) => Type of Engine of database.
One of the below supported engines:
* postgres
* mysql
* mariadb
* snowflake
* bigquery
* sqlite
database: (Required) => Database name to connect.
Database must be precreated.
sqlite_db_path: (Optional) => Fully qualified path where the
database will be created. The database file is
named as per the database parameter.
Default: Current user home directory
username: (Optional) => Username to connect database.
User should have all permissions on
database. This value can be set in secret
manager rather as plain text.
password: (Optional) => Password as plain text to
connect database. This value can be set in
secret manager rather as plain text.
schema: (Optional) => Name of Schema of database.
Valid for Snowflake and Postgres.
Default: is 'public'
host: (Optional) => Hostname of IP address of RDS
server. This value can be set in secret
manager rather as plain text.
port: (Optional) => Port of RDS server. This
value can be set in secret manager rather
as plain text.
snowflake_role: (Optional) => Snowflake role for connection.
This value can be set in secret manager
rather as plain text.
snowflake_warehouse: (Optional) => Snowflake warehouse for
connection. This value can be set in secret
manager rather as plain text.
snowflake_account: (Optional) => Snowflake account for
connection. This value can be set in secret
manager rather as plain text.
secret_id: (Optional) => Preferred way to set the json
object of connection parameters in secret
manager services of AWS or GCP.
If running on AWS / GCP servers then server
should have permissions to read the secrets
from secret manager service.
AWS / GCP credentials should be set as
default and will be fetched from server
metadata.
secrete_manager_cloud: (Optional) => Preferred way to get secrets.
Default: is 'aws'
One of supported secret manager service
cloud provider:
* aws
* gcp
aws_region: (Optional) => AWS region for secret manager
service.
Default: is 'us-east-1'
"""
self.engine_type = engine_type
self.database = database
self.sqlite_db_path = sqlite_db_path
self.username = username
self.password = password
self.schema = schema
self.host = host
self.port = port
self.snowflake_role = snowflake_role
self.snowflake_account = snowflake_account
self.snowflake_warehouse = snowflake_warehouse
self.secret_id = secret_id
self.secrete_manager_cloud = secrete_manager_cloud
self.aws_region = aws_region
self.engine = None
self.session = None
def fetch_from_secret(self):
"""
Method to fetch the values from Cloud Secret Manager Service.
Use the class variables for the paramaters.
*******
Return:
-------
secret: Secrets if secret id is provided else None
"""
secret = None
if self.secret_id and self.secrete_manager_cloud:
logger.info(f'Fetch secrets from cloud secret manager service')
try:
secret = Common.get_secret(
secret_id=self.secret_id,
secrete_manager_cloud=self.secrete_manager_cloud,
aws_region=self.aws_region)
except Exception as err:
logger.exception(
    'Failed to fetch secrets from the Secret Manager Service: %s', err)
else:
logger.info(
f'Secret id is not set. Will use plain authentication.')
return secret
def create_uri(self):
"""
Method uses the initialization parameters and creates the URI for
the provided engine with the proper driver.
Use the class variables for the paramaters.
*******
Return:
-------
uri: URI required for creating the SQLAlchemy
connection.
param: Extra kwargs for the SQLAlchemy connection.
is_not_dialect_desc: True if the dialect's description encoding
should be disabled for this engine.
"""
logger.info(f"Create URI for the engine '{self.engine}'")
if self.engine_type not in SUPPORTED_ENGINE:
msg = f"Unsupported engine '{self.engine_type}'. Supported are '{SUPPORTED_ENGINE}'"
logger.error(msg)
raise ValueError(msg)
# Fetch the secret first to initalize the values.
secret = self.fetch_from_secret()
if secret:
# Normalize the secret keys to upper case to ensure a consistent dictionary
secret = Common.normaize_connection_dict(connection_dict=secret,
is_to_upper=True)
if "USERNAME" in secret:
self.username = secret["USERNAME"]
if "PASSWORD" in secret:
self.password = secret["PASSWORD"]
if "SCHEMA" in secret:
self.schema = secret["SCHEMA"]
if "HOST" in secret:
self.host = secret["HOST"]
if "PORT" in secret:
self.port = secret["PORT"]
if "SNOWFLAKE_ROLE" in secret:
self.snowflake_role = secret["SNOWFLAKE_ROLE"]
if "SNOWFLAKE_ACCOUNT" in secret:
self.snowflake_account = secret["SNOWFLAKE_ACCOUNT"]
if "SNOWFLAKE_WAREHOUSE" in secret:
self.snowflake_warehouse = secret["SNOWFLAKE_WAREHOUSE"]
if self.password:
self.password = urlquote(self.password)
is_not_dialect_desc = False
param = None
logger.info(
f'SQLAlchemy Dialects will be created for database type: {self.engine_type}')
if self.engine_type in ["sqlite"]:
uri = 'sqlite:///' + os.path.join(self.sqlite_db_path,
f"{self.database}.db")
elif self.engine_type in ["postgres"]:
uri = f"postgres+pg8000://{self.username}:{self.password}@{self.host}:{self.port}/{self.database}"
param = dict(client_encoding="utf8")
is_not_dialect_desc = True
elif self.engine_type in ["mysql", "mariadb"]:
uri = f"mysql+pymysql://{self.username}:{self.password}@{self.host}:{self.port}/{self.database}?charset=utf8mb4"
elif self.engine_type in ["snowflake"]:
from snowflake.sqlalchemy import URL
uri = URL(
account=self.snowflake_account,
user=self.username,
password=self.password,
database=self.database,
schema=self.schema,
warehouse=self.snowflake_warehouse,
role=self.snowflake_role,
)
elif self.engine_type in ["bigquery"]:
from .cloud.gcp.auth import GcpAuthManager
gcp_service_file = os.environ.get(
'GOOGLE_APPLICATION_CREDENTIALS') or None
gcp_auth = GcpAuthManager(service_accout_file=gcp_service_file)
project_name = gcp_auth.get_project_name()
uri = f"bigquery://{project_name}/{self.database}"
if gcp_service_file:
param = dict(credentials_path=gcp_service_file)
return uri, param, is_not_dialect_desc
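# Illustrative URIs produced by the format strings above (all credentials,
# hosts and database names here are hypothetical placeholders):
#   sqlite:////home/user/example_db.db
#   postgres+pg8000://db_user:db_pass@10.0.0.5:5432/example_db
#   mysql+pymysql://db_user:db_pass@10.0.0.5:3306/example_db?charset=utf8mb4
#   bigquery://my-gcp-project/example_db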
def create_session(self):
"""
Method to create the SQLAlchemy session for the initalized the engine
type.
Use the class variables and update to hold the sessions.
"""
try:
logger.info(f'Creating SQLAlchemy Dialects session scope.')
uri, param, is_not_dialect_desc = self.create_uri()
if param:
self.engine = create_engine(uri, echo=True, **param)
else:
self.engine = create_engine(uri, echo=True)
if is_not_dialect_desc:
# https://github.com/sqlalchemy/sqlalchemy/issues/5645
self.engine.dialect.description_encoding = None
self.session = scoped_session(sessionmaker(bind=self.engine))
logger.info(f'SQLAlchemy Dialects session scope is created')
except Exception as err:
logger.exception(
    'Failed to create session with the given database parameters: %s', err)
traceback.print_tb(err.__traceback__)
# Propagate the exception
raise
def execute_sql(self, sql: str):
"""
Function to execute DML or DDL queries and return if rows exist.
***********
Attributes:
-----------
sql: (Required) => Plain DDL or DML query to execute on
the database.
*******
Return:
-------
rows: Rows in the case of DML select queries, else None.
"""
rows = None
db_operation = Operations(self.session)
rows = db_operation.execute(sql=sql)
return rows
def execute_df(self,
panda_df: DataFrame,
table_name: str,
chunk_size: int = None,
exist_action: str = "append"):
"""
Function to execute Pandas DataFrame object to create, replace or
append table with DataFrame table objects.
***********
Attributes:
-----------
panda_df: (Required) => Pandas DataFrame object whose rows
are written to the target table.
table_name: (Required) => Name of the target table.
chunk_size: (Optional) => chunck size to update the table in
chunks for performance rather than insert row one
by one.
Default: 1 row at a time.
exist_action: (Optional) => Action on if table already exist.
Default: append mode. Others modes are replace
or fail.
*******
Return:
-------
rows: If rows in case of DDL queries else none.
"""
rows = None
db_operation = Operations(self.session)
rows = db_operation.execute(panda_df=panda_df,
table_name=table_name,
chunk_size=chunk_size,
exist_action=exist_action)
return rows
def get_df(self,
sql: str,
chunk_size: int = None):
"""
Function to execute DML select queries and return Pandas DataFrame
object.
***********
Attributes:
-----------
sql: (Required) => Plain DML select query to execute on
the database.
chunk_size: (Optional) => If specified, return an iterator
where chunk_size is the number of rows to include
in each chunk.
Default: None to include all records.
*******
Return:
-------
rows: Query result as a Pandas DataFrame (or an iterator of DataFrames when chunk_size is set).
"""
rows = None
db_operation = Operations(self.session)
rows = db_operation.execute(sql=sql,
chunk_size=chunk_size,
get_df=True)
return rows
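# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module), using the sqlite
# engine because it needs no credentials or secret manager; the database and
# table names are hypothetical, and it assumes the Operations helper executes
# plain SQL strings as the docstrings above describe.
if __name__ == '__main__':
    manager = DatabaseManager(engine_type='sqlite', database='example_db')
    manager.create_session()
    manager.execute_sql('CREATE TABLE IF NOT EXISTS demo (id INTEGER, name TEXT)')
    manager.execute_sql("INSERT INTO demo VALUES (1, 'alpha')")
    print(manager.get_df('SELECT * FROM demo'))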
| 41.673317 | 124 | 0.510323 |
f763489073221a0058c43b650f77849bca2574e7
| 167 |
py
|
Python
|
Scripts/django-admin.py
|
Nicozstory/Try-Django-1.11
|
5f81571d3342d1991494f9aac425fb79b64fd425
|
[
"bzip2-1.0.6"
] | null | null | null |
Scripts/django-admin.py
|
Nicozstory/Try-Django-1.11
|
5f81571d3342d1991494f9aac425fb79b64fd425
|
[
"bzip2-1.0.6"
] | null | null | null |
Scripts/django-admin.py
|
Nicozstory/Try-Django-1.11
|
5f81571d3342d1991494f9aac425fb79b64fd425
|
[
"bzip2-1.0.6"
] | null | null | null |
#!c:\users\alexa\myvirtualenv2\foodtasker\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 27.833333 | 60 | 0.796407 |
f7635c0d098d13616e07a22c1baa45da48b78323
| 557 |
py
|
Python
|
tests/test_constants.py
|
yveso/tyfbaf
|
f1b6bf33d275f20fad71a2d20b92665f88755361
|
[
"MIT"
] | null | null | null |
tests/test_constants.py
|
yveso/tyfbaf
|
f1b6bf33d275f20fad71a2d20b92665f88755361
|
[
"MIT"
] | null | null | null |
tests/test_constants.py
|
yveso/tyfbaf
|
f1b6bf33d275f20fad71a2d20b92665f88755361
|
[
"MIT"
] | null | null | null |
import pytest
from tyfbaf import constants
@pytest.fixture
def ugly_hack():
"""Ugly hack to disable autouse fixture in conftest.py..."""
constants.SERVER_NAME = ""
def test_server_name_default(ugly_hack):
assert constants.SERVER_NAME == ""
def test_port_default():
assert constants.PORT == 6405
def test_base_headers_default():
assert constants.BASE_HEADERS == {
"Content-Type": "application/json",
"Accept": "application/json",
}
def test_current_token_default():
assert constants.CURRENT_TOKEN == ""
| 19.206897 | 64 | 0.698384 |
f7639c6229b8d29dfd2141ec52ff0c64883adbe2
| 2,502 |
py
|
Python
|
venv/Lib/site-packages/pybtex/markup/__init__.py
|
PeerHerholz/guideline_jupyter_book
|
ce445e4be0d53370b67708a22550565b90d71ac6
|
[
"BSD-3-Clause"
] | 2 |
2021-02-16T16:17:07.000Z
|
2021-11-08T20:27:13.000Z
|
venv/Lib/site-packages/pybtex/markup/__init__.py
|
PeerHerholz/guideline_jupyter_book
|
ce445e4be0d53370b67708a22550565b90d71ac6
|
[
"BSD-3-Clause"
] | null | null | null |
venv/Lib/site-packages/pybtex/markup/__init__.py
|
PeerHerholz/guideline_jupyter_book
|
ce445e4be0d53370b67708a22550565b90d71ac6
|
[
"BSD-3-Clause"
] | 4 |
2020-11-14T17:05:36.000Z
|
2020-11-16T18:44:54.000Z
|
from __future__ import unicode_literals
# Copyright (c) 2006-2019 Andrey Golovigin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pybtex.scanner import Scanner, Literal, PybtexSyntaxError
from pybtex.richtext import String, Text, Protected
class LaTeXParser(Scanner):
LBRACE = Literal(u'{')
RBRACE = Literal(u'}')
def parse(self, level=0):
"""
>>> LaTeXParser('abc').parse()
Text('abc')
>>> LaTeXParser('abc{def}').parse()
Text('abc', Protected('def'))
>>> LaTeXParser('abc{def {xyz}} !').parse()
Text('abc', Protected('def ', Protected('xyz')), ' !')
"""
return Text(*self.iter_string_parts(level=level))
def iter_string_parts(self, level=0):
while True:
token = self.skip_to([self.LBRACE, self.RBRACE])
if not token:
remainder = self.get_remainder()
if remainder:
yield String(remainder)
if level != 0:
raise PybtexSyntaxError('unbalanced braces', self)
break
elif token.pattern is self.LBRACE:
yield String(token.value[:-1])
yield Protected(*self.iter_string_parts(level=level + 1))
else: # token.pattern is self.RBRACE
yield String(token.value[:-1])
if level == 0:
raise PybtexSyntaxError('unbalanced braces', self)
break
| 39.09375 | 73 | 0.648681 |
f763b50d56b531bc659599c89d467e89b5cc9948
| 712 |
py
|
Python
|
examples/basic/robot.py
|
benjiboy50fonz/robotpy-ctre-draft
|
36810d1ca8f02b774f361df3c514d8d8bba00159
|
[
"Apache-2.0"
] | null | null | null |
examples/basic/robot.py
|
benjiboy50fonz/robotpy-ctre-draft
|
36810d1ca8f02b774f361df3c514d8d8bba00159
|
[
"Apache-2.0"
] | null | null | null |
examples/basic/robot.py
|
benjiboy50fonz/robotpy-ctre-draft
|
36810d1ca8f02b774f361df3c514d8d8bba00159
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import wpilib
import ctre
class MyRobot(wpilib.IterativeRobot):
"""
This is a short sample program demonstrating how to use the basic throttle
mode of the TalonSRX
"""
def robotInit(self):
self.motor = ctre.WPI_TalonSRX(1) # Initialize the TalonSRX on device 1.
def disabledPeriodic(self):
# Ensure the motor is disabled when the robot is disabled.
self.motor.disable()
def teleopPeriodic(self):
# Set the motor's output to half power.
# This takes a number from -1 (100% speed in reverse) to +1 (100%
# speed going forward)
self.motor.set(0.5)
if __name__ == "__main__":
wpilib.run(MyRobot)
| 24.551724 | 81 | 0.65309 |
f763d016061f2f08e13ed4e9dfad257490becc39
| 27,721 |
py
|
Python
|
src/grammar_learner/pqa_table.py
|
vsbogd/language-learning
|
601e7bc7f97a0b6c1f713f8108fc6e81d492e921
|
[
"MIT"
] | 21 |
2018-01-30T08:18:20.000Z
|
2020-02-18T08:15:53.000Z
|
src/grammar_learner/pqa_table.py
|
vsbogd/language-learning
|
601e7bc7f97a0b6c1f713f8108fc6e81d492e921
|
[
"MIT"
] | 3 |
2018-02-09T18:22:45.000Z
|
2018-07-10T10:31:25.000Z
|
src/grammar_learner/pqa_table.py
|
vsbogd/language-learning
|
601e7bc7f97a0b6c1f713f8108fc6e81d492e921
|
[
"MIT"
] | 10 |
2018-01-30T10:31:43.000Z
|
2021-01-26T07:54:24.000Z
|
# language-learning/src/grammar_learner/pqa_table.py # 190410
# Test Grammar Learner to fill in ULL Project Plan Parses spreadshit
import logging
# TODO: refactor 81217 wide_rows (archived) and ppln.py (make independent)
import os, sys, time
from ..common import handle_path_string
from ..grammar_tester import test_grammar
from ..common.optconst import *
from .utl import sec2string, kwa
from .read_files import check_dir
from .learner import learn_grammar, learn # 81126 learn returns rules, log
from .write_files import list2file
def params(corpus_, dataset_, module_path_, out_dir, **kwargs): # 90201
corpus = kwargs['corpus'] if 'corpus' in kwargs else corpus_
dataset = kwargs['dataset'] if 'dataset' in kwargs else dataset_
module_path = kwargs['module_path'] if 'module_path' in kwargs else module_path_
if 'input_parses' in kwargs:
if module_path in kwargs['input_parses']:
input_parses = kwargs['input_parses']
else: input_parses = module_path + kwargs['input_parses']
else: input_parses = module_path + '/data/' + corpus + '/' + dataset
if type(kwargs['clustering']) is str:
clustering = kwargs['clustering']
else:
clustering = kwargs['clustering'][0]
if check_dir(input_parses, create=False, verbose='min'):
batch_dir = out_dir + '/' + corpus
spaces = ['w', 'c', 'd'] # 'words', 'connectors', 'disjuncts'
context = spaces[kwargs['context']]
rules = spaces[kwargs['grammar_rules']]
if kwargs['grammar_rules'] == -1:
rules = 'interconnected'
elif kwargs['grammar_rules'] == -2:
rules = 'linked'
if kwargs['clustering'] == 'random':
context = ''
wtf = 'Random-clusters'
else:
wtf = abrvlg(**kwargs)
if kwargs['left_wall'] in ['', 'none']:
left_wall = 'no-LW'
else:
left_wall = 'LW'
if kwargs['period']:
period = 'RW'
else:
period = 'no-RW'
generalization = ['no-gen', 'gen-cats', 'gen-rules', 'gen-both']
gen = 0
if 'categories_generalization' in kwargs:
if kwargs['categories_generalization'] not in ['', 'off', 'none']:
gen += 1
if 'rules_generalization' in kwargs:
if kwargs['rules_generalization'] not in ['', 'off', 'none']:
gen += 2
prj_dir = batch_dir + '_' + dataset + '_' + context + wtf + rules \
+ '_' + generalization[gen]
if 'cluster_range' in kwargs:
if type(kwargs['cluster_range']) is int:
prj_dir = prj_dir + '_' + str(kwargs['cluster_range']) + 'c'
if 'min_word_count' in kwargs and kwargs['min_word_count'] > 1:
prj_dir = prj_dir + '_mwc=' + str(kwargs['min_word_count'])
if len(kwargs['clustering']) > 3 \
and type(kwargs['clustering'][3]) is int:
prj_dir = prj_dir + '_' + str(kwargs['clustering'][3]) + 'nn'
# number of nearest neighbors in connectivity constraints # 81116
if check_dir(prj_dir, create=True, verbose='none'):
output_categories = prj_dir # no file name ⇒ auto file name
output_grammar = prj_dir # no file name ⇒ auto file name
return input_parses, output_categories, output_grammar
else:
return input_parses, out_dir, out_dir
else:
raise FileNotFoundError('File not found', input_parses)
def pqa_meter(dict_path, op, cp, rp, **kwargs): # TODO: restore previous
# op,cp,rp: ex. output_path, corpus_path, reference_path - changed 90131:
corpus_path = cp if len(cp) > 0 else kwargs['corpus_path']
reference_path = rp if len(rp) > 0 else kwargs['reference_path']
if len(op) > 0:
output_path = op
grammar_path = op
else:
grammar_path = kwargs['output_grammar']
output_path = kwargs['out_path'] if 'out_path' in kwargs \
else kwargs['output_grammar']
template_path = handle_path_string("tests/test-data/dict/poc-turtle") # FIXME:WTF?
linkage_limit = kwargs['linkage_limit'] if 'linkage_limit' in kwargs else 1000
if linkage_limit == 0:
return 0.0, 0.0, 0.0, 0.0 # table_rows: get grammar for further tests
options = BIT_SEP_STAT | BIT_LG_EXE | BIT_NO_LWALL | BIT_NO_PERIOD | BIT_STRIP | BIT_RM_DIR | BIT_DPATH_CREATE | BIT_LOC_LANG | BIT_PARSE_QUALITY | BIT_ULL_IN # | BIT_OUTPUT_DIAGRAM #| BIT_SEP_STAT
# BIT_ULL_IN :: use ull parses as test corpus
# BIT_CAPS :: preserve caps in parses, process inside Grammar Learner
pa, f1, precision, recall = \
test_grammar(corpus_path, output_path, dict_path, grammar_path,
template_path, linkage_limit, options, reference_path)
return float(pa), float(f1), float(precision), float(recall)
def table_rows(lines, out_dir, cp, rp, runs=(1, 1), **kwargs):
# cp: corpus_path, rp: reference_path for grammar tester
logger = logging.getLogger(__name__ + ".table_rows")
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path: sys.path.append(module_path)
header = ['Line', 'Corpus', 'Parsing', 'LW', 'RW', 'Gen.', 'Space', 'Rules',
'Silhouette', 'PA', 'PQ', 'F1']
spaces = ''
if kwargs['clustering'] == 'random':
spaces += 'RND'
else:
if kwargs['context'] == 1:
spaces += 'c'
else:
spaces += 'd'
if kwargs['word_space'] == 'vectors':
spaces += 'DRK'
elif kwargs['word_space'] == 'discrete':
spaces += 'ILE'
elif kwargs['word_space'] == 'sparse':
if kwargs['clustering'][0] == 'agglomerative':
spaces += 'ALE'
elif kwargs['clustering'][0] in ['k-means', 'kmeans']:
spaces += 'KLE'
elif kwargs['clustering'][0][:4] == 'mean': # ['mean shift', ...]
spaces += 'MLE'
else:
spaces += '?LE'
else:
spaces += '???'
if kwargs['grammar_rules'] == 1:
spaces += 'c'
elif kwargs['grammar_rules'] == -1: # 80825 interconnected connector-style
spaces += 'ic'
elif kwargs['grammar_rules'] == -2: # 80825 interconnected disjunct-style
spaces += 'id'
else:
spaces += 'd'
details = []
average = []
for i, line in enumerate(lines):
if line[3] != 0:
kwargs['left_wall'] = 'LEFT-WALL'
lw = 'LW'
else:
kwargs['left_wall'] = ''
lw = ' --- '
if line[4] != 0:
kwargs['period'] = True
dot = ' . '
else:
kwargs['period'] = False
dot = ' --- '
gen = line[5] # none | rules | categories | both
if gen in ['rules', 'both']:
kwargs['rules_generalization'] = 'jaccard'
else:
kwargs['rules_generalization'] = 'off'
if gen in ['categories', 'both']:
kwargs['categories_generalization'] = 'jaccard'
else:
kwargs['categories_generalization'] = 'off'
if kwargs['grammar_rules'] == 1 and gen != 'none':
continue
corpus = line[1]
dataset = line[2]
if 'input_parses' in kwargs:
del kwargs['input_parses']
ip, oc, og = params(corpus, dataset, module_path, out_dir, **kwargs)
# ip, oc, og: input path, output categories, output grammar
kwargs['input_parses'] = ip
kwargs['output_grammar'] = og
kwargs['output_categories'] = oc # = output_grammar if absent or ''
pa = [] # «parse-ability»
pq = [] # «parse quality» ~ recall
si = [] # Silhouette index
fm = [] # F-measure (F1)
rules = []
for j in range(runs[0]):
try: # if True: #
re = learn_grammar(**kwargs)
if 'silhouette' in re:
s = round(re['silhouette'], 2)
s_str = str(s)
else:
s = 0
s_str = ' --- '
except: # else: #
logger.critical('pqa_table.py table_rows: '
    'learn_grammar(**kwargs) ⇒ exception:\n%s', sys.exc_info())
pa.append(0.)
pq.append(0.)
rules.append(0)
det_line = [line[0], corpus, dataset, lw, dot, gen, spaces,
' fail ', ' --- ', ' --- ', ' --- ', ' --- ']
details.append(det_line)
continue
if kwargs['linkage_limit'] > 0:
for k in range(runs[1]):
a, f1, precision, q = pqa_meter(re['grammar_file'],
og, cp, rp, **kwargs)
pa.append(a)
pq.append(q)
fm.append(f1)
si.append(s)
rules.append(re['grammar_rules'])
dline = [line[0], corpus, dataset, lw, dot, gen, spaces,
' ' + str(re['grammar_rules']) + ' ', s_str,
str(round(a * 100)) + '%',
str(round(q * 100)) + '%', str(round(f1, 2))]
details.append(dline)
else:
si.append(s)
rules.append(re['grammar_rules'])
details.append([line[0], corpus, dataset, lw, dot, gen, spaces,
' ' + str(re['grammar_rules']) + ' ',
s_str, ' --- ', ' --- ', ' --- '])
if len(pa) > 0:
pa_str = str(round(sum(pa) * 100 / len(pa))) + '%'
pq_str = str(round(sum(pq) * 100 / len(pa))) + '%'
else:
pa_str = ' --- '
pq_str = ' --- '
if len(si) > 0:
sia = round(sum(si) / len(si), 2)
else:
sia = 0.0
sia_str = str(sia) if sia > 0.005 else ' --- '
if len(fm) > 0:
fm_str = str(round(sum(fm) / len(fm), 2))
else:
fm_str = ' --- '
non_zero_rules = [x for x in rules if x > 0]
if len(non_zero_rules) > 0:
mean_rules = str(round(sum(non_zero_rules) / len(non_zero_rules)))
else:
mean_rules = 'fail'
avg_line = [line[0], corpus, dataset, lw, dot, gen, spaces,
mean_rules, sia_str, pa_str, pq_str, fm_str]
average.append(avg_line)
return average, details, header
def abrvlg(**kwargs):
if kwargs['word_space'] == 'vectors':
return 'DRK'
elif kwargs['word_space'] == 'discrete':
return 'ILE'
elif kwargs['word_space'] == 'sparse':
if kwargs['clustering'][0] == 'agglomerative':
x = list(kwargs['clustering'])
if len(x) < 2: x.append('ward')
if len(x) < 3: x.append('euclidean')
return 'AL' + x[1][0].upper() + x[2][0].upper()
elif kwargs['clustering'][0] in ['k-means', 'kmeans']:
return 'KLE'
elif kwargs['clustering'][0][:4] == 'mean': # mean shift
return 'MLE'
else:
return '?LE'
else:
return '???'
def wide_rows(lines, out_dir, cp, rp, runs=(1, 1), **kwargs):
# cp: (test) corpus_path, rp: reference_path for grammar tester
start = time.time()
logger = logging.getLogger(__name__ + ".wide_rows")
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path: sys.path.append(module_path)
header = ['Line', 'Corpus', 'Parsing', 'Space', 'Linkage', 'Affinity',
'G12n', 'Threshold', 'Rules', 'MWC', 'NN', 'SI',
'PA', 'PQ', 'F1']
if '+' in kwargs['verbose']:
header.append('Top 5 cluster sizes')
linkage = '---'
affinity = '---'
rgt = '---' # rules_generalization_threshold
knn = '---' # k nearest neighbors for connectivity graph
clustering = kwa(['agglomerative', 'ward', 'euclidean'], 'clustering', **kwargs)
if type(clustering) is str:
if clustering == 'kmeans':
clustering = ['kmeans', 'k-means++', 10]
elif clustering == 'agglomerative':
clustering = ['agglomerative', 'ward', 'euclidean']
elif clustering == 'mean_shift':
clustering = ['mean_shift', 'auto']
elif clustering == 'group': # TODO: call ILE clustering?
print('Call ILE clustering from optimal_clusters?')
elif clustering == 'random': # TODO: call random clustering?
print('Call random clustering from optimal_clusters?')
else:
clustering = ['agglomerative', 'ward', 'euclidean']
if len(clustering) > 3:
if type(kwargs['clustering'][3]) is int:
knn = kwargs['clustering'][3]
if clustering[0] == 'agglomerative':
linkage = clustering[1]
if len(clustering) > 2:
affinity = clustering[2]
else:
affinity = 'euclidean'
else:
linkage = clustering[0] # FIXME: all options...
spaces = ''
if kwargs['clustering'] == 'random':
spaces += 'RND'
else:
if kwargs['context'] == 1:
spaces += 'c'
else:
spaces += 'd'
spaces += abrvlg(**kwargs)
if kwargs['grammar_rules'] == 1:
spaces += 'c'
elif kwargs['grammar_rules'] == -1: # interconnected connector-style
spaces += 'ic'
elif kwargs['grammar_rules'] == -2: # interconnected disjunct-style
spaces += 'id'
else:
spaces += 'd'
details = []
average = []
for i, line in enumerate(lines):
corpus = line[1]
dataset = line[2]
if line[3] != 0:
kwargs['left_wall'] = 'LEFT-WALL'
else:
kwargs['left_wall'] = ''
if line[4] != 0:
kwargs['period'] = True
else:
kwargs['period'] = False
gen = line[5] # none | rules | categories | both | old | updated | new
if 'rules_aggregation' in kwargs \
and type(kwargs['rules_aggregation']) is float:
rgt = str(kwargs['rules_aggregation']) # rules g12n threshold
if gen in ['rules', 'both', 'old', 'jaccard']:
kwargs['rules_generalization'] = 'jaccard'
elif gen in ['updated', 'hierarchical', 'hier.', 'HDJ']:
kwargs['rules_generalization'] = 'hierarchical'
gen = 'HDJ' # Hierarchical: Disjuncts Jaccard index similarity
elif gen in ['new', 'fast']:
kwargs['rules_generalization'] = 'fast'
gen = 'fast'
else:
kwargs['rules_generalization'] = 'off'
rgt = '---' # rules_generalization_threshold
if gen in ['categories', 'both']:
kwargs['categories_generalization'] = 'jaccard'
else:
kwargs['categories_generalization'] = 'off'
if kwargs['grammar_rules'] == 1 and gen != 'none':
continue
ip, oc, og = params(corpus, dataset, module_path, out_dir, **kwargs)
# ip, oc, og: input path, output categories, output grammar
kwargs['input_parses'] = ip
kwargs['output_grammar'] = og
kwargs['output_categories'] = oc # = output_grammar if absent or ''
# Averaging :: FIXME: stop averaging?
pa = [] # «parse-ability»
pq = [] # «parse quality»
si = [] # Silhouette index
fm = [] # F-measure (F1)
rules = []
cluster_sizes = []
for j in range(runs[0]):
try: # if True: #
rulez, re = learn(**kwargs)
if len(rulez) < 1: # empty filtered dataset # 190410
msg = [['Error:', 'empty', 'filtered', 'parses', 'dataset', '⇒',
'check', 'max_unparsed_words', 'in', 'kwargs']]
return msg, msg, header, re, rulez
if 'rule_sizes' in re:
cluster_sizes = sorted(re['rule_sizes'].keys(),
reverse=True)[:5]
elif 'cluster_sizes' in re:
cluster_sizes = sorted(re['cluster_sizes'].keys(),
reverse=True)[:5]
if 'silhouette' in re:
s = round(re['silhouette'], 2)
s_str = str(s)
else:
s = 0
s_str = ' --- '
except: # else: #
logger.critical('pqa_table.py wide_rows: '
    'learn_grammar(**kwargs) ⇒ exception:\n%s', sys.exc_info())
pa.append(0.)
pq.append(0.)
rules.append(0)
det_line = [line[0], corpus, dataset, spaces,
linkage, affinity, gen, ' ---', 'fail',
' ---', ' ---', ' ---', ' ---', ' ---', ' ---']
details.append(det_line)
continue # FIXME: check case
if kwargs['linkage_limit'] > 0:
start = time.time()
for k in range(runs[1]):
a, f1, precision, q = pqa_meter(re['grammar_file'],
og, cp, rp, **kwargs)
pa.append(a)
pq.append(q)
fm.append(f1)
si.append(s)
rules.append(re['grammar_rules'])
dline = [line[0], corpus, dataset, spaces,
linkage, affinity, gen, rgt,
' ' + str(re['grammar_rules']) + ' ',
str(kwargs['min_word_count']), s_str,
str(knn), str(round(a * 100)) + '%',
str(round(q * 100)) + '%', str(round(f1, 2))]
if '+' in kwargs['verbose']:
dline.append(cluster_sizes)
details.append(dline)
else: # kwargs['linkage_limit'] = 0 :: avoid grammar_tester call
si.append(s)
rules.append(re['grammar_rules'])
details.append([line[0], corpus, dataset, spaces,
linkage, affinity, gen, rgt,
' ' + str(re['grammar_rules']) + ' ',
str(kwargs['min_word_count']), s_str,
str(knn), '---', ' ---', ' ---', ' ---'])
if len(pa) > 0:
pa_str = str(round(sum(pa) * 100 / len(pa))) + '%'
pq_str = str(round(sum(pq) * 100 / len(pa))) + '%'
else:
pa_str = ' --- '
pq_str = ' --- '
if len(si) > 0:
sia = round(sum(si) / len(si), 2)
else:
sia = 0.0
sia_str = str(sia) # if sia > 0.005 else ' --- '
if len(fm) > 0:
fm_str = str(round(sum(fm) / len(fm), 2))
else:
fm_str = ' --- '
non_zero_rules = [x for x in rules if x > 0]
if len(non_zero_rules) > 0:
mean_rules = str(round(sum(non_zero_rules) / len(non_zero_rules)))
else:
mean_rules = 'fail'
avg_line = [line[0], corpus, dataset, spaces, linkage, affinity,
gen, rgt, mean_rules, str(kwargs['min_word_count']),
str(knn), sia_str, pa_str, pq_str, fm_str, cluster_sizes]
average.append(avg_line)
re.update({'grammar_test_time': sec2string(time.time() - start)})
stats = []
if 'cleaned_words' in re:
stats.append(['Clean corpus size ', re['cleaned_words']])
if 'grammar_learn_time' in re:
stats.append(['Grammar learn time', re['grammar_learn_time']])
if 'grammar_test_time' in re:
stats.append(['Grammar test time ', re['grammar_test_time']])
if len(stats) > 0:
x = re['corpus_stats_file']
list2file(stats, x[:x.rfind('/')] + '/learn_&_test_stats.txt')
# return average, details, header, re
return average, details, header, re, rulez # 81120 tmp FIXME:DEL rulez?
def wide_table(lines, out_dir, cp, rp, **kwargs): # 81222 FIXME: [»]
# cp,rp: corpus_path, rp: reference_path for grammar tester
# runs = (1,1) (...rows) unused ⇒ FIXME:DEL from calls! [»]
module_path = os.path.abspath(os.path.join('..'))  # needed by params() below
if module_path not in sys.path: sys.path.append(module_path)
header = ['Line', 'Corpus', 'Parsing', 'Space', 'Linkage', 'Affinity',
'G12n', 'Threshold', 'Rules', 'MWC', 'NN', 'SI',
'PA', 'PQ', 'F1']
if 'log+' in kwargs['verbose']:
header.append('Top 5 cluster sizes')
linkage = '---'
affinity = '---'
rgt = '---' # rules_generalization_threshold
knn = '---' # k nearest neighbors for connectivity graph
clustering = kwa(['agglomerative', 'ward', 'euclidean'], 'clustering',
**kwargs)
if type(clustering) is str:
if clustering == 'kmeans':
clustering = ['kmeans', 'k-means++', 10]
elif clustering == 'agglomerative':
clustering = ['agglomerative', 'ward', 'euclidean']
elif clustering == 'mean_shift':
clustering = ['mean_shift', 'auto']
elif clustering == 'group': # TODO: call ILE clustering?
print('Call ILE clustering from optimal_clusters?')
elif clustering == 'random': # TODO: call random clustering?
print('Call random clustering from optimal_clusters?')
else:
clustering = ['agglomerative', 'ward', 'euclidean']
if len(clustering) > 3:
if type(kwargs['clustering'][3]) is int:
knn = kwargs['clustering'][3]
if clustering[0] == 'agglomerative':
linkage = clustering[1]
if len(clustering) > 2:
affinity = clustering[2]
else: affinity = 'euclidean'
else:
linkage = clustering[0] # FIXME: all options...
spaces = ''
if kwargs['clustering'] == 'random':
spaces += 'RND'
else:
if kwargs['context'] == 1:
spaces += 'c'
else: spaces += 'd'
spaces += abrvlg(**kwargs)
if kwargs['grammar_rules'] == 1:
spaces += 'c'
elif kwargs['grammar_rules'] == -1: # interconnected connector-style
spaces += 'ic'
elif kwargs['grammar_rules'] == -2: # interconnected disjunct-style
spaces += 'id'
else: spaces += 'd'
details = []
rules = []  # kept for parity with wide_rows (used when linkage_limit == 0)
for i, line in enumerate(lines):
corpus = line[1]
dataset = line[2]
if line[3] != 0:
kwargs['left_wall'] = 'LEFT-WALL'
lw = 'LW'
else:
kwargs['left_wall'] = ''
lw = ' --- '
if line[4] != 0:
kwargs['period'] = True
dot = ' . '
else:
kwargs['period'] = False
dot = ' --- '
gen = line[5] # none | rules | categories | both | old | updated | new
if 'rules_aggregation' in kwargs \
and type(kwargs['rules_aggregation']) is float:
rgt = str(kwargs['rules_aggregation'])
if gen in ['rules', 'both', 'old', 'jaccard']:
kwargs['rules_generalization'] = 'jaccard'
elif gen in ['updated', 'hierarchical', 'hier.', 'HDJ']:
kwargs['rules_generalization'] = 'hierarchical'
gen = 'HDJ'
elif gen in ['new', 'fast']:
kwargs['rules_generalization'] = 'fast'
gen = 'fast'
else:
kwargs['rules_generalization'] = 'off'
rgt = '---' # rules_generalization_threshold
if gen in ['categories', 'both']:
kwargs['categories_generalization'] = 'jaccard'
else: kwargs['categories_generalization'] = 'off'
if kwargs['grammar_rules'] == 1 and gen != 'none': continue
ip, oc, og = params(corpus, dataset, module_path, out_dir, **kwargs)
# ip, oc, og: input path, output categories, output grammar
kwargs['input_parses'] = ip
kwargs['output_grammar'] = og
kwargs['output_categories'] = oc # = output_grammar if absent or ''
if True: # try: #
rulez, re = learn(**kwargs)
            if 'rule_sizes' in re:
                cluster_sizes = sorted(re['rule_sizes'].keys(), reverse=True)[:5]
            elif 'cluster_sizes' in re:
                cluster_sizes = sorted(re['cluster_sizes'].keys(), reverse=True)[:5]
            else:
                cluster_sizes = []
if 'silhouette' in re:
s = round(re['silhouette'], 2)
s_str = str(s)
else:
s = 0
s_str = ' --- '
else: # except: #
            logger.critical('pqa_table.py wide_table: learn_grammar(**kwargs) '
                            '⇒ exception:\n%s', sys.exc_info())
dline = [line[0], corpus, dataset, spaces,
linkage, affinity, gen, ' ---', 'fail',
' ---', ' ---', ' ---', ' ---', ' ---', ' ---']
details.append(dline)
continue # FIXME: check case
if kwargs['linkage_limit'] > 0:
start = time.time()
a, f1, precision, q = pqa_meter(re['grammar_file'],
og, cp, rp, **kwargs)
dline = [line[0], corpus, dataset, spaces,
linkage, affinity, gen, rgt,
' ' + str(re['grammar_rules']) + ' ',
str(kwargs['min_word_count']),
s_str, str(knn), str(round(a * 100)) + '%',
str(round(q * 100)) + '%', str(round(f1, 2))]
if 'log+' in kwargs['verbose']:
dline.append(cluster_sizes)
else:
rules.append(re['grammar_rules'])
dline = [line[0], corpus, dataset, spaces,
linkage, affinity, gen, rgt,
' ' + str(re['grammar_rules']) + ' ',
str(kwargs['min_word_count']),
s_str, str(knn), ' ---', ' ---', ' ---']
details.append(dline)
    if kwargs['linkage_limit'] > 0:  # 'start' is only set when the grammar tester ran
        re.update({'grammar_test_time': sec2string(time.time() - start)})
stats = []
if 'grammar_learn_time' in re:
stats.append(['Grammar learn time', re['grammar_learn_time']])
if 'grammar_test_time' in re:
stats.append(['Grammar test time ', re['grammar_test_time']])
if len(stats) > 0:
x = re['corpus_stats_file']
list2file(stats, x[:x.rfind('/')] + '/learn_&_test_stats.txt')
return header, details, re
# Notes:
# 80802 /src/poc05.py restructured, def params moved here, further dev here
# legacy pqa_table.py renamed pqa05.py ~ poc05+pqa05=baseline (DEL later)
# 80825 kwargs['grammar_rules'] == -1,-2: interconnected clusters
# -1: connectors #Cxx: {C01Cxx- or ... CnCxx-} and {CxxC01+ or ... CxxCn+}
# -2: disjuncts #Cxx: (C01Cxx-) or (C02Cxx-) ... or (CxxCn+)
# 81018 unified table_rows, ready for next test_grammar, table: PA/PQ/F1
# 81114 wider table for agglomerative clustering tests
# 81120 wide_rows
# 81210 wide_rows + min_word_count
# 81220 wide_table ⇒ FIXME in 2019, replace wide_row in 2019 .ipynb tests.
# 81231 cleanup
# 190221 tweak min_word_count (line 69)
# 190410 fix empty filtered dataset issue
| 41.312966 | 202 | 0.509 |
f763f76e7edc38e0cd469c0deb781726caef6e8f
| 454 |
py
|
Python
|
code/partition.py
|
Koomook/nsmc
|
64fb83769072be3822f663383d0855dd66c92855
|
[
"CC0-1.0"
] | 460 |
2015-08-29T07:16:07.000Z
|
2022-03-23T07:46:58.000Z
|
code/partition.py
|
Koomook/nsmc
|
64fb83769072be3822f663383d0855dd66c92855
|
[
"CC0-1.0"
] | 1 |
2020-06-02T07:51:28.000Z
|
2020-06-04T00:11:17.000Z
|
code/partition.py
|
Koomook/nsmc
|
64fb83769072be3822f663383d0855dd66c92855
|
[
"CC0-1.0"
] | 219 |
2015-12-05T05:56:05.000Z
|
2022-03-31T01:59:05.000Z
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np; np.random.seed(1234)
import pandas as pd
ntrain = 150000  # number of reviews kept for the training split
# Shuffle the full ratings file, then split it into train and test partitions.
data = pd.read_csv('../ratings.txt', sep='\t', quoting=3)
data = pd.DataFrame(np.random.permutation(data))
trn, tst = data[:ntrain], data[ntrain:]
header = 'id document label'.split()
trn.to_csv('../ratings_train.txt', sep='\t', index=False, header=header)
tst.to_csv('../ratings_test.txt', sep='\t', index=False, header=header)
| 26.705882 | 72 | 0.678414 |
f7640da42683b6fa666e2e9f252b2a4f6a180d3f
| 6,727 |
py
|
Python
|
appengine/predator/analysis/linear/changelist_features/touch_crashed_directory.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | 2 |
2021-04-13T21:22:18.000Z
|
2021-09-07T02:11:57.000Z
|
appengine/predator/analysis/linear/changelist_features/touch_crashed_directory.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | 21 |
2020-09-06T02:41:05.000Z
|
2022-03-02T04:40:01.000Z
|
appengine/predator/analysis/linear/changelist_features/touch_crashed_directory.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
from collections import namedtuple
import logging
import math
import os
import re
from analysis import crash_util
from analysis.crash_match import CrashedDirectory
from analysis.linear.feature import Feature
from analysis.linear.feature import FeatureValue
from libs.gitiles.diff import ChangeType
class TouchCrashedDirectoryBaseFeature(Feature): # pylint: disable=W0223
"""Returns either one or zero.
  When a suspect touched the crashed directory, we return the
  value 1. When there is no directory match, we return the value 0.
"""
def __init__(self, include_test_files=True, options=None, level=0):
"""
Args:
include_test_files (boolean): If False, it makes the feature ignore test
files that the suspect touched (e.g. unittest, browsertest, perftest).
options (dict): config dictionary for the feature.
      level (int): The level of directory to look up. If the level is 0, the
        crashed directory is the directory where the crashed file is located;
        if the level is 1, it is the parent of the crashed directory.
"""
self._include_test_files = include_test_files
blacklist = options.get('blacklist', []) if options else []
self._blacklist = [directory.lower() for directory in blacklist]
self._level = level
self._path_mappings = []
if options and 'replace_path' in options:
self._path_mappings.append(
crash_util.ReplacePath(options['replace_path']))
def GetCrashedDirectory(self, file_path):
file_parts = file_path.split('/')
return '/'.join(file_parts[:-(self._level + 1)])
def CrashedGroupFactory(self, frame):
"""Factory function to create ``CrashedDirectory``."""
    # Files in the root directory are things like OWNERS and DEPS, so skip it.
if not frame or not frame.file_path:
return None
directory = self.GetCrashedDirectory(frame.file_path)
if not directory or directory.lower() in self._blacklist:
return None
return CrashedDirectory(directory) if directory else None
def Match(self, crashed_directory, touched_file):
"""Determines whether a touched_file matches this crashed directory or not.
Args:
touched_file (FileChangeInfo): touched file to examine.
Returns:
Boolean indicating whether it is a match or not.
"""
if not crashed_directory:
return False
if touched_file.change_type == ChangeType.DELETE:
return False
if not self._include_test_files and _IsTestFile(touched_file.new_path):
return False
path = crash_util.MapPath(touched_file.new_path, self._path_mappings)
return path.startswith(crashed_directory.value + '/')
def __call__(self, report):
"""
Args:
report (CrashReport): the crash report being analyzed.
Returns:
A ``FeatureValue`` with name, log-domain value, reason and changed_files.
"""
dep_to_grouped_frame_infos = crash_util.IndexFramesWithCrashedGroup(
report.stacktrace, self.CrashedGroupFactory, report.dependencies)
def FeatureValueGivenReport(suspect):
"""Compute ``FeatureValue`` for a suspect.
Args:
suspect (Suspect): The suspected changelog and some meta information
about it.
Returns:
The ``FeatureValue`` of this feature.
"""
grouped_frame_infos = dep_to_grouped_frame_infos.get(suspect.dep_path, {})
matches = crash_util.MatchSuspectWithFrameInfos(suspect,
grouped_frame_infos,
self.Match)
if not matches:
return FeatureValue(name=self.name,
value=0.0,
reason=None,
changed_files=None)
crashed_directories = [directory.value for directory in matches]
plural = len(crashed_directories) > 1
reason = [
'Suspected changelist touched file(s) in the %s %s, which '
'appear%s in the stack trace.' % (
'directories' if plural else 'directory',
', '.join(crashed_directories),
'' if plural else 's')]
return FeatureValue(
name=self.name,
value=1.0,
reason=reason,
changed_files=None)
return FeatureValueGivenReport
def _IsTestFile(filename):
regex = re.compile(
r'(unittest|perftest|performancetest|browsertest|_test)\.[^/.]+$')
return regex.search(filename) is not None
class TouchCrashedDirectoryFeature(TouchCrashedDirectoryBaseFeature):
"""Determine whether a changelog touched files under the crashed directory.
Crashed directory means the directory of crashed files in stacktrace.
"""
def __init__(self, include_test_files=True, options=None):
"""
Args:
include_test_files (boolean): If False, it makes the feature ignore test
files that the suspect touched (e.g. unittest, browsertest, perftest).
"""
super(TouchCrashedDirectoryFeature, self).__init__(
include_test_files=include_test_files, options=options, level=0)
@property
def name(self):
return 'TouchCrashedDirectory'
class TouchParentDirectoryFeature(TouchCrashedDirectoryBaseFeature):
"""Determine whether a cl touched files under the parent dir of crashed dir.
Crashed directory means the directory of crashed files in stacktrace.
"""
def __init__(self, include_test_files=True, options=None):
"""
Args:
include_test_files (boolean): If False, it makes the feature ignore test
files that the suspect touched (e.g. unittest, browsertest, perftest).
"""
super(TouchParentDirectoryFeature, self).__init__(
include_test_files=include_test_files, options=options, level=1)
@property
def name(self):
return 'TouchParentDirectory'
class TouchGrandParentDirectoryFeature(TouchCrashedDirectoryBaseFeature):
"""Determine whether a cl touched files under grand parent dir of crashed dir.
Crashed directory means the directory of crashed files in stacktrace.
"""
def __init__(self, include_test_files=True, options=None):
"""
Args:
include_test_files (boolean): If False, it makes the feature ignore test
files that the suspect touched (e.g. unittest, browsertest, perftest).
"""
super(TouchGrandParentDirectoryFeature, self).__init__(
include_test_files=include_test_files, options=options, level=2)
@property
def name(self):
return 'TouchGrandParentDirectory'
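# Illustrative sketch only (not part of the original module; the path below is
# hypothetical). It shows how the ``level`` argument above picks the directory
# that a suspect's touched files are matched against.
def _CrashedDirectoryLevelExample():
  """Returns the level-1 (parent) directory for a sample crashed file path."""
  feature = TouchParentDirectoryFeature(options=None)  # level=1
  # For 'a/b/c/file.cc': level 0 would yield 'a/b/c', level 1 yields 'a/b'.
  return feature.GetCrashedDirectory('a/b/c/file.cc')  # -> 'a/b'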
| 34.854922 | 80 | 0.694217 |
f76416fa28190a8fac9f20f2af2054ed53c030b5
| 9,081 |
py
|
Python
|
rlkit/torch/sac/diayn/diayn_cont.py
|
vincentlui/unsupervised-goal-conditioned-rl
|
4f2e6938e072cb52f8ee779a939fe7bf6a980d45
|
[
"MIT"
] | null | null | null |
rlkit/torch/sac/diayn/diayn_cont.py
|
vincentlui/unsupervised-goal-conditioned-rl
|
4f2e6938e072cb52f8ee779a939fe7bf6a980d45
|
[
"MIT"
] | null | null | null |
rlkit/torch/sac/diayn/diayn_cont.py
|
vincentlui/unsupervised-goal-conditioned-rl
|
4f2e6938e072cb52f8ee779a939fe7bf6a980d45
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import math
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
import torch.nn.functional as F
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_rl_algorithm import TorchTrainer
class DIAYNContTrainer(TorchTrainer):
def __init__(
self,
env,
policy,
qf1,
qf2,
target_qf1,
target_qf2,
df,
discount=0.99,
reward_scale=1.0,
policy_lr=1e-3,
qf_lr=1e-3,
df_lr=1e-3,
optimizer_class=optim.Adam,
soft_target_tau=1e-2,
target_update_period=1,
plotter=None,
render_eval_paths=False,
use_automatic_entropy_tuning=True,
target_entropy=None,
):
super().__init__()
self.env = env
self.policy = policy
self.qf1 = qf1
self.qf2 = qf2
self.target_qf1 = target_qf1
self.target_qf2 = target_qf2
self.df = df
self.soft_target_tau = soft_target_tau
self.target_update_period = target_update_period
self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
if self.use_automatic_entropy_tuning:
if target_entropy:
self.target_entropy = target_entropy
else:
self.target_entropy = -np.prod(self.env.action_space.shape).item() # heuristic value from Tuomas
self.log_alpha = ptu.zeros(1, requires_grad=True)
self.alpha_optimizer = optimizer_class(
[self.log_alpha],
lr=policy_lr,
)
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.qf_criterion = nn.MSELoss()
self.vf_criterion = nn.MSELoss()
self.df_criterion = nn.CrossEntropyLoss()
self.policy_optimizer = optimizer_class(
self.policy.parameters(),
lr=policy_lr,
)
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
lr=qf_lr,
)
self.df_optimizer = optimizer_class(
self.df.parameters(),
lr=df_lr,
)
self.discount = discount
self.reward_scale = reward_scale
self.eval_statistics = OrderedDict()
self._n_train_steps_total = 0
self._need_to_update_eval_statistics = True
def train_from_torch(self, batch):
rewards = batch['rewards']
terminals = batch['terminals']
obs = batch['observations']
actions = batch['actions']
next_obs = batch['next_observations']
skills = batch['skills']
"""
DF Loss and Intrinsic Reward
"""
df_input = torch.cat([obs], dim=1)
df_distribution = self.df(df_input)
log_likelihood = df_distribution.log_prob(skills)
rewards = log_likelihood.reshape(-1, 1)
df_loss = -log_likelihood.mean()
# z_hat = torch.argmax(skills, dim=1)
# d_pred = self.df(next_obs)
# d_pred_log_softmax = F.log_softmax(d_pred, 1)
# _, pred_z = torch.max(d_pred_log_softmax, dim=1, keepdim=True)
# rewards = d_pred_log_softmax[torch.arange(d_pred.shape[0]), z_hat] - math.log(1/self.policy.skill_dim)
# rewards = rewards.reshape(-1, 1)
# df_loss = self.df_criterion(d_pred, z_hat)
"""
Policy and Alpha Loss
"""
new_obs_actions, policy_mean, policy_log_std, log_pi, *_ = self.policy(
skills, reparameterize=True, return_log_prob=True,
)
obs_skills = torch.cat((obs, skills), dim=1)
if self.use_automatic_entropy_tuning:
alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
alpha = self.log_alpha.exp()
else:
alpha_loss = 0
alpha = .1
q_new_actions = torch.min(
self.qf1(obs_skills, new_obs_actions),
self.qf2(obs_skills, new_obs_actions),
)
policy_loss = (alpha*log_pi - q_new_actions).mean()
"""
QF Loss
"""
q1_pred = self.qf1(obs_skills, actions)
q2_pred = self.qf2(obs_skills, actions)
# Make sure policy accounts for squashing functions like tanh correctly!
new_next_actions, _, _, new_log_pi, *_ = self.policy(
skills, reparameterize=True, return_log_prob=True,
)
next_obs_skills = torch.cat((next_obs, skills), dim=1)
target_q_values = torch.min(
self.target_qf1(next_obs_skills, new_next_actions),
self.target_qf2(next_obs_skills, new_next_actions),
) - alpha * new_log_pi
q_target = self.reward_scale * rewards + (1. - terminals) * self.discount * target_q_values
qf1_loss = self.qf_criterion(q1_pred, q_target.detach())
qf2_loss = self.qf_criterion(q2_pred, q_target.detach())
"""
Update networks
"""
self.df_optimizer.zero_grad()
df_loss.backward()
self.df_optimizer.step()
self.qf1_optimizer.zero_grad()
qf1_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.zero_grad()
qf2_loss.backward()
self.qf2_optimizer.step()
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
"""
Soft Updates
"""
if self._n_train_steps_total % self.target_update_period == 0:
ptu.soft_update_from_to(
self.qf1, self.target_qf1, self.soft_target_tau
)
ptu.soft_update_from_to(
self.qf2, self.target_qf2, self.soft_target_tau
)
"""
Save some statistics for eval
"""
# df_accuracy = torch.sum(torch.eq(z_hat, pred_z.reshape(1, list(pred_z.size())[0])[0])).float()/list(pred_z.size())[0]
if self._need_to_update_eval_statistics:
self._need_to_update_eval_statistics = False
"""
Eval should set this to None.
This way, these statistics are only computed for one batch.
"""
policy_loss = (log_pi - q_new_actions).mean()
self.eval_statistics['Intrinsic Rewards'] = np.mean(ptu.get_numpy(rewards))
self.eval_statistics['DF Loss'] = np.mean(ptu.get_numpy(df_loss))
# self.eval_statistics['DF Accuracy'] = np.mean(ptu.get_numpy(df_accuracy))
self.eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))
self.eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))
self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q1 Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q2 Predictions',
ptu.get_numpy(q2_pred),
))
# self.eval_statistics.update(create_stats_ordered_dict(
# 'D Predictions',
# ptu.get_numpy(pred_z),
# ))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Targets',
ptu.get_numpy(q_target),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
if self.use_automatic_entropy_tuning:
self.eval_statistics['Alpha'] = alpha.item()
self.eval_statistics['Alpha Loss'] = alpha_loss.item()
self._n_train_steps_total += 1
def get_diagnostics(self):
return self.eval_statistics
def end_epoch(self, epoch):
self._need_to_update_eval_statistics = True
@property
def networks(self):
return [
self.policy,
self.qf1,
self.qf2,
self.target_qf1,
self.target_qf2,
self.df
]
def get_snapshot(self):
return dict(
policy=self.policy,
qf1=self.qf1,
qf2=self.qf2,
target_qf1=self.qf1,
target_qf2=self.qf2,
df=self.df
)
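# Reference sketch only: an assumption about what ptu.soft_update_from_to does
# in train_from_torch above (the real helper lives in rlkit, not here). The
# expected rule is Polyak averaging: target <- tau * source + (1 - tau) * target.
def _polyak_soft_update(source, target, tau):
    """Minimal stand-in illustrating the soft target-network update rule."""
    for s, t in zip(source.parameters(), target.parameters()):
        t.data.copy_(tau * s.data + (1.0 - tau) * t.data)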
| 33.263736 | 127 | 0.580333 |
f7645d81ab83023bce9da381899330cae5ae4612
| 181 |
py
|
Python
|
oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/log.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/log.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/prompt_toolkit/contrib/telnet/log.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Python logger for the telnet server.
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__package__)
__all__ = (
'logger',
)
| 15.083333 | 40 | 0.690608 |
f764a6f35b550ac9e35a32cc1440776340ba04c6
| 821 |
py
|
Python
|
290_Word-Pattern.py
|
Coalin/Daily-LeetCode-Exercise
|
a064dcdc3a82314be4571d342c4807291a24f69f
|
[
"MIT"
] | 3 |
2018-07-05T05:51:10.000Z
|
2019-05-04T08:35:44.000Z
|
290_Word-Pattern.py
|
Coalin/Daily-LeetCode-Exercise
|
a064dcdc3a82314be4571d342c4807291a24f69f
|
[
"MIT"
] | null | null | null |
290_Word-Pattern.py
|
Coalin/Daily-LeetCode-Exercise
|
a064dcdc3a82314be4571d342c4807291a24f69f
|
[
"MIT"
] | null | null | null |
class Solution:
def wordPattern(self, pattern, str):
"""
:type pattern: str
:type str: str
:rtype: bool
"""
str_ = str.split(" ")
dic_s = {}
dic_p = {}
res_s = []
res_p = []
for i in range(len(str_)):
if str_[i] in dic_s:
dic_s[str_[i]].append(i)
else:
dic_s[str_[i]] = [i]
for j in range(len(pattern)):
if pattern[j] in dic_p:
dic_p[pattern[j]].append(j)
else:
dic_p[pattern[j]] = [j]
for x in dic_s:
res_s.append(dic_s[x])
for y in dic_p:
res_p.append(dic_p[y])
return res_s == res_p
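# Quick usage check (the classic LeetCode examples for this problem):
if __name__ == '__main__':
    s = Solution()
    print(s.wordPattern('abba', 'dog cat cat dog'))   # True
    print(s.wordPattern('abba', 'dog cat cat fish'))  # False
    print(s.wordPattern('aaaa', 'dog cat cat dog'))   # False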
| 23.457143 | 43 | 0.387333 |
f764aae8e67e90ee4cb876783de81c6a1f58d94c
| 1,380 |
py
|
Python
|
artemis/general/dict_ops.py
|
peteroconnor-bc/artemis
|
ad2871fae7d986bf10580eec27aee5b7315adad5
|
[
"BSD-2-Clause-FreeBSD"
] | 235 |
2016-08-26T14:18:51.000Z
|
2022-03-13T10:54:39.000Z
|
artemis/general/dict_ops.py
|
peteroconnor-bc/artemis
|
ad2871fae7d986bf10580eec27aee5b7315adad5
|
[
"BSD-2-Clause-FreeBSD"
] | 112 |
2016-04-30T11:48:38.000Z
|
2021-01-12T20:17:32.000Z
|
artemis/general/dict_ops.py
|
peteroconnor-bc/artemis
|
ad2871fae7d986bf10580eec27aee5b7315adad5
|
[
"BSD-2-Clause-FreeBSD"
] | 31 |
2016-11-05T19:09:19.000Z
|
2021-09-13T07:35:40.000Z
|
import itertools
__author__ = 'peter'
def cross_dict_dicts(*dicts):
"""
    Combine two or more dictionaries of dictionaries by taking every pairwise combination of their keys and creating a
    new dict whose keys are tuples (containing these key-combinations) and whose values are the combined dictionaries.
e.g.
cross_dict_dicts({'a':{'aa': 1}, 'b':{'bb': 2}}, {'c': {'cc': 3}, 'd': {'dd': 4}})
returns {
('a','c'):{'aa':1, 'cc':3},
('a','d'):{'aa':1, 'dd':4},
('b','c'):{'bb':2, 'cc':3},
('b','d'):{'bb':2, 'dd':4},
}
    This can be useful if, for example, you want to try several combinations of different arguments to a function.
:param dicts: Dictionaries of dictionaries.
:return: A Dictionary of dictionaries.
"""
cross_dict = dict((keys, merge_dicts(*[d[k] for d, k in zip(dicts, keys)])) for keys in itertools.product(*[d.keys() for d in dicts]))
return cross_dict
def merge_dicts(*dicts):
"""
Given a collection of dictionaries, merge them.
e.g.
merge_dicts({'a': 1, 'b': 2}, {'c': 3, 'd': 4})
returns {'a': 1, 'b': 2, 'c': 3, 'd': 4}
Later dicts overwrite earlier ones.
:param dicts: dictionaries.
:return: A merged dictionary.
"""
return dict((k, v) for d in dicts for k, v in d.items())
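# Minimal usage sketch (the argument names below are made up for illustration):
# cross_dict_dicts enumerates every combination of two parameter grids, which is
# the "try several combinations of arguments" use case described above.
if __name__ == '__main__':
    learning_rates = {'lr0.1': {'lr': 0.1}, 'lr0.01': {'lr': 0.01}}
    batch_sizes = {'b32': {'batch_size': 32}, 'b64': {'batch_size': 64}}
    for keys, params in cross_dict_dicts(learning_rates, batch_sizes).items():
        print(keys, params)  # e.g. ('lr0.1', 'b32') {'lr': 0.1, 'batch_size': 32}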
| 31.363636 | 138 | 0.577536 |
f764cc55c43f1f4429e5b5bf497f054dc2273fcf
| 2,632 |
py
|
Python
|
neutronclient/tests/unit/qos/test_cli20_qos.py
|
teresa-ho/stx-python-neutronclient
|
35ea6c2c96cbf98755a82cb7c19138648552b778
|
[
"Apache-2.0"
] | null | null | null |
neutronclient/tests/unit/qos/test_cli20_qos.py
|
teresa-ho/stx-python-neutronclient
|
35ea6c2c96cbf98755a82cb7c19138648552b778
|
[
"Apache-2.0"
] | null | null | null |
neutronclient/tests/unit/qos/test_cli20_qos.py
|
teresa-ho/stx-python-neutronclient
|
35ea6c2c96cbf98755a82cb7c19138648552b778
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from mox3 import mox
from neutronclient.neutron.v2_0.qos import qos
from neutronclient.tests.unit import test_cli20
class CLITestV20QoSJson(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20QoSJson, self).setUp(plurals={'qoses': 'qos'})
# FIXME(alegacy):
def _test_create_qos_with_params(self):
def setup_create_stub(resources, data):
reses = {resources: data}
resstr = self.client.serialize(reses)
resp = (test_cli20.MyResp(200), resstr)
path = getattr(self.client, resources + '_path')
self.client.httpclient.request(
test_cli20.end_url(path), 'POST',
body=resstr,
headers=mox.ContainsKeyValue('X-Auth-Token',
test_cli20.TOKEN)).AndReturn(resp)
description = 'test QoS'
tenant_id = 'my-tenant'
policies = "dscp=20"
expected = [('description', 'policies', 'tenant_id'),
(description, '{"dscp": {"dscp": "20"}}', tenant_id)]
args = ['--dscp', policies,
'--description', description,
'--tenant-id', tenant_id,
]
resource = 'qos'
cmd = qos.CreateQoS(test_cli20.MyApp(sys.stdout), None)
qos_data = {"tenant_id": tenant_id,
"policies": {"dscp": {"dscp": "20"}},
"description": description,
}
self.mox.StubOutWithMock(cmd, 'get_client')
self.mox.StubOutWithMock(self.client.httpclient, 'request')
cmd.get_client().AndReturn(self.client)
setup_create_stub(resource, qos_data)
self.mox.ReplayAll()
cmd_parser = cmd.get_parser('create_qos')
parsed_args = cmd_parser.parse_args(args)
result = cmd.get_data(parsed_args)
for res, exp in zip(result, expected):
self.assertEqual(res, exp)
self.mox.VerifyAll()
| 36.555556 | 79 | 0.612082 |
f764f00877f84bee882c151809ee233ec22e5b2c
| 733 |
py
|
Python
|
tests/pipe_proc_tests/ls.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 150 |
2015-01-16T12:24:13.000Z
|
2022-03-03T18:01:18.000Z
|
tests/pipe_proc_tests/ls.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 129 |
2015-01-13T04:58:56.000Z
|
2022-03-02T13:39:16.000Z
|
tests/pipe_proc_tests/ls.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 88 |
2015-02-16T20:04:12.000Z
|
2022-03-10T06:50:30.000Z
|
#! /usr/bin/env python
""" Create files for ls unit test """
import nmrglue.fileio.pipe as pipe
import nmrglue.process.pipe_proc as p
d, a = pipe.read("time_complex.fid")
d, a = p.ls(d, a, ls=2.0, sw=True)
pipe.write("ls1.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.ls(d, a, ls=-3.0, sw=True)
pipe.write("ls2.glue", d, a, overwrite=True)
# freq domain
d, a = pipe.read("freq_real.ft2")
d, a = p.ls(d, a, ls=2.0, sw=True)
pipe.write("ls3.glue", d, a, overwrite=True)
d, a = pipe.read("freq_real.ft2")
d, a = p.ls(d, a, ls=17.0, sw=True)
pipe.write("ls4.glue", d, a, overwrite=True)
d, a = pipe.read("freq_real.ft2")
d, a = p.ls(d, a, ls=-5.0, sw=True)
pipe.write("ls5.glue", d, a, overwrite=True)
| 27.148148 | 44 | 0.637108 |
f7650d45f390ffef6b5fa0464cca2ab180839eed
| 645 |
py
|
Python
|
examples/twitter.py
|
JNRowe-retired/Dolt
|
ffa670fe10c62a9fc788112c893aa8fc007168c9
|
[
"BSD-3-Clause"
] | 8 |
2015-11-05T15:42:26.000Z
|
2021-08-17T09:18:27.000Z
|
examples/twitter.py
|
JNRowe-retired/Dolt
|
ffa670fe10c62a9fc788112c893aa8fc007168c9
|
[
"BSD-3-Clause"
] | null | null | null |
examples/twitter.py
|
JNRowe-retired/Dolt
|
ffa670fe10c62a9fc788112c893aa8fc007168c9
|
[
"BSD-3-Clause"
] | 1 |
2021-03-12T18:51:23.000Z
|
2021-03-12T18:51:23.000Z
|
import getpass
import os, sys
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), "..")]  # insert the parent directory as a single path entry
from dolt.apis import Twitter
from httplib2 import Http
if __name__ == "__main__":
http = Http()
username = raw_input("Twitter Username: ")
password = getpass.getpass("Twitter Password: ")
http.add_credentials(username, password)
twitter = Twitter(http=http)
user = twitter.users.show("tswicegood")
print "Screen Name: %s" % user['screen_name']
print "Real Name: %s" % user['name']
tweet = raw_input("Tweet something (blank to exit): ")
if len(tweet) > 0:
twitter.statuses.update.POST(status=tweet)
| 29.318182 | 61 | 0.669767 |
f765184b8d5764151f3abee34b4975d8e8c7e1f8
| 3,132 |
py
|
Python
|
test/vanilla/legacy/Expected/AcceptanceTests/AdditionalProperties/additionalproperties/aio/_additional_properties_client.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 35 |
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/vanilla/legacy/Expected/AcceptanceTests/AdditionalProperties/additionalproperties/aio/_additional_properties_client.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 652 |
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/vanilla/legacy/Expected/AcceptanceTests/AdditionalProperties/additionalproperties/aio/_additional_properties_client.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 29 |
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import AdditionalPropertiesClientConfiguration
from .operations import PetsOperations
class AdditionalPropertiesClient:
"""Test Infrastructure for AutoRest.
:ivar pets: PetsOperations operations
:vartype pets: additionalproperties.aio.operations.PetsOperations
:param base_url: Service URL. Default value is 'http://localhost:3000'.
:type base_url: str
"""
def __init__(self, base_url: str = "http://localhost:3000", **kwargs: Any) -> None:
self._config = AdditionalPropertiesClientConfiguration(**kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.pets = PetsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AdditionalPropertiesClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
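# Illustrative usage sketch only; the request target and the event-loop handling
# are assumptions, not part of the generated client.
async def _example_send_request():
    async with AdditionalPropertiesClient(base_url="http://localhost:3000") as client:
        request = HttpRequest("GET", "/")
        response = await client._send_request(request)
        return response.status_code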
| 44.112676 | 99 | 0.685824 |
f7653ca90434318d84692534b39ad971d0f07119
| 74 |
py
|
Python
|
common/determined_common/schemas/expconf/__init__.py
|
hexieshenghuo/determined
|
1e0948d89373ac26e3134c94591114c0951090d6
|
[
"Apache-2.0"
] | 1 |
2021-03-29T04:25:27.000Z
|
2021-03-29T04:25:27.000Z
|
common/determined_common/schemas/expconf/__init__.py
|
hexieshenghuo/determined
|
1e0948d89373ac26e3134c94591114c0951090d6
|
[
"Apache-2.0"
] | null | null | null |
common/determined_common/schemas/expconf/__init__.py
|
hexieshenghuo/determined
|
1e0948d89373ac26e3134c94591114c0951090d6
|
[
"Apache-2.0"
] | null | null | null |
from determined_common.schemas.expconf._validate import validation_errors
| 37 | 73 | 0.905405 |
f76567315cfa48659bfeab08e5336aff94a255bb
| 6,183 |
py
|
Python
|
asposewordscloud/models/requests/delete_comment_request.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 14 |
2018-07-15T17:01:52.000Z
|
2018-11-29T06:15:33.000Z
|
asposewordscloud/models/requests/delete_comment_request.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 1 |
2018-09-28T12:59:34.000Z
|
2019-10-08T08:42:59.000Z
|
asposewordscloud/models/requests/delete_comment_request.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 2 |
2020-12-21T07:59:17.000Z
|
2022-02-16T21:41:25.000Z
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="delete_comment_request.py">
# Copyright (c) 2021 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import json
from six.moves.urllib.parse import quote
from asposewordscloud import *
from asposewordscloud.models import *
from asposewordscloud.models.requests import *
from asposewordscloud.models.responses import *
class DeleteCommentRequest(BaseRequestObject):
"""
Request model for delete_comment operation.
Initializes a new instance.
:param name The filename of the input document.
:param comment_index The index of the comment.
:param folder Original document folder.
:param storage Original document storage.
:param load_encoding Encoding that will be used to load an HTML (or TXT) document if the encoding is not specified in HTML.
:param password Password for opening an encrypted document.
:param dest_file_name Result path of the document after the operation. If this parameter is omitted then result of the operation will be saved as the source document.
:param revision_author Initials of the author to use for revisions.If you set this parameter and then make some changes to the document programmatically, save the document and later open the document in MS Word you will see these changes as revisions.
:param revision_date_time The date and time to use for revisions.
"""
def __init__(self, name, comment_index, folder=None, storage=None, load_encoding=None, password=None, dest_file_name=None, revision_author=None, revision_date_time=None):
self.name = name
self.comment_index = comment_index
self.folder = folder
self.storage = storage
self.load_encoding = load_encoding
self.password = password
self.dest_file_name = dest_file_name
self.revision_author = revision_author
self.revision_date_time = revision_date_time
def create_http_request(self, api_client):
# verify the required parameter 'name' is set
if self.name is None:
raise ValueError("Missing the required parameter `name` when calling `delete_comment`") # noqa: E501
# verify the required parameter 'comment_index' is set
if self.comment_index is None:
raise ValueError("Missing the required parameter `comment_index` when calling `delete_comment`") # noqa: E501
path = '/v4.0/words/{name}/comments/{commentIndex}'
path_params = {}
if self.name is not None:
path_params['name'] = self.name # noqa: E501
else:
path_params['name'] = '' # noqa: E501
if self.comment_index is not None:
path_params['commentIndex'] = self.comment_index # noqa: E501
else:
path_params['commentIndex'] = '' # noqa: E501
# path parameters
collection_formats = {}
if path_params:
path_params = api_client.sanitize_for_serialization(path_params)
path_params = api_client.parameters_to_tuples(path_params, collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
path = path.replace(
'{%s}' % k,
quote(str(v), safe=api_client.configuration.safe_chars_for_path_param)
)
# remove optional path parameters
path = path.replace('//', '/')
query_params = []
if self.folder is not None:
query_params.append(('folder', self.folder)) # noqa: E501
if self.storage is not None:
query_params.append(('storage', self.storage)) # noqa: E501
if self.load_encoding is not None:
query_params.append(('loadEncoding', self.load_encoding)) # noqa: E501
if self.password is not None:
query_params.append(('password', self.password)) # noqa: E501
if self.dest_file_name is not None:
query_params.append(('destFileName', self.dest_file_name)) # noqa: E501
if self.revision_author is not None:
query_params.append(('revisionAuthor', self.revision_author)) # noqa: E501
if self.revision_date_time is not None:
query_params.append(('revisionDateTime', self.revision_date_time)) # noqa: E501
header_params = {}
form_params = []
body_params = None
return {
"method": "DELETE",
"path": path,
"query_params": query_params,
"header_params": header_params,
"form_params": form_params,
"body": body_params,
"collection_formats": collection_formats,
"response_type": 'None' # noqa: E501
}
def get_response_type(self):
return 'None' # noqa: E501
def deserialize_response(self, api_client, response):
return None
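# Illustrative only (the file names below are hypothetical): building the request
# model that a Words API client would later turn into an HTTP call via
# create_http_request().
def _example_delete_comment_request():
    return DeleteCommentRequest(name='document.docx', comment_index=0,
                                dest_file_name='updated/document.docx')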
| 47.198473 | 255 | 0.658095 |
f76583369b0f86dce82f8c0f174f5aeff290b93d
| 54,023 |
py
|
Python
|
SCRAPE/Lib/site-packages/twisted/python/filepath.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | 4,612 |
2015-01-01T12:57:23.000Z
|
2022-03-30T01:08:23.000Z
|
SCRAPE/Lib/site-packages/twisted/python/filepath.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | 1,243 |
2015-01-23T17:23:59.000Z
|
2022-03-28T13:46:17.000Z
|
SCRAPE/Lib/site-packages/twisted/python/filepath.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | 1,236 |
2015-01-13T14:41:26.000Z
|
2022-03-17T07:12:36.000Z
|
# -*- test-case-name: twisted.test.test_paths -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Object-oriented filesystem path representation.
"""
import base64
import errno
import os
import sys
from os import listdir, stat, utime
from os.path import (
abspath,
basename,
dirname,
exists,
isabs,
join as joinpath,
normpath,
splitext,
)
from stat import (
S_IMODE,
S_IRGRP,
S_IROTH,
S_IRUSR,
S_ISBLK,
S_ISDIR,
S_ISREG,
S_ISSOCK,
S_IWGRP,
S_IWOTH,
S_IWUSR,
S_IXGRP,
S_IXOTH,
S_IXUSR,
)
from typing import IO, Union, cast
from zope.interface import Attribute, Interface, implementer
from twisted.python.compat import cmp, comparable
from twisted.python.runtime import platform
from twisted.python.util import FancyEqMixin
from twisted.python.win32 import (
ERROR_DIRECTORY,
ERROR_FILE_NOT_FOUND,
ERROR_INVALID_NAME,
ERROR_PATH_NOT_FOUND,
O_BINARY,
)
# Please keep this as light as possible on other Twisted imports; many, many
# things import this module, and it would be good if it could easily be
# modified for inclusion in the standard library. --glyph
_CREATE_FLAGS = os.O_EXCL | os.O_CREAT | os.O_RDWR | O_BINARY
def _stub_islink(path):
"""
Always return C{False} if the operating system does not support symlinks.
@param path: A path string.
@type path: L{str}
@return: C{False}
@rtype: L{bool}
"""
return False
islink = getattr(os.path, "islink", _stub_islink)
randomBytes = os.urandom
armor = base64.urlsafe_b64encode
class IFilePath(Interface):
"""
File path object.
A file path represents a location for a file-like-object and can be
    organized into a hierarchy; a file path can have children which are
themselves file paths.
    A file path has a name which uniquely identifies it in the context of its
parent (if it has one); a file path can not have two children with the same
name. This name is referred to as the file path's "base name".
A series of such names can be used to locate nested children of a file
path; such a series is referred to as the child's "path", relative to the
parent. In this case, each name in the path is referred to as a "path
    segment"; the child's base name is the final segment in the path.
When representing a file path as a string, a "path separator" is used to
delimit the path segments within the string. For a file system path, that
would be C{os.sep}.
Note that the values of child names may be restricted. For example, a file
system path will not allow the use of the path separator in a name, and
certain names (e.g. C{"."} and C{".."}) may be reserved or have special
meanings.
@since: 12.1
"""
sep = Attribute("The path separator to use in string representations")
def child(name):
"""
Obtain a direct child of this file path. The child may or may not
exist.
@param name: the name of a child of this path. C{name} must be a direct
child of this path and may not contain a path separator.
@return: the child of this path with the given C{name}.
@raise InsecurePath: if C{name} describes a file path that is not a
direct child of this file path.
"""
def open(mode="r"):
"""
Opens this file path with the given mode.
@return: a file-like object.
@raise Exception: if this file path cannot be opened.
"""
def changed():
"""
Clear any cached information about the state of this path on disk.
"""
def getsize():
"""
Retrieve the size of this file in bytes.
@return: the size of the file at this file path in bytes.
@raise Exception: if the size cannot be obtained.
"""
def getModificationTime():
"""
Retrieve the time of last access from this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
def getStatusChangeTime():
"""
Retrieve the time of the last status change for this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
def getAccessTime():
"""
Retrieve the time that this file was last accessed.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
def exists():
"""
Check if this file path exists.
@return: C{True} if the file at this file path exists, C{False}
otherwise.
@rtype: L{bool}
"""
def isdir():
"""
Check if this file path refers to a directory.
@return: C{True} if the file at this file path is a directory, C{False}
otherwise.
"""
def isfile():
"""
Check if this file path refers to a regular file.
@return: C{True} if the file at this file path is a regular file,
C{False} otherwise.
"""
def children():
"""
List the children of this path object.
@return: a sequence of the children of the directory at this file path.
@raise Exception: if the file at this file path is not a directory.
"""
def basename():
"""
Retrieve the final component of the file path's path (everything
after the final path separator).
@return: the base name of this file path.
@rtype: L{str}
"""
def parent():
"""
A file path for the directory containing the file at this file path.
"""
def sibling(name):
"""
        A file path for a sibling of the file at this file path.
@param name: the name of a sibling of this path. C{name} must be a
direct sibling of this path and may not contain a path separator.
@return: a sibling file path of this one.
"""
class InsecurePath(Exception):
"""
Error that is raised when the path provided to L{FilePath} is invalid.
"""
class LinkError(Exception):
"""
An error with symlinks - either that there are cyclical symlinks or that
symlink are not supported on this platform.
"""
class UnlistableError(OSError):
"""
An exception which is used to distinguish between errors which mean 'this
is not a directory you can list' and other, more catastrophic errors.
This error will try to look as much like the original error as possible,
while still being catchable as an independent type.
@ivar originalException: the actual original exception instance.
"""
def __init__(self, originalException: OSError):
"""
Create an UnlistableError exception.
@param originalException: an instance of OSError.
"""
self.__dict__.update(originalException.__dict__)
self.originalException = originalException
def _secureEnoughString(path):
"""
Compute a string usable as a new, temporary filename.
@param path: The path that the new temporary filename should be able to be
concatenated with.
@return: A pseudorandom, 16 byte string for use in secure filenames.
@rtype: the type of C{path}
"""
secureishString = armor(randomBytes(16))[:16]
return _coerceToFilesystemEncoding(path, secureishString)
class AbstractFilePath:
"""
Abstract implementation of an L{IFilePath}; must be completed by a
subclass.
This class primarily exists to provide common implementations of certain
methods in L{IFilePath}. It is *not* a required parent class for
L{IFilePath} implementations, just a useful starting point.
"""
def getContent(self):
"""
Retrieve the contents of the file at this path.
@return: the contents of the file
@rtype: L{bytes}
"""
with self.open() as fp:
return fp.read()
def parents(self):
"""
Retrieve an iterator of all the ancestors of this path.
@return: an iterator of all the ancestors of this path, from the most
recent (its immediate parent) to the root of its filesystem.
"""
path = self
parent = path.parent()
# root.parent() == root, so this means "are we the root"
while path != parent:
yield parent
path = parent
parent = parent.parent()
def children(self):
"""
List the children of this path object.
@raise OSError: If an error occurs while listing the directory. If the
error is 'serious', meaning that the operation failed due to an access
violation, exhaustion of some kind of resource (file descriptors or
memory), OSError or a platform-specific variant will be raised.
@raise UnlistableError: If the inability to list the directory is due
to this path not existing or not being a directory, the more specific
OSError subclass L{UnlistableError} is raised instead.
@return: an iterable of all currently-existing children of this object.
"""
try:
subnames = self.listdir()
except OSError as ose:
# Under Python 3.3 and higher on Windows, WindowsError is an
# alias for OSError. OSError has a winerror attribute and an
# errno attribute.
#
# The winerror attribute is bound to the Windows error code while
# the errno attribute is bound to a translation of that code to a
# perhaps equivalent POSIX error number.
#
# For further details, refer to:
# https://docs.python.org/3/library/exceptions.html#OSError
if getattr(ose, "winerror", None) in (
ERROR_PATH_NOT_FOUND,
ERROR_FILE_NOT_FOUND,
ERROR_INVALID_NAME,
ERROR_DIRECTORY,
):
raise UnlistableError(ose)
if ose.errno in (errno.ENOENT, errno.ENOTDIR):
raise UnlistableError(ose)
# Other possible errors here, according to linux manpages:
# EACCES, EMIFLE, ENFILE, ENOMEM. None of these seem like the
# sort of thing which should be handled normally. -glyph
raise
return [self.child(name) for name in subnames]
def walk(self, descend=None):
"""
Yield myself, then each of my children, and each of those children's
children in turn.
The optional argument C{descend} is a predicate that takes a FilePath,
and determines whether or not that FilePath is traversed/descended
into. It will be called with each path for which C{isdir} returns
C{True}. If C{descend} is not specified, all directories will be
traversed (including symbolic links which refer to directories).
@param descend: A one-argument callable that will return True for
FilePaths that should be traversed, False otherwise.
@return: a generator yielding FilePath-like objects.
"""
yield self
if self.isdir():
for c in self.children():
# we should first see if it's what we want, then we
# can walk through the directory
if descend is None or descend(c):
for subc in c.walk(descend):
if os.path.realpath(self.path).startswith(
os.path.realpath(subc.path)
):
raise LinkError("Cycle in file graph.")
yield subc
else:
yield c
def sibling(self, path):
"""
Return a L{FilePath} with the same directory as this instance but with
a basename of C{path}.
@param path: The basename of the L{FilePath} to return.
@type path: L{str}
@return: The sibling path.
@rtype: L{FilePath}
"""
return self.parent().child(path)
def descendant(self, segments):
"""
Retrieve a child or child's child of this path.
@param segments: A sequence of path segments as L{str} instances.
@return: A L{FilePath} constructed by looking up the C{segments[0]}
child of this path, the C{segments[1]} child of that path, and so
on.
@since: 10.2
"""
path = self
for name in segments:
path = path.child(name)
return path
def segmentsFrom(self, ancestor):
"""
Return a list of segments between a child and its ancestor.
For example, in the case of a path X representing /a/b/c/d and a path Y
        representing /a/b, C{X.segmentsFrom(Y)} will return C{['c',
'd']}.
@param ancestor: an instance of the same class as self, ostensibly an
ancestor of self.
@raise ValueError: If the C{ancestor} parameter is not actually an
ancestor, i.e. a path for /x/y/z is passed as an ancestor for /a/b/c/d.
@return: a list of strs
"""
# this might be an unnecessarily inefficient implementation but it will
        # work on win32 and for zipfiles; later I will determine if the
        # obvious fast implementation does the right thing too
f = self
p = f.parent()
segments = []
while f != ancestor and p != f:
segments[0:0] = [f.basename()]
f = p
p = p.parent()
if f == ancestor and segments:
return segments
raise ValueError(f"{ancestor!r} not parent of {self!r}")
# new in 8.0
def __hash__(self):
"""
Hash the same as another L{FilePath} with the same path as mine.
"""
return hash((self.__class__, self.path))
# pending deprecation in 8.0
def getmtime(self):
"""
Deprecated. Use getModificationTime instead.
"""
return int(self.getModificationTime())
def getatime(self):
"""
Deprecated. Use getAccessTime instead.
"""
return int(self.getAccessTime())
def getctime(self):
"""
Deprecated. Use getStatusChangeTime instead.
"""
return int(self.getStatusChangeTime())
class RWX(FancyEqMixin):
"""
A class representing read/write/execute permissions for a single user
category (i.e. user/owner, group, or other/world). Instantiate with
three boolean values: readable? writable? executable?.
@type read: C{bool}
@ivar read: Whether permission to read is given
@type write: C{bool}
@ivar write: Whether permission to write is given
@type execute: C{bool}
@ivar execute: Whether permission to execute is given
@since: 11.1
"""
compareAttributes = ("read", "write", "execute")
def __init__(self, readable, writable, executable):
self.read = readable
self.write = writable
self.execute = executable
def __repr__(self) -> str:
return "RWX(read={}, write={}, execute={})".format(
self.read,
self.write,
self.execute,
)
def shorthand(self):
"""
Returns a short string representing the permission bits. Looks like
part of what is printed by command line utilities such as 'ls -l'
(e.g. 'rwx')
@return: The shorthand string.
@rtype: L{str}
"""
returnval = ["r", "w", "x"]
i = 0
for val in (self.read, self.write, self.execute):
if not val:
returnval[i] = "-"
i += 1
return "".join(returnval)
class Permissions(FancyEqMixin):
"""
A class representing read/write/execute permissions. Instantiate with any
portion of the file's mode that includes the permission bits.
@type user: L{RWX}
@ivar user: User/Owner permissions
@type group: L{RWX}
@ivar group: Group permissions
@type other: L{RWX}
@ivar other: Other/World permissions
@since: 11.1
"""
compareAttributes = ("user", "group", "other")
def __init__(self, statModeInt):
self.user, self.group, self.other = (
RWX(*(statModeInt & bit > 0 for bit in bitGroup))
for bitGroup in [
[S_IRUSR, S_IWUSR, S_IXUSR],
[S_IRGRP, S_IWGRP, S_IXGRP],
[S_IROTH, S_IWOTH, S_IXOTH],
]
)
def __repr__(self) -> str:
return f"[{str(self.user)} | {str(self.group)} | {str(self.other)}]"
def shorthand(self):
"""
Returns a short string representing the permission bits. Looks like
what is printed by command line utilities such as 'ls -l'
(e.g. 'rwx-wx--x')
@return: The shorthand string.
@rtype: L{str}
"""
return "".join([x.shorthand() for x in (self.user, self.group, self.other)])
def _asFilesystemBytes(path: Union[bytes, str], encoding: str = "") -> bytes:
"""
Return C{path} as a string of L{bytes} suitable for use on this system's
filesystem.
@param path: The path to be made suitable.
@type path: L{bytes} or L{unicode}
@param encoding: The encoding to use if coercing to L{bytes}. If none is
given, L{sys.getfilesystemencoding} is used.
@return: L{bytes}
"""
if isinstance(path, bytes):
return path
else:
if not encoding:
encoding = sys.getfilesystemencoding()
return path.encode(encoding)
def _asFilesystemText(path, encoding=None):
"""
Return C{path} as a string of L{unicode} suitable for use on this system's
filesystem.
@param path: The path to be made suitable.
@type path: L{bytes} or L{unicode}
@param encoding: The encoding to use if coercing to L{unicode}. If none
is given, L{sys.getfilesystemencoding} is used.
@return: L{unicode}
"""
if type(path) == str:
return path
else:
if encoding is None:
encoding = sys.getfilesystemencoding()
return path.decode(encoding)
def _coerceToFilesystemEncoding(path, newpath, encoding=None):
"""
Return a C{newpath} that is suitable for joining to C{path}.
@param path: The path that it should be suitable for joining to.
@param newpath: The new portion of the path to be coerced if needed.
@param encoding: If coerced, the encoding that will be used.
"""
if type(path) == bytes:
return _asFilesystemBytes(newpath, encoding=encoding)
else:
return _asFilesystemText(newpath, encoding=encoding)
@comparable
@implementer(IFilePath)
class FilePath(AbstractFilePath):
"""
I am a path on the filesystem that only permits 'downwards' access.
Instantiate me with a pathname (for example,
FilePath('/home/myuser/public_html')) and I will attempt to only provide
access to files which reside inside that path. I may be a path to a file,
a directory, or a file which does not exist.
The correct way to use me is to instantiate me, and then do ALL filesystem
access through me. In other words, do not import the 'os' module; if you
need to open a file, call my 'open' method. If you need to list a
directory, call my 'path' method.
Even if you pass me a relative path, I will convert that to an absolute
path internally.
The type of C{path} when instantiating decides the mode of the L{FilePath}.
That is, C{FilePath(b"/")} will return a L{bytes} mode L{FilePath}, and
C{FilePath(u"/")} will return a L{unicode} mode L{FilePath}.
C{FilePath("/")} will return a L{bytes} mode L{FilePath} on Python 2, and a
L{unicode} mode L{FilePath} on Python 3.
Methods that return a new L{FilePath} use the type of the given subpath to
decide its mode. For example, C{FilePath(b"/").child(u"tmp")} will return a
L{unicode} mode L{FilePath}.
@type alwaysCreate: L{bool}
@ivar alwaysCreate: When opening this file, only succeed if the file does
not already exist.
@ivar path: The path from which 'downward' traversal is permitted.
"""
_statinfo = None
path: Union[bytes, str] = None # type: ignore[assignment]
def __init__(self, path, alwaysCreate=False):
"""
Convert a path string to an absolute path if necessary and initialize
the L{FilePath} with the result.
"""
self.path = abspath(path)
self.alwaysCreate = alwaysCreate
def __getstate__(self):
"""
Support serialization by discarding cached L{os.stat} results and
returning everything else.
"""
d = self.__dict__.copy()
if "_statinfo" in d:
del d["_statinfo"]
return d
@property
def sep(self):
"""
Return a filesystem separator.
@return: The native filesystem separator.
@returntype: The same type as C{self.path}.
"""
return _coerceToFilesystemEncoding(self.path, os.sep)
def _asBytesPath(self, encoding=None):
"""
Return the path of this L{FilePath} as bytes.
@param encoding: The encoding to use if coercing to L{bytes}. If none is
given, L{sys.getfilesystemencoding} is used.
@return: L{bytes}
"""
return _asFilesystemBytes(self.path, encoding=encoding)
def _asTextPath(self, encoding=None):
"""
Return the path of this L{FilePath} as text.
@param encoding: The encoding to use if coercing to L{unicode}. If none
is given, L{sys.getfilesystemencoding} is used.
@return: L{unicode}
"""
return _asFilesystemText(self.path, encoding=encoding)
def asBytesMode(self, encoding=None):
"""
Return this L{FilePath} in L{bytes}-mode.
@param encoding: The encoding to use if coercing to L{bytes}. If none is
given, L{sys.getfilesystemencoding} is used.
@return: L{bytes} mode L{FilePath}
"""
if type(self.path) == str:
return self.clonePath(self._asBytesPath(encoding=encoding))
return self
def asTextMode(self, encoding=None):
"""
Return this L{FilePath} in L{unicode}-mode.
@param encoding: The encoding to use if coercing to L{unicode}. If none
is given, L{sys.getfilesystemencoding} is used.
@return: L{unicode} mode L{FilePath}
"""
if type(self.path) == bytes:
return self.clonePath(self._asTextPath(encoding=encoding))
return self
def _getPathAsSameTypeAs(self, pattern):
"""
If C{pattern} is C{bytes}, return L{FilePath.path} as L{bytes}.
Otherwise, return L{FilePath.path} as L{unicode}.
@param pattern: The new element of the path that L{FilePath.path} may
need to be coerced to match.
"""
if type(pattern) == bytes:
return self._asBytesPath()
else:
return self._asTextPath()
def child(self, path):
"""
Create and return a new L{FilePath} representing a path contained by
C{self}.
@param path: The base name of the new L{FilePath}. If this contains
directory separators or parent references it will be rejected.
@type path: L{bytes} or L{unicode}
@raise InsecurePath: If the result of combining this path with C{path}
would result in a path which is not a direct child of this path.
@return: The child path.
@rtype: L{FilePath} with a mode equal to the type of C{path}.
"""
colon = _coerceToFilesystemEncoding(path, ":")
sep = _coerceToFilesystemEncoding(path, os.sep)
ourPath = self._getPathAsSameTypeAs(path)
if platform.isWindows() and path.count(colon):
# Catch paths like C:blah that don't have a slash
raise InsecurePath(f"{path!r} contains a colon.")
norm = normpath(path)
if sep in norm:
raise InsecurePath(f"{path!r} contains one or more directory separators")
newpath = abspath(joinpath(ourPath, norm))
if not newpath.startswith(ourPath):
raise InsecurePath(f"{newpath!r} is not a child of {ourPath}")
return self.clonePath(newpath)
def preauthChild(self, path):
"""
Use me if C{path} might have slashes in it, but you know they're safe.
@param path: A relative path (ie, a path not starting with C{"/"})
which will be interpreted as a child or descendant of this path.
@type path: L{bytes} or L{unicode}
@return: The child path.
@rtype: L{FilePath} with a mode equal to the type of C{path}.
"""
ourPath = self._getPathAsSameTypeAs(path)
newpath = abspath(joinpath(ourPath, normpath(path)))
if not newpath.startswith(ourPath):
raise InsecurePath(f"{newpath} is not a child of {ourPath}")
return self.clonePath(newpath)
def childSearchPreauth(self, *paths):
"""
Return my first existing child with a name in C{paths}.
C{paths} is expected to be a list of *pre-secured* path fragments;
in most cases this will be specified by a system administrator and not
an arbitrary user.
If no appropriately-named children exist, this will return L{None}.
@return: L{None} or the child path.
@rtype: L{None} or L{FilePath}
"""
for child in paths:
p = self._getPathAsSameTypeAs(child)
jp = joinpath(p, child)
if exists(jp):
return self.clonePath(jp)
def siblingExtensionSearch(self, *exts):
"""
Attempt to return a path with my name, given multiple possible
extensions.
Each extension in C{exts} will be tested and the first path which
exists will be returned. If no path exists, L{None} will be returned.
If C{''} is in C{exts}, then if the file referred to by this path
exists, C{self} will be returned.
        The extension C{'*'} has a magic meaning: any path that begins with
        C{self.path + '.'} is acceptable.
"""
for ext in exts:
if not ext and self.exists():
return self
p = self._getPathAsSameTypeAs(ext)
star = _coerceToFilesystemEncoding(ext, "*")
dot = _coerceToFilesystemEncoding(ext, ".")
if ext == star:
basedot = basename(p) + dot
for fn in listdir(dirname(p)):
if fn.startswith(basedot):
return self.clonePath(joinpath(dirname(p), fn))
p2 = p + ext
if exists(p2):
return self.clonePath(p2)
def realpath(self):
"""
Returns the absolute target as a L{FilePath} if self is a link, self
otherwise.
        The absolute target is the ultimate file or directory the
link refers to (for instance, if the link refers to another link, and
another...). If the filesystem does not support symlinks, or
if the link is cyclical, raises a L{LinkError}.
Behaves like L{os.path.realpath} in that it does not resolve link
names in the middle (ex. /x/y/z, y is a link to w - realpath on z
will return /x/y/z, not /x/w/z).
@return: L{FilePath} of the target path.
@rtype: L{FilePath}
@raises LinkError: if links are not supported or links are cyclical.
"""
if self.islink():
result = os.path.realpath(self.path)
if result == self.path:
raise LinkError("Cyclical link - will loop forever")
return self.clonePath(result)
return self
def siblingExtension(self, ext):
"""
Attempt to return a path with my name, given the extension at C{ext}.
@param ext: File-extension to search for.
@type ext: L{bytes} or L{unicode}
@return: The sibling path.
@rtype: L{FilePath} with the same mode as the type of C{ext}.
"""
ourPath = self._getPathAsSameTypeAs(ext)
return self.clonePath(ourPath + ext)
def linkTo(self, linkFilePath):
"""
        Creates a symlink to self at the path given by the L{FilePath}
C{linkFilePath}.
Only works on posix systems due to its dependence on
L{os.symlink}. Propagates L{OSError}s up from L{os.symlink} if
C{linkFilePath.parent()} does not exist, or C{linkFilePath} already
exists.
@param linkFilePath: a FilePath representing the link to be created.
@type linkFilePath: L{FilePath}
"""
os.symlink(self.path, linkFilePath.path)
def open(self, mode: str = "r") -> IO[bytes]:
"""
Open this file using C{mode} or for writing if C{alwaysCreate} is
C{True}.
In all cases the file is opened in binary mode, so it is not necessary
to include C{"b"} in C{mode}.
@param mode: The mode to open the file in. Default is C{"r"}.
@raises AssertionError: If C{"a"} is included in the mode and
C{alwaysCreate} is C{True}.
@return: An open file-like object.
"""
if self.alwaysCreate:
assert "a" not in mode, (
"Appending not supported when " "alwaysCreate == True"
)
return self.create()
# Make sure we open with exactly one "b" in the mode.
mode = mode.replace("b", "")
return open(self.path, mode + "b")
# stat methods below
def restat(self, reraise=True):
"""
Re-calculate cached effects of 'stat'. To refresh information on this
path after you know the filesystem may have changed, call this method.
@param reraise: a boolean. If true, re-raise exceptions from
L{os.stat}; otherwise, mark this path as not existing, and remove
any cached stat information.
@raise Exception: If C{reraise} is C{True} and an exception occurs
while reloading metadata.
"""
try:
self._statinfo = stat(self.path)
except OSError:
self._statinfo = 0
if reraise:
raise
def changed(self):
"""
Clear any cached information about the state of this path on disk.
@since: 10.1.0
"""
self._statinfo = None
def chmod(self, mode):
"""
Changes the permissions on self, if possible. Propagates errors from
L{os.chmod} up.
@param mode: integer representing the new permissions desired (same as
the command line chmod)
@type mode: L{int}
"""
os.chmod(self.path, mode)
def getsize(self):
"""
Retrieve the size of this file in bytes.
@return: The size of the file at this file path in bytes.
@raise Exception: if the size cannot be obtained.
@rtype: L{int}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_size
def getModificationTime(self):
"""
        Retrieve the time of the last modification to this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return float(st.st_mtime)
def getStatusChangeTime(self):
"""
Retrieve the time of the last status change for this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return float(st.st_ctime)
def getAccessTime(self):
"""
Retrieve the time that this file was last accessed.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return float(st.st_atime)
def getInodeNumber(self):
"""
Retrieve the file serial number, also called inode number, which
distinguishes this file from all other files on the same device.
@raise NotImplementedError: if the platform is Windows, since the
inode number would be a dummy value for all files in Windows
@return: a number representing the file serial number
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_ino
def getDevice(self):
"""
Retrieves the device containing the file. The inode number and device
number together uniquely identify the file, but the device number is
not necessarily consistent across reboots or system crashes.
@raise NotImplementedError: if the platform is Windows, since the
device number would be 0 for all partitions on a Windows platform
@return: a number representing the device
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_dev
def getNumberOfHardLinks(self):
"""
Retrieves the number of hard links to the file.
This count keeps track of how many directories have entries for this
file. If the count is ever decremented to zero then the file itself is
discarded as soon as no process still holds it open. Symbolic links
are not counted in the total.
@raise NotImplementedError: if the platform is Windows, since Windows
doesn't maintain a link count for directories, and L{os.stat} does
not set C{st_nlink} on Windows anyway.
@return: the number of hard links to the file
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_nlink
def getUserID(self):
"""
Returns the user ID of the file's owner.
@raise NotImplementedError: if the platform is Windows, since the UID
is always 0 on Windows
@return: the user ID of the file's owner
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_uid
def getGroupID(self):
"""
Returns the group ID of the file.
@raise NotImplementedError: if the platform is Windows, since the GID
is always 0 on windows
@return: the group ID of the file
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_gid
def getPermissions(self):
"""
Returns the permissions of the file. Should also work on Windows,
however, those permissions may not be what is expected in Windows.
@return: the permissions for the file
@rtype: L{Permissions}
@since: 11.1
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return Permissions(S_IMODE(st.st_mode))
def exists(self):
"""
Check if this L{FilePath} exists.
@return: C{True} if the stats of C{path} can be retrieved successfully,
C{False} in the other cases.
@rtype: L{bool}
"""
if self._statinfo:
return True
else:
self.restat(False)
if self._statinfo:
return True
else:
return False
def isdir(self):
"""
Check if this L{FilePath} refers to a directory.
@return: C{True} if this L{FilePath} refers to a directory, C{False}
otherwise.
@rtype: L{bool}
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISDIR(st.st_mode)
def isfile(self):
"""
Check if this file path refers to a regular file.
@return: C{True} if this L{FilePath} points to a regular file (not a
directory, socket, named pipe, etc), C{False} otherwise.
@rtype: L{bool}
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISREG(st.st_mode)
def isBlockDevice(self):
"""
Returns whether the underlying path is a block device.
@return: C{True} if it is a block device, C{False} otherwise
@rtype: L{bool}
@since: 11.1
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISBLK(st.st_mode)
def isSocket(self):
"""
Returns whether the underlying path is a socket.
@return: C{True} if it is a socket, C{False} otherwise
@rtype: L{bool}
@since: 11.1
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISSOCK(st.st_mode)
def islink(self):
"""
Check if this L{FilePath} points to a symbolic link.
@return: C{True} if this L{FilePath} points to a symbolic link,
C{False} otherwise.
@rtype: L{bool}
"""
# We can't use cached stat results here, because that is the stat of
# the destination - (see #1773) which in *every case* but this one is
# the right thing to use. We could call lstat here and use that, but
# it seems unlikely we'd actually save any work that way. -glyph
return islink(self.path)
def isabs(self):
"""
Check if this L{FilePath} refers to an absolute path.
This always returns C{True}.
@return: C{True}, always.
@rtype: L{bool}
"""
return isabs(self.path)
def listdir(self):
"""
List the base names of the direct children of this L{FilePath}.
@return: A L{list} of L{bytes}/L{unicode} giving the names of the
contents of the directory this L{FilePath} refers to. These names
are relative to this L{FilePath}.
@rtype: L{list}
@raise OSError: Any exception the platform L{os.listdir} implementation
may raise.
"""
return listdir(self.path)
def splitext(self):
"""
Split the file path into a pair C{(root, ext)} such that
C{root + ext == path}.
@return: Tuple where the first item is the filename and second item is
the file extension. See Python docs for L{os.path.splitext}.
@rtype: L{tuple}
"""
return splitext(self.path)
def __repr__(self) -> str:
return f"FilePath({self.path!r})"
def touch(self):
"""
Updates the access and last modification times of the file at this
file path to the current time. Also creates the file if it does not
already exist.
@raise Exception: if unable to create or modify the last modification
time of the file.
"""
try:
self.open("a").close()
except OSError:
pass
utime(self.path, None)
def remove(self):
"""
Removes the file or directory that is represented by self. If
C{self.path} is a directory, recursively remove all its children
before removing the directory. If it's a file or link, just delete it.
"""
if self.isdir() and not self.islink():
for child in self.children():
child.remove()
os.rmdir(self.path)
else:
os.remove(self.path)
self.changed()
def makedirs(self, ignoreExistingDirectory=False):
"""
Create all directories not yet existing in C{path} segments, using
L{os.makedirs}.
@param ignoreExistingDirectory: Don't raise L{OSError} if directory
already exists.
@type ignoreExistingDirectory: L{bool}
@return: L{None}
"""
try:
return os.makedirs(self.path)
except OSError as e:
if not (
e.errno == errno.EEXIST and ignoreExistingDirectory and self.isdir()
):
raise
def globChildren(self, pattern):
"""
Assuming I am representing a directory, return a list of FilePaths
representing my children that match the given pattern.
@param pattern: A glob pattern to use to match child paths.
@type pattern: L{unicode} or L{bytes}
@return: A L{list} of matching children.
@rtype: L{list} of L{FilePath}, with the mode of C{pattern}'s type
"""
sep = _coerceToFilesystemEncoding(pattern, os.sep)
ourPath = self._getPathAsSameTypeAs(pattern)
import glob
path = ourPath[-1] == sep and ourPath + pattern or sep.join([ourPath, pattern])
return [self.clonePath(p) for p in glob.glob(path)]
def basename(self):
"""
Retrieve the final component of the file path's path (everything
after the final path separator).
@return: The final component of the L{FilePath}'s path (Everything
after the final path separator).
@rtype: the same type as this L{FilePath}'s C{path} attribute
"""
return basename(self.path)
def dirname(self):
"""
Retrieve all of the components of the L{FilePath}'s path except the
last one (everything up to the final path separator).
@return: All of the components of the L{FilePath}'s path except the
last one (everything up to the final path separator).
@rtype: the same type as this L{FilePath}'s C{path} attribute
"""
return dirname(self.path)
def parent(self):
"""
A file path for the directory containing the file at this file path.
@return: A L{FilePath} representing the path which directly contains
this L{FilePath}.
@rtype: L{FilePath}
"""
return self.clonePath(self.dirname())
def setContent(self, content, ext=b".new"):
"""
Replace the file at this path with a new file that contains the given
bytes, trying to avoid data-loss in the meanwhile.
On UNIX-like platforms, this method does its best to ensure that by the
time this method returns, either the old contents I{or} the new
contents of the file will be present at this path for subsequent
readers regardless of premature device removal, program crash, or power
loss, making the following assumptions:
- your filesystem is journaled (i.e. your filesystem will not
I{itself} lose data due to power loss)
- your filesystem's C{rename()} is atomic
- your filesystem will not discard new data while preserving new
metadata (see U{http://mjg59.livejournal.com/108257.html} for
more detail)
On most versions of Windows there is no atomic C{rename()} (see
U{http://bit.ly/win32-overwrite} for more information), so this method
is slightly less helpful. There is a small window where the file at
this path may be deleted before the new file is moved to replace it:
however, the new file will be fully written and flushed beforehand so
in the unlikely event that there is a crash at that point, it should be
possible for the user to manually recover the new version of their
data. In the future, Twisted will support atomic file moves on those
versions of Windows which I{do} support them: see U{Twisted ticket
3004<http://twistedmatrix.com/trac/ticket/3004>}.
This method should be safe for use by multiple concurrent processes,
but note that it is not easy to predict which process's contents will
ultimately end up on disk if they invoke this method at close to the
same time.
@param content: The desired contents of the file at this path.
@type content: L{bytes}
@param ext: An extension to append to the temporary filename used to
store the bytes while they are being written. This can be used to
make sure that temporary files can be identified by their suffix,
for cleanup in case of crashes.
@type ext: L{bytes}
"""
sib = self.temporarySibling(ext)
with sib.open("w") as f:
f.write(content)
if platform.isWindows() and exists(self.path):
os.unlink(self.path)
os.rename(sib.path, self.asBytesMode().path)
def __cmp__(self, other):
if not isinstance(other, FilePath):
return NotImplemented
return cmp(self.path, other.path)
def createDirectory(self):
"""
Create the directory the L{FilePath} refers to.
@see: L{makedirs}
@raise OSError: If the directory cannot be created.
"""
os.mkdir(self.path)
def requireCreate(self, val=1):
"""
Sets the C{alwaysCreate} variable.
@param val: C{True} or C{False}, indicating whether opening this path
will be required to create the file or not.
@type val: L{bool}
@return: L{None}
"""
self.alwaysCreate = val
def create(self) -> IO[bytes]:
"""
Exclusively create a file, only if this file previously did not exist.
@return: A file-like object opened from this path.
"""
fdint = os.open(self.path, _CREATE_FLAGS)
# XXX TODO: 'name' attribute of returned files is not mutable or
# settable via fdopen, so this file is slightly less functional than the
# one returned from 'open' by default. send a patch to Python...
return cast(IO[bytes], os.fdopen(fdint, "w+b"))
def temporarySibling(self, extension=b""):
"""
Construct a path referring to a sibling of this path.
        The resulting path will be unpredictable, so that other processes
        should neither accidentally attempt to refer to the same path before
        it is created, nor be able to guess its name in advance.
@param extension: A suffix to append to the created filename. (Note
that if you want an extension with a '.' you must include the '.'
yourself.)
@type extension: L{bytes} or L{unicode}
@return: a path object with the given extension suffix, C{alwaysCreate}
set to True.
@rtype: L{FilePath} with a mode equal to the type of C{extension}
"""
ourPath = self._getPathAsSameTypeAs(extension)
sib = self.sibling(
_secureEnoughString(ourPath)
+ self.clonePath(ourPath).basename()
+ extension
)
sib.requireCreate()
return sib
_chunkSize = 2 ** 2 ** 2 ** 2
def copyTo(self, destination, followLinks=True):
"""
Copies self to destination.
If self doesn't exist, an OSError is raised.
If self is a directory, this method copies its children (but not
itself) recursively to destination - if destination does not exist as a
directory, this method creates it. If destination is a file, an
IOError will be raised.
If self is a file, this method copies it to destination. If
destination is a file, this method overwrites it. If destination is a
directory, an IOError will be raised.
If self is a link (and followLinks is False), self will be copied
over as a new symlink with the same target as returned by os.readlink.
That means that if it is absolute, both the old and new symlink will
link to the same thing. If it's relative, then perhaps not (and
it's also possible that this relative link will be broken).
File/directory permissions and ownership will NOT be copied over.
If followLinks is True, symlinks are followed so that they're treated
as their targets. In other words, if self is a link, the link's target
will be copied. If destination is a link, self will be copied to the
destination's target (the actual destination will be destination's
        target). Symlinks under self (if self is a directory) will be
        followed and their targets' children will be copied recursively.
If followLinks is False, symlinks will be copied over as symlinks.
@param destination: the destination (a FilePath) to which self
should be copied
@param followLinks: whether symlinks in self should be treated as links
or as their targets
"""
if self.islink() and not followLinks:
os.symlink(os.readlink(self.path), destination.path)
return
# XXX TODO: *thorough* audit and documentation of the exact desired
# semantics of this code. Right now the behavior of existent
# destination symlinks is convenient, and quite possibly correct, but
# its security properties need to be explained.
if self.isdir():
if not destination.exists():
destination.createDirectory()
for child in self.children():
destChild = destination.child(child.basename())
child.copyTo(destChild, followLinks)
elif self.isfile():
with destination.open("w") as writefile, self.open() as readfile:
while 1:
# XXX TODO: optionally use os.open, os.read and
# O_DIRECT and use os.fstatvfs to determine chunk sizes
# and make *****sure**** copy is page-atomic; the
# following is good enough for 99.9% of everybody and
# won't take a week to audit though.
chunk = readfile.read(self._chunkSize)
writefile.write(chunk)
if len(chunk) < self._chunkSize:
break
elif not self.exists():
raise OSError(errno.ENOENT, "No such file or directory")
else:
# If you see the following message because you want to copy
# symlinks, fifos, block devices, character devices, or unix
# sockets, please feel free to add support to do sensible things in
# reaction to those types!
raise NotImplementedError("Only copying of files and directories supported")
def moveTo(self, destination, followLinks=True):
"""
Move self to destination - basically renaming self to whatever
destination is named.
If destination is an already-existing directory,
moves all children to destination if destination is empty. If
destination is a non-empty directory, or destination is a file, an
OSError will be raised.
If moving between filesystems, self needs to be copied, and everything
that applies to copyTo applies to moveTo.
@param destination: the destination (a FilePath) to which self
should be copied
@param followLinks: whether symlinks in self should be treated as links
or as their targets (only applicable when moving between
filesystems)
"""
try:
os.rename(self._getPathAsSameTypeAs(destination.path), destination.path)
except OSError as ose:
if ose.errno == errno.EXDEV:
# man 2 rename, ubuntu linux 5.10 "breezy":
# oldpath and newpath are not on the same mounted filesystem.
# (Linux permits a filesystem to be mounted at multiple
# points, but rename(2) does not work across different mount
# points, even if the same filesystem is mounted on both.)
# that means it's time to copy trees of directories!
secsib = destination.temporarySibling()
self.copyTo(secsib, followLinks) # slow
secsib.moveTo(destination, followLinks) # visible
# done creating new stuff. let's clean me up.
mysecsib = self.temporarySibling()
self.moveTo(mysecsib, followLinks) # visible
mysecsib.remove() # slow
else:
raise
else:
self.changed()
destination.changed()
FilePath.clonePath = FilePath # type: ignore[attr-defined]
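# --- Illustrative usage sketch (editor's addition, not part of Twisted) ---
# A minimal example of the "downwards-only" contract documented on FilePath:
# child() accepts only plain names and raises InsecurePath for anything that
# would escape the base directory, while setContent() replaces a file's bytes
# via a temporary sibling and a rename. The directory and file names below are
# hypothetical, and this helper is never called by the module itself.
def _exampleFilePathUsage():
    base = FilePath(b"/tmp/public_html")
    page = base.child(b"index.html")
    page.setContent(b"<html></html>")
    try:
        base.child(b"../etc/passwd")  # rejected: contains a path separator
    except InsecurePath:
        pass
    with page.open() as f:  # always opened in binary mode, see open()
        return f.read()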
| 33.554658 | 88 | 0.602817 |
f76598553aa8a1a407308bcda49fec0a737f721f
| 5,950 |
py
|
Python
|
infra/libs/git.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | 2 |
2021-04-13T21:22:18.000Z
|
2021-09-07T02:11:57.000Z
|
infra/libs/git.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | 21 |
2020-09-06T02:41:05.000Z
|
2022-03-02T04:40:01.000Z
|
infra/libs/git.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for dealing with Git repositories."""
import logging
import os
import shutil
import stat
import subprocess
import sys
import tempfile
import time
from types import MethodType
LOGGER = logging.getLogger(__name__)
class GitException(UserWarning):
"""Indicate that an error occured in the infra.libs.git module."""
pass
class Git(object):
"""Wrapper class to abstract git operations against a single repository.
Args:
path (str): The absolute or relative path to the repository on disk.
"""
def __init__(self, path): # pragma: no cover
self.path = os.path.abspath(path)
def __call__(self, *args, **kwargs): # pragma: no cover
"""Run a git command and returns its combined stdout and stderr.
Args:
args (list): passed as argument to the 'git' command.
kwargs (dict): passed to subprocess.check_output
Returns:
output (str): combined stdout and stderr.
"""
cmd = ['git'] + [str(arg) for arg in args]
kwargs.setdefault('cwd', self.path)
LOGGER.debug('Running `%s` with %s', ' '.join(cmd), kwargs)
out = subprocess.check_output(
cmd, stderr=subprocess.STDOUT, **kwargs)
return out
@property
def bare(self): # pragma: no cover
"""True if the repository is bare (is just the .git directory)."""
return self('config', '--get', 'core.bare').strip() == 'true'
def show(self, ref, path, *args): # pragma: no cover
"""Get the contents of a Git object (blob, tree, tag, or commit).
Args:
ref (string): The ref at which to show the object.
Can be an empty string.
path (string): The path to the blob or tree, relative to repository root.
Returns:
content (str): the requested object.
"""
treeish = ref + (':%s' % path if path else '')
cmd = ['show', treeish] + list(args)
return self(*cmd)
def number(self, *refs): # pragma: no cover
"""Get the commit position of each input ref.
Args:
refs (tuple of refishes): refishes to number.
Returns:
positions (list of [str|None]): respective numbers.
"""
positions = []
for ref in refs:
cmd = ['show', '-s', '--format=%B', ref]
out = self(*cmd)
found = False
for line in reversed(out.splitlines()):
if line.startswith('Cr-Commit-Position: '):
positions.append(line.split()[-1].strip())
found = True
break
if not found:
positions.append(None)
return positions
def NewGit(url, path, bare=False): # pragma: no cover
"""Factory function to create a Git object against a remote url.
  Ensures the given path exists. If a git repository is already present,
  ensures that it points at the given url; otherwise, clones a git repository
  from the given url.
Args:
url (str): The url of the remote repository.
path (str): The path to the local version of the repository.
    bare (bool, optional): Whether or not the local repo should be a bare clone.
Returns:
repo (:class:`Git`): object representing the local git repository.
Raises:
GitException
"""
# If the directory doesn't exist, create it.
if not os.path.isdir(path):
os.makedirs(path)
git = Git(path)
# If the directory has nothing in it, clone into it.
if not os.listdir(path):
b = ['--bare'] if bare else []
clone_cmd = ['clone'] + b + [url, '.']
git(*clone_cmd)
return git
# If the directory is already correctly configured for this repo, fetch.
try:
curr_url = git('config', '--get', 'remote.origin.url').strip()
if curr_url != url:
msg = ('A Git repo at %s exists, '
'but has %s configured as remote origin url.' % (path, curr_url))
LOGGER.error(msg)
raise GitException(msg)
if git.bare != bare:
msg = ('A Git repo at %s exists, but is %sbare.' %
(path, 'not ' if not git.bare else ''))
LOGGER.error(msg)
raise GitException(msg)
except subprocess.CalledProcessError:
msg = 'There appears to already be something else at %s.' % path
LOGGER.error(msg)
raise GitException(msg)
try:
git('fetch', 'origin')
except subprocess.CalledProcessError:
LOGGER.error('Failed to fetch origin.')
return git
def TmpGit(url, bare=False): # pragma: no cover
"""Factory function to create a temporary Git object against a remote url.
Creates a temporary directory, clones the repository into that directory,
and returns a Git object pointing at that temporary directory. The instance
will clean up after itself by deleting the temporary directory.
Args:
url (str): The url of the remote repository.
bare (bool): Whether or not the local repo should be a bare clone.
Returns:
git_repo (:class:`Git`): the local temporary git clone.
"""
path = tempfile.mkdtemp()
git = NewGit(url, path, bare)
def __del__(git_obj):
"""Destroy the temporary directory."""
def rm_on_error(_func, _path, _exc):
"""Error handling function to enforce removal of readonly files."""
if sys.platform.startswith('win'):
# On windows, we'll just fall back to using cmd.exe's rd.
for _ in xrange(3):
exitcode = subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', path])
if exitcode == 0:
return
else:
LOGGER.warn('rd exited with code %d', exitcode)
time.sleep(3)
LOGGER.fatal('Failed to remove path %s', path)
else:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # 777
shutil.rmtree(path, ignore_errors=False, onerror=rm_on_error)
shutil.rmtree(git_obj.path, ignore_errors=False, onerror=rm_on_error)
git.__del__ = MethodType(__del__, git, Git) # pylint: disable=W0201
return git
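# --- Illustrative usage sketch (editor's addition, not part of the original) ---
# How the helpers above compose: NewGit() ensures a clone exists at a local
# path and returns a Git wrapper whose __call__ shells out to git; number()
# then extracts Cr-Commit-Position footers. The url and path are placeholders.
def _example_usage():  # pragma: no cover
  repo = NewGit('https://chromium.googlesource.com/example.git', '/tmp/example')
  head = repo('rev-parse', 'HEAD').strip()
  positions = repo.number('HEAD')  # e.g. ['refs/heads/master@{#12345}'] or [None]
  return head, positions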
| 30.670103 | 79 | 0.647731 |
f765cb2582b2b3cdee9ee2c1f4e944150a035732
| 7,684 |
py
|
Python
|
src/ggrc_basic_permissions/models.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc_basic_permissions/models.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | 10 |
2018-07-06T00:04:23.000Z
|
2021-02-26T21:13:20.000Z
|
src/ggrc_basic_permissions/models.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | 1 |
2017-11-11T22:16:56.000Z
|
2017-11-11T22:16:56.000Z
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import json
from logging import getLogger
from sqlalchemy.orm import backref
from ggrc import db
from ggrc.models import all_models
from ggrc.builder import simple_property
from ggrc.models.context import Context
from ggrc.models.person import Person
from ggrc.models.mixins import Base, Described
from ggrc.models import reflection
from ggrc_basic_permissions.contributed_roles import (
DECLARED_ROLE,
get_declared_role,
)
# pylint: disable=invalid-name
logger = getLogger(__name__)
class Role(Base, Described, db.Model):
"""A user role. All roles have a unique name. This name could be a simple
string, an email address, or some other form of string identifier.
Example:
.. code-block:: python
{
'create': ['Program', 'Control'],
'read': ['Program', 'Control'],
'update': ['Program', 'Control'],
'delete': ['Program'],
}
"""
__tablename__ = 'roles'
name = db.Column(db.String(128), nullable=False)
permissions_json = db.Column(db.Text(), nullable=False)
scope = db.Column(db.String(64), nullable=True)
role_order = db.Column(db.Integer(), nullable=True)
@simple_property
def permissions(self):
if self.permissions_json == DECLARED_ROLE:
declared_role = get_declared_role(self.name)
permissions = declared_role.permissions
else:
permissions = json.loads(self.permissions_json) or {}
# make sure not to omit actions
for action in ['create', 'read', 'update', 'delete']:
if action not in permissions:
permissions[action] = []
return permissions
@permissions.setter
def permissions(self, value):
self.permissions_json = json.dumps(value)
_api_attrs = reflection.ApiAttributes(
'name',
'permissions',
'scope',
'role_order',
)
def _display_name(self):
return self.name
Person._api_attrs.add('user_roles')
# FIXME: Cannot use `include_links`, because Memcache expiry doesn't handle
# sub-resources correctly
# Person._include_links.extend(['user_roles'])
# Override `Person.eager_query` to ensure `user_roles` is loaded efficiently
_orig_Person_eager_query = Person.eager_query
def _Person_eager_query(cls):
from sqlalchemy import orm
return _orig_Person_eager_query().options(
orm.subqueryload('user_roles'),
# orm.subqueryload('user_roles').undefer_group('UserRole_complete'),
# orm.subqueryload('user_roles').joinedload('context'),
# orm.subqueryload('user_roles').joinedload('role'),
)
Person.eager_query = classmethod(_Person_eager_query)
Context._api_attrs.add('user_roles')
_orig_Context_eager_query = Context.eager_query
def _Context_eager_query(cls):
from sqlalchemy import orm
return _orig_Context_eager_query().options(orm.subqueryload('user_roles'))
Context.eager_query = classmethod(_Context_eager_query)
class UserRole(Base, db.Model):
__tablename__ = 'user_roles'
# Override default from `ContextRBAC` to provide backref
context = db.relationship('Context', backref='user_roles')
role_id = db.Column(db.Integer(), db.ForeignKey('roles.id'), nullable=False)
role = db.relationship(
'Role', backref=backref('user_roles', cascade='all, delete-orphan'))
person_id = db.Column(
db.Integer(), db.ForeignKey('people.id'), nullable=False)
person = db.relationship(
'Person', backref=backref('user_roles', cascade='all, delete-orphan'))
@staticmethod
def _extra_table_args(cls):
return (db.Index('ix_user_roles_person', 'person_id'),)
_api_attrs = reflection.ApiAttributes('role', 'person')
@classmethod
def role_assignments_for(cls, context):
context_id = context.id if type(context) is Context else context
all_assignments = db.session.query(UserRole)\
.filter(UserRole.context_id == context_id)
assignments_by_user = {}
for assignment in all_assignments:
assignments_by_user.setdefault(assignment.person.email, [])\
.append(assignment.role)
return assignments_by_user
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(UserRole, cls).eager_query()
return query.options(
orm.subqueryload('role'),
orm.subqueryload('person'),
orm.subqueryload('context'))
def _display_name(self):
if self.context and self.context.related_object_type and \
self.context.related_object:
context_related = ' in ' + self.context.related_object.display_name
elif hasattr(self, '_display_related_title'):
context_related = ' in ' + self._display_related_title
elif self.context:
logger.warning('Unable to identify context.related for UserRole')
context_related = ''
else:
context_related = ''
return u'{0} <-> {1}{2}'.format(
self.person.display_name, self.role.display_name, context_related)
class ContextImplication(Base, db.Model):
  '''A role implication between two contexts. An implication may be scoped
with additional scoping properties on the target and source contexts. The
meaning of the scoping properties is determined by the module that
contributed the implication. For example, an implication may be scoped based
on the related objects of the contexts such as from a Program context to
an Audit context.
'''
__tablename__ = 'context_implications'
context_id = db.Column(
db.Integer(), db.ForeignKey('contexts.id'), nullable=True)
source_context_id = db.Column(
db.Integer(), db.ForeignKey('contexts.id'), nullable=True)
context_scope = db.Column(db.String, nullable=True)
source_context_scope = db.Column(db.String, nullable=True)
context = db.relationship(
'Context',
uselist=False,
foreign_keys=[context_id],
)
source_context = db.relationship(
'Context',
uselist=False,
foreign_keys=[source_context_id],
)
def _display_name(self):
if self.source_context:
source_context_display_name = self.source_context.display_name
else:
source_context_display_name = 'Default Context'
if self.context:
context_display_name = self.context.display_name
else:
context_display_name = 'Default Context'
return u'{source_context} -> {context}'.format(
source_context=source_context_display_name,
context=context_display_name,
)
all_models.register_model(Role)
all_models.register_model(UserRole)
all_models.register_model(ContextImplication)
def get_ids_related_to_user_role(object_type, related_type, related_ids):
if object_type == "Person":
related_model = getattr(all_models, related_type, None)
if not hasattr(related_model, "context_id"):
return None
return db.session \
.query(UserRole.person_id.distinct()) \
.join(related_model, related_model.context_id == UserRole.context_id) \
.filter(related_model.id.in_(related_ids))
elif related_type == "Person":
object_model = getattr(all_models, object_type, None)
if not hasattr(object_model, "context_id"):
return None
return db.session \
.query(object_model.id.distinct()) \
.join(UserRole, UserRole.context_id == object_model.context_id) \
.filter(UserRole.person_id.in_(related_ids))
else:
return None
def get_ids_related_to(object_type, related_type, related_ids):
functions = [get_ids_related_to_user_role]
queries = (f(object_type, related_type, related_ids) for f in functions)
non_empty = [q for q in queries if q]
if len(non_empty) == 0:
return None
return non_empty.pop().union(*non_empty)
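# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Role.permissions stores a JSON blob and, on read, guarantees that every CRUD
# action key is present even if the stored JSON omits it. This hypothetical
# helper assumes a Role can be instantiated outside of a database session.
def _example_role_permissions():
  role = Role(name="Reader")
  role.permissions = {"read": ["Program", "Control"]}
  perms = role.permissions
  # perms["read"] == ["Program", "Control"]; "create", "update" and "delete"
  # are filled in as empty lists by the property getter above.
  return perms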
| 31.109312 | 79 | 0.716944 |
f765efe131e3a624b4e3ef959aa66a729d678342
| 7,139 |
py
|
Python
|
tests/parse/test_parse_coordinates.py
|
bjhall/scout
|
ea772cf8d233223e0ec5271f61b95d3afcf719ad
|
[
"BSD-3-Clause"
] | null | null | null |
tests/parse/test_parse_coordinates.py
|
bjhall/scout
|
ea772cf8d233223e0ec5271f61b95d3afcf719ad
|
[
"BSD-3-Clause"
] | null | null | null |
tests/parse/test_parse_coordinates.py
|
bjhall/scout
|
ea772cf8d233223e0ec5271f61b95d3afcf719ad
|
[
"BSD-3-Clause"
] | null | null | null |
from scout.parse.variant.coordinates import (
get_cytoband_coordinates,
get_sub_category,
get_length,
get_end,
parse_coordinates,
)
class CyvcfVariant(object):
"""Mock a cyvcf variant
Default is to return a variant with three individuals high genotype
quality.
"""
def __init__(
self,
chrom="1",
pos=80000,
ref="A",
alt="C",
end=None,
gt_quals=[60, 60, 60],
gt_types=[1, 1, 0],
var_type="snv",
info_dict={},
):
super(CyvcfVariant, self).__init__()
self.CHROM = chrom
self.POS = pos
self.REF = ref
self.ALT = [alt]
self.end = end or pos
self.gt_quals = gt_quals
self.gt_types = gt_types
self.var_type = var_type
self.INFO = info_dict
def test_parse_coordinates_snv():
variant = CyvcfVariant()
coordinates = parse_coordinates(variant, "snv")
assert coordinates["position"] == variant.POS
def test_parse_coordinates_indel():
variant = CyvcfVariant(alt="ACCC", end=80003)
coordinates = parse_coordinates(variant, "snv")
assert coordinates["position"] == variant.POS
assert coordinates["end"] == variant.end
def test_parse_coordinates_translocation():
info_dict = {"SVTYPE": "BND"}
variant = CyvcfVariant(
ref="N",
alt="N[hs37d5:12060532[",
pos=724779,
end=724779,
var_type="sv",
info_dict=info_dict,
)
coordinates = parse_coordinates(variant, "sv")
assert coordinates["position"] == variant.POS
assert coordinates["end"] == 12060532
assert coordinates["end_chrom"] == "hs37d5"
assert coordinates["length"] == 10e10
assert coordinates["sub_category"] == "bnd"
def test_parse_coordinates_translocation_2():
info_dict = {"SVTYPE": "BND"}
variant = CyvcfVariant(
ref="N",
alt="N[GL000232.1:25141[",
pos=724779,
end=724779,
var_type="sv",
info_dict=info_dict,
)
coordinates = parse_coordinates(variant, "sv")
assert coordinates["position"] == variant.POS
assert coordinates["end"] == 25141
assert coordinates["end_chrom"] == "GL000232.1"
assert coordinates["length"] == 10e10
assert coordinates["sub_category"] == "bnd"
###### parse subcategory #######
def test_get_subcategory_snv():
alt_len = 1
ref_len = 1
category = "snv"
svtype = None
sub_category = get_sub_category(alt_len, ref_len, category, svtype)
assert sub_category == "snv"
def test_get_subcategory_indel():
alt_len = 1
ref_len = 3
category = "snv"
svtype = None
sub_category = get_sub_category(alt_len, ref_len, category, svtype)
assert sub_category == "indel"
###### parse length #######
# get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None)
def test_get_length_snv():
alt_len = 1
ref_len = 1
category = "snv"
pos = end = 879537
length = get_length(alt_len, ref_len, category, pos, end)
assert length == 1
def test_get_length_indel():
alt_len = 3
ref_len = 1
category = "snv"
pos = end = 879537
length = get_length(alt_len, ref_len, category, pos, end)
assert length == 2
def test_get_sv_length_small_ins():
## GIVEN an insertion with whole sequence in alt field
alt_len = 296
ref_len = 1
category = "sv"
# Pos and end is same for insertions
pos = end = 144343218
svtype = "ins"
svlen = 296
## WHEN parsing the length
length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
## THEN assert that the length is correct
assert length == 296
def test_get_sv_length_large_ins_no_length():
## GIVEN an imprecise insertion
alt_len = 5
ref_len = 1
category = "sv"
# Pos and end is same for insertions
pos = end = 133920667
svtype = "ins"
svlen = None
## WHEN parsing the length
length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
## THEN assert that the length is correct
assert length == -1
def test_get_sv_length_translocation():
## GIVEN an translocation
alt_len = 16
ref_len = 1
category = "sv"
pos = 726044
end = None
svtype = "bnd"
svlen = None
## WHEN parsing the length
length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
## THEN assert that the length is correct
assert length == 10e10
def test_get_sv_length_cnvnator_del():
## GIVEN an cnvnator type deletion
alt_len = 5
ref_len = 1
category = "sv"
pos = 1
end = 10000
svtype = "del"
svlen = -10000
## WHEN parsing the length
length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
## THEN assert that the length is correct
assert length == 10000
def test_get_sv_length_del_no_length():
## GIVEN an deletion without len
alt_len = 5
ref_len = 1
category = "sv"
pos = 869314
end = 870246
svtype = "del"
svlen = None
## WHEN parsing the length
length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
## THEN assert that the length is correct
assert length == end - pos
###### parse end #######
# get_end(pos, alt, category, snvend, svend, svlen)
# snv/indels are easy since cyvcf2 parses the end for us
def test_get_end_snv():
alt = "T"
category = "snv"
pos = snvend = 879537
end = get_end(pos, alt, category, snvend, svend=None, svlen=None)
assert end == snvend
def test_get_end_indel():
alt = "C"
category = "indel"
pos = 302253
snvend = 302265
end = get_end(pos, alt, category, snvend, svend=None, svlen=None)
assert end == snvend
# SVs are much harder since there are a lot of corner cases
# Most SVs (except translocations) have END annotated in INFO field
# The problem is that many times END==POS and then we have to do some magic on our own
def test_get_end_tiddit_translocation():
## GIVEN a translocation
alt = "N[hs37d5:12060532["
category = "sv"
pos = 724779
## WHEN parsing the end coordinate
end = get_end(pos, alt, category, snvend=None, svend=None, svlen=None)
    ## THEN assert that the end is the same as the end coordinate described in the alt field
assert end == 12060532
def test_get_end_deletion():
## GIVEN a translocation
alt = "<DEL>"
category = "sv"
pos = 869314
svend = 870246
svlen = None
## WHEN parsing the end coordinate
end = get_end(pos, alt, category, snvend=None, svend=svend, svlen=svlen)
    ## THEN assert that the end is the same as the end coordinate described in the alt field
assert end == svend
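# --- Illustrative sketch (editor's addition, not part of the test module) ---
# The translocation tests above rely on the breakend (BND) ALT syntax, e.g.
# "N[hs37d5:12060532[", where the mate locus is embedded as <chrom>:<pos>.
# A minimal, hypothetical parser for that piece of the ALT string; the real
# logic lives in scout.parse.variant.coordinates.
def _example_parse_bnd_alt(alt):
    import re
    match = re.search(r"[\[\]]([^\[\]:]+):(\d+)[\[\]]", alt)
    if not match:
        return None, None
    return match.group(1), int(match.group(2))
# _example_parse_bnd_alt("N[hs37d5:12060532[") -> ("hs37d5", 12060532)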
| 23.639073 | 86 | 0.639305 |
f765fceb9d53ca36913754339081f2c433f71c9c
| 7,152 |
py
|
Python
|
chapter07/03_WindyGridWorld_nStepSARSA_OffPolicy.py
|
cemkaraoguz/reinforcement-learning-an-introduction-second-edition
|
735bfa6b66ffb52b7cf03966164e7bc1755942de
|
[
"MIT"
] | 1 |
2022-03-01T11:48:55.000Z
|
2022-03-01T11:48:55.000Z
|
chapter07/03_WindyGridWorld_nStepSARSA_OffPolicy.py
|
cemkaraoguz/reinforcement-learning-an-introduction-second-edition
|
735bfa6b66ffb52b7cf03966164e7bc1755942de
|
[
"MIT"
] | null | null | null |
chapter07/03_WindyGridWorld_nStepSARSA_OffPolicy.py
|
cemkaraoguz/reinforcement-learning-an-introduction-second-edition
|
735bfa6b66ffb52b7cf03966164e7bc1755942de
|
[
"MIT"
] | null | null | null |
'''
03_WindyGridWorld_nStepSARSA_OffPolicy.py : n-step off-policy SARSA applied to Windy Grid World problem (Example 6.5)
Cem Karaoguz, 2020
MIT License
'''
import numpy as np
import pylab as pl
from IRL.environments.Gridworlds import StochasticGridWorld
from IRL.agents.TemporalDifferenceLearning import nStepOffPolicySARSA
from IRL.utils.Policies import StochasticPolicy
from IRL.utils.Helpers import runSimulation
def runExperiment(nEpisodes, env, agent, policy_behaviour, doUpdateBehaviourPolicy):
reward_sums = []
episodesvstimesteps = []
timesteps = 0
for e in range(nEpisodes):
if(e%10==0):
print("Episode : ", e)
state = env.reset()
action = policy_behaviour.sampleAction(state)
done = False
experiences = [{}]
reward_sums.append(0.0)
while not done:
timesteps += 1
experiences[-1]['state'] = state
experiences[-1]['action'] = action
experiences[-1]['done'] = done
new_state, reward, done = env.step(action)
#print("State:", state, "Action: ", env.actionMapping[action][1], "Reward: ", reward, "New state:", new_state, "done:", done)
new_action = policy_behaviour.sampleAction(new_state)
xp = {}
xp['state'] = new_state
xp['reward'] = reward
xp['done'] = done
xp['action'] = new_action
experiences.append(xp)
agent.update(experiences[-2:], policy_behaviour)
state = new_state
action = new_action
episodesvstimesteps.append([e,timesteps])
reward_sums[-1] += reward
if(doUpdateBehaviourPolicy):
# update behaviour policy to be e-soft version of the target policy
for idx_state in range(env.nStates):
policy_behaviour.update(idx_state, agent.actionValueTable[idx_state,:])
return reward_sums, np.array(episodesvstimesteps)
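# --- Illustrative sketch (editor's addition, not part of the original script) ---
# The imported nStepOffPolicySARSA agent is built around the discounted n-step
# return; the off-policy variant additionally weights its update by an
# importance-sampling ratio between target and behaviour policies. This
# hypothetical helper only shows the return itself.
def example_n_step_return(rewards, gamma, bootstrap_value=0.0):
    """sum_k gamma^k * R_{t+1+k} plus gamma^n times the bootstrapped Q-value."""
    G = bootstrap_value
    for reward in reversed(rewards):
        G = reward + gamma * G
    return G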
if __name__=="__main__":
exerciseID = 0
nExperiments = 1
nEpisodes = 800
# Environment
sizeX = 10
sizeY = 7
defaultReward = -1.0
startStates = [(0,3)]
terminalStates = [(7,3)]
if exerciseID==0:
# Example 6.5
actionMapping = {0:(np.array([0,-1]), "N"), 1:(np.array([0,1]), "S"), 2:(np.array([1,0]), "E"), 3:(np.array([-1,0]), "W")}
sigmaY_actionNoise = 0
elif exerciseID==1:
# Exercise 6.9 part 1
actionMapping = {0:(np.array([0,-1]), "N"), 1:(np.array([0,1]), "S"), 2:(np.array([1,0]), "E"), 3:(np.array([-1,0]), "W"),
4:(np.array([1,-1]), "NE"), 5:(np.array([1,1]), "SE"), 6:(np.array([-1,-1]), "NW"), 7:(np.array([-1,1]), "SW")}
# Example 6.5 and Exercise 6.9
sigmaY_actionNoise = 0
# Exercise 6.10
sigmaY_actionNoise = 1
else:
# Exercise 6.9 part 2
actionMapping = {0:(np.array([0,-1]), "N"), 1:(np.array([0,1]), "S"), 2:(np.array([1,0]), "E"), 3:(np.array([-1,0]), "W"),
4:(np.array([1,-1]), "NE"), 5:(np.array([1,1]), "SE"), 6:(np.array([-1,-1]), "NW"), 7:(np.array([-1,1]), "SW"), 8:(np.array([0,0]), "0")}
sigmaY_actionNoise = 0
actionNoiseParams = {}
aux = [(x,y) for x in range(3,6) for y in range(0,7)]
for pos in aux:
actionNoiseParams[pos] = [0,-1,0,sigmaY_actionNoise]
aux = [(x,y) for x in range(6,8) for y in range(0,7)]
for pos in aux:
actionNoiseParams[pos] = [0,-2,0,sigmaY_actionNoise]
aux = [(8,y) for y in range(0,7)]
for pos in aux:
actionNoiseParams[pos] = [0,-1,0,sigmaY_actionNoise]
# Agent
alpha_nStepOPSARSA_1 = 0.1
gamma_nStepOPSARSA_1 = 1.0
n_nStepOPSARSA_1 = 1
alpha_nStepOPSARSA_2 = 0.1
gamma_nStepOPSARSA_2 = 1.0
n_nStepOPSARSA_2 = 5
alpha_nStepOPSARSA_3 = 0.05
gamma_nStepOPSARSA_3 = 1.0
n_nStepOPSARSA_3 = 10
# Policy
doUpdateBehaviourPolicy = True
epsilon_behaviourPolicy = 0.1
env = StochasticGridWorld(sizeX, sizeY, actionNoiseParams=actionNoiseParams, startStates=startStates,
defaultReward=defaultReward, terminalStates=terminalStates, actionMapping=actionMapping)
env.printEnv()
avg_reward_sums_nStepOPSARSA_1 = np.zeros(nEpisodes)
avg_reward_sums_nStepOPSARSA_2 = np.zeros(nEpisodes)
avg_reward_sums_nStepOPSARSA_3 = np.zeros(nEpisodes)
for idx_experiment in range(1, nExperiments+1):
print("Experiment : ", idx_experiment)
agent_nStepOPSARSA_1 = nStepOffPolicySARSA(env.nStates, env.nActions, alpha_nStepOPSARSA_1, gamma_nStepOPSARSA_1, n_nStepOPSARSA_1)
agent_nStepOPSARSA_2 = nStepOffPolicySARSA(env.nStates, env.nActions, alpha_nStepOPSARSA_2, gamma_nStepOPSARSA_2, n_nStepOPSARSA_2)
agent_nStepOPSARSA_3 = nStepOffPolicySARSA(env.nStates, env.nActions, alpha_nStepOPSARSA_3, gamma_nStepOPSARSA_3, n_nStepOPSARSA_3)
policy_behaviour = StochasticPolicy(env.nStates, env.nActions, policyUpdateMethod="esoft", epsilon=epsilon_behaviourPolicy)
reward_sums_nStepOPSARSA_1, evst_nStepOPSARSA_1 = runExperiment(nEpisodes, env, agent_nStepOPSARSA_1, policy_behaviour, doUpdateBehaviourPolicy)
policy_behaviour = StochasticPolicy(env.nStates, env.nActions, policyUpdateMethod="esoft", epsilon=epsilon_behaviourPolicy)
reward_sums_nStepOPSARSA_2, evst_nStepOPSARSA_2 = runExperiment(nEpisodes, env, agent_nStepOPSARSA_2, policy_behaviour, doUpdateBehaviourPolicy)
policy_behaviour = StochasticPolicy(env.nStates, env.nActions, policyUpdateMethod="esoft", epsilon=epsilon_behaviourPolicy)
reward_sums_nStepOPSARSA_3, evst_nStepOPSARSA_3 = runExperiment(nEpisodes, env, agent_nStepOPSARSA_3, policy_behaviour, doUpdateBehaviourPolicy)
avg_reward_sums_nStepOPSARSA_1 = avg_reward_sums_nStepOPSARSA_1 + (1.0/idx_experiment)*(reward_sums_nStepOPSARSA_1 - avg_reward_sums_nStepOPSARSA_1)
avg_reward_sums_nStepOPSARSA_2 = avg_reward_sums_nStepOPSARSA_2 + (1.0/idx_experiment)*(reward_sums_nStepOPSARSA_2 - avg_reward_sums_nStepOPSARSA_2)
avg_reward_sums_nStepOPSARSA_3 = avg_reward_sums_nStepOPSARSA_3 + (1.0/idx_experiment)*(reward_sums_nStepOPSARSA_3 - avg_reward_sums_nStepOPSARSA_3)
pl.figure()
pl.plot(evst_nStepOPSARSA_1[:,1],evst_nStepOPSARSA_1[:,0], '-r', label=str(n_nStepOPSARSA_1)+' Step SARSA')
pl.plot(evst_nStepOPSARSA_2[:,1],evst_nStepOPSARSA_2[:,0], '-g', label=str(n_nStepOPSARSA_2)+' Step SARSA')
pl.plot(evst_nStepOPSARSA_3[:,1],evst_nStepOPSARSA_3[:,0], '-k', label=str(n_nStepOPSARSA_3)+' Step SARSA')
pl.xlabel("Time steps")
pl.ylabel("Episodes")
pl.legend()
pl.figure()
pl.plot(avg_reward_sums_nStepOPSARSA_1, '-r', label=str(n_nStepOPSARSA_1)+' Step SARSA')
pl.plot(avg_reward_sums_nStepOPSARSA_2, '-g', label=str(n_nStepOPSARSA_2)+' Step SARSA')
pl.plot(avg_reward_sums_nStepOPSARSA_3, '-k', label=str(n_nStepOPSARSA_3)+' Step SARSA')
pl.xlabel("Episodes")
pl.ylabel("Sum of reward during episodes")
pl.legend()
pl.show()
agents = [agent_nStepOPSARSA_1, agent_nStepOPSARSA_2, agent_nStepOPSARSA_3]
for agent in agents:
print("Policy for :", agent.getName())
env.printEnv(agent)
for agent in agents:
input("Press any key to simulate agent "+agent.getName())
agentHistory = runSimulation(env, agent)
print("Simulation:", agent.getName())
env.render(agentHistory)
| 38.869565 | 152 | 0.699385 |
f76605f51fbae6ba9f9c4784486d333023f4e4a1
| 2,314 |
py
|
Python
|
propertyreader.py
|
vijay8451/quicksystem
|
515823ff88991daab1ec36958b252ce448c1ec76
|
[
"MIT"
] | 3 |
2019-06-25T09:04:19.000Z
|
2019-10-01T13:02:29.000Z
|
propertyreader.py
|
vijay8451/quicksystem
|
515823ff88991daab1ec36958b252ce448c1ec76
|
[
"MIT"
] | 2 |
2021-03-31T19:00:02.000Z
|
2021-12-13T19:58:31.000Z
|
propertyreader.py
|
vijay8451/quicksystem
|
515823ff88991daab1ec36958b252ce448c1ec76
|
[
"MIT"
] | null | null | null |
import configparser
config = configparser.ConfigParser()
config.read('quicksystem.properties')
class Properties(object):
"""quicksystem properties reader """
def __init__(self):
self.randomSystem = RandomSystem()
self.beaker = Beaker()
self.jenkinsInstaller = JenkinsInstaller()
self.theSystem = TheSystem()
self.contentHost = ContentHost()
self.mymail = Mymail()
self.logslevel = LogsLevel()
class RandomSystem(object):
"""random system reader"""
def __init__(self):
self.system_count = config.get('RandomSystem', 'system_count')
class Beaker(object):
"""beaker reader"""
def __init__(self):
self.username = config.get('Beaker', 'krb_username')
self.password = config.get('Beaker', 'krb_password')
self.hub = config.get('Beaker', 'beaker_url')
self.env_host_string = config.get('Beaker', 'env_host_string')
self.env_username = config.get('Beaker', 'env_username')
self.env_password = config.get('Beaker', 'env_password')
class JenkinsInstaller(object):
"""Jenkins Installer reader"""
def __init__(self):
self.satellite_distribution = config.get('JenkinsInstaller', 'satellite_distribution')
self.satellite_version = config.get('JenkinsInstaller', 'satellite_version')
self.jenkins_url = config.get('JenkinsInstaller', 'jenkins_url')
self.jenkins_jobname = config.get('JenkinsInstaller', 'jenkins_jobname')
self.setup_fake_manifest_certificate = config.get('JenkinsInstaller',
'setup_fake_manifest_certificate')
class TheSystem(object):
"""theSystem reader"""
def __init__(self):
self.host = config.get('TheSystem', 'host')
self.distrotree = config.get('TheSystem', 'distrotree')
class ContentHost(object):
"""content host reader"""
def __init__(self):
self.host_count = config.get('ContentHost', 'host_count')
class Mymail(object):
"""Email reader"""
def __init__(self):
self.to_mail = config.get('Emails', 'to_mail')
self.from_mail = config.get('Emails', 'from_mail')
class LogsLevel(object):
"""Logs reader"""
def __init__(self):
self.level = config.get('Logs', 'level')
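# --- Illustrative usage sketch (editor's addition, not part of the original) ---
# The readers above expect an INI-style quicksystem.properties file providing
# the sections RandomSystem, Beaker, JenkinsInstaller, TheSystem, ContentHost,
# Emails and Logs, with the keys referenced by config.get() above. A
# hypothetical usage:
def _example_usage():
    props = Properties()
    return props.beaker.hub, props.randomSystem.system_count, props.logslevel.level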
| 32.138889 | 94 | 0.651685 |
f7669e82da83ab072eb08ec76af342e2b055796e
| 2,300 |
py
|
Python
|
Model/code/training_restaurant_features.py
|
teomotun/Restaurant-Plug
|
1ecaab7bb60706ec0eca96c2f3efb31276c536e7
|
[
"Apache-2.0"
] | null | null | null |
Model/code/training_restaurant_features.py
|
teomotun/Restaurant-Plug
|
1ecaab7bb60706ec0eca96c2f3efb31276c536e7
|
[
"Apache-2.0"
] | null | null | null |
Model/code/training_restaurant_features.py
|
teomotun/Restaurant-Plug
|
1ecaab7bb60706ec0eca96c2f3efb31276c536e7
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np  # needed for np.copy and np.mean below
import pandas as pd
import h5py
# Paths
DATA_HOME = "/content/drive/My Drive/Yelp-Restaurant-Classification/Model/data/"
FEATURES_HOME = '/content/drive/My Drive/Yelp-Restaurant-Classification/Model/features/'
# Get photo->business mapping from the file provided
train_photo_to_biz_ids = pd.read_csv(DATA_HOME + 'train_photo_to_biz_ids.csv')
# Get labels for businesses in the training data
train_data_business = pd.read_csv(DATA_HOME + 'train.csv').dropna()
# Sort these labels in the ascending order for simplicity e.g. (0, 6, 4, 2, 5) -> (0, 2, 4, 5, 6)
train_data_business['labels'] = train_data_business['labels'].apply(
lambda feature_vector: tuple(sorted(int(feature) for feature in feature_vector.split())))
train_data_business.set_index('business_id', inplace=True)
# Get business ids
business_ids = train_data_business.index.unique()
print("Total train business:", len(business_ids))
# Reading stored features from h5 file
train_features_file = h5py.File(FEATURES_HOME + 'train_features.h5', 'r')
train_features = np.copy(train_features_file['feature'])
train_features_file.close()
# Create a pandas dataframe to make the data ready for training the SVM classifier in the following format
train_df = pd.DataFrame(columns=['business_id', 'label', 'feature'])
for business_id in business_ids:
"""
For each business, write the values for the above triplet in the file viz. ['business_id', 'label', 'feature']
"""
business_id = int(business_id)
# Get the labels for the current business
label = train_data_business.loc[business_id]['labels']
# Get all the images which represent the current business with business_id
images_for_business_id = train_photo_to_biz_ids[train_photo_to_biz_ids['business_id'] == business_id].index.tolist()
# As a feature for current business, take the average over all the images
feature = list(np.mean(train_features[images_for_business_id], axis=0))
# Put the triplet into the data frame
train_df.loc[business_id] = [business_id, label, feature]
print("Train business feature extraction is completed.")
# Write the above data frame into a csv file
with open(FEATURES_HOME + 'train_aggregate_features.csv', 'w') as business_features_file:
train_df.to_csv(business_features_file, index=False)
| 43.396226 | 120 | 0.762609 |
f766a09a5537452dacf391bc964b17564c12384c
| 4,137 |
py
|
Python
|
easy_gold/image_utils.py
|
nakayama-r/Probspace-kiva-3rd
|
1fe41aa10b4dd2ec992096228a03b85b61a28a2f
|
[
"MIT"
] | null | null | null |
easy_gold/image_utils.py
|
nakayama-r/Probspace-kiva-3rd
|
1fe41aa10b4dd2ec992096228a03b85b61a28a2f
|
[
"MIT"
] | null | null | null |
easy_gold/image_utils.py
|
nakayama-r/Probspace-kiva-3rd
|
1fe41aa10b4dd2ec992096228a03b85b61a28a2f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 10 02:41:05 2021
@author: r00526841
"""
from utils import *
from PIL import Image
#import imagehash
import cv2
def getImageStatistics(df, ppath_to_dir, ppath_to_label_dir=None):
for index, row in df.iterrows():
ppath_to_image = ppath_to_dir / row["image_name"]
img = Image.open(ppath_to_image)
np_im = np.array(img)
df.loc[index, "img_H"] = np_im.shape[0]
df.loc[index, "img_W"] = np_im.shape[1]
df.loc[index, "img_C"] = np_im.shape[2]
df.loc[index, "img_R_mean"] = np_im[...,0].mean()
df.loc[index, "img_G_mean"] = np_im[...,1].mean()
df.loc[index, "img_B_mean"] = np_im[...,2].mean()
df.loc[index, "img_R_std"] = np_im[...,0].std()
df.loc[index, "img_G_std"] = np_im[...,1].std()
df.loc[index, "img_B_std"] = np_im[...,2].std()
df.loc[index, "img_R_min"] = np_im[...,0].min()
df.loc[index, "img_G_min"] = np_im[...,1].min()
df.loc[index, "img_B_min"] = np_im[...,2].min()
df.loc[index, "img_R_max"] = np_im[...,0].max()
df.loc[index, "img_G_max"] = np_im[...,1].max()
df.loc[index, "img_B_max"] = np_im[...,2].max()
#pdb.set_trace()
# for p in ppath_to_dir.iterdir():
# di = {}
# sar_img_list = ["0_VH", "1_VH", "0_VV", "1_VV"]
# for sar_name in sar_img_list:
# ppath_to_tif = p/f"{sar_name}.tif"
# img = Image.open(ppath_to_tif)
# np_im = np.array(img)
# di[f"{sar_name}_path_to_tif"] = ppath_to_tif
# di[f"{sar_name}_H"] = np_im.shape[0]
# di[f"{sar_name}_W"] = np_im.shape[1]
# di[f"{sar_name}_mean"] = np_im.mean()
# di[f"{sar_name}_std"] = np_im.std()
# di[f"{sar_name}_max"] = np_im.max()
# di[f"{sar_name}_min"] = np_im.min()
# if ppath_to_label_dir is not None:
# ppath_to_label = ppath_to_label_dir/f"{p.name}.png"
# label_img = Image.open(ppath_to_label)
# np_label_img = np.array(label_img)
# di["label_path"] = ppath_to_label
# di["label_H"] = np_label_img.shape[0]
# di["label_W"] = np_label_img.shape[1]
# di["label_mean"] = np_label_img.mean()
# di["label_std"] = np_label_img.std()
# di["label_max"] = np_label_img.max()
# di["label_min"] = np_label_img.min()
# di["num_1"] = np.count_nonzero(np_label_img)
# di["num_0"] = np_label_img.size - di["num_1"]
# di["rate_new_building"] = float(di["num_1"]) / float(np_label_img.size)
# df_each = pd.DataFrame(di, index=[p.name])
# df = df.append(df_each)
return df
def getSaliencyImg(path_to_image, salient_type="SR"):
img = cv2.imread(path_to_image)
if salient_type == 'SR':
saliency = cv2.saliency.StaticSaliencySpectralResidual_create()
elif salient_type == 'FG':
saliency = cv2.saliency.StaticSaliencyFineGrained_create()
(success, saliencyMap) = saliency.computeSaliency(img)
#saliencyMap = (saliencyMap * 255).astype("uint8")
return saliencyMap
def getCenteringImgFromSaliencyImg(img, saliency_img):
img_h, img_w = img.shape[:2]
img_center_h = img_h // 2
img_center_w = img_w // 2
salient_pt_h, salient_pt_w = np.unravel_index(np.argmax(saliency_img), saliency_img.shape)
offset_x = img_center_w - salient_pt_w
offset_y = img_center_h - salient_pt_h
mat = np.float32([[1, 0, offset_x], [0, 1, offset_y]])
dst = cv2.warpAffine(img, mat,(img_w,img_h))
dst_salient = cv2.warpAffine(saliency_img, mat,(img_w,img_h))
#pdb.set_trace()
return dst, dst_salient
def getImageHash(pil_img):
    # imagehash is imported lazily here; the module-level import above is
    # commented out, and this function is the only place that needs it.
    import imagehash
    funcs = [
imagehash.average_hash,
imagehash.phash,
imagehash.dhash,
imagehash.whash,
]
hash = np.array([f(pil_img).hash for f in funcs]).reshape(256)
#pdb.set_trace()
return hash
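
# Illustrative sketch only (not part of the original module): re-centre an
# image on its most salient point by chaining the two helpers above. The file
# path "sample.jpg" is a hypothetical placeholder.
if __name__ == "__main__":
    sample_path = "sample.jpg"
    saliency_map = getSaliencyImg(sample_path, salient_type="SR")
    sample_img = cv2.imread(sample_path)
    centered_img, centered_saliency = getCenteringImgFromSaliencyImg(sample_img, saliency_map)
    print(centered_img.shape, centered_saliency.shape)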
| 30.873134 | 94 | 0.570462 |
f766aff2deae1462ac8ec3e9543d23839c4adfde
| 2,811 |
py
|
Python
|
tofu/data/_generic_check.py
|
WinstonLHS/tofu
|
c95b2eb6aedcf4bac5676752b9635b78f31af6ca
|
[
"MIT"
] | 56 |
2017-07-09T10:29:45.000Z
|
2022-03-31T02:44:50.000Z
|
tofu/data/_generic_check.py
|
WinstonLHS/tofu
|
c95b2eb6aedcf4bac5676752b9635b78f31af6ca
|
[
"MIT"
] | 522 |
2017-07-02T21:06:07.000Z
|
2022-03-02T08:07:57.000Z
|
tofu/data/_generic_check.py
|
Didou09/tofu
|
4a4e1f058bab8e7556ed9d518f90807cec605476
|
[
"MIT"
] | 9 |
2017-07-02T20:38:53.000Z
|
2021-12-04T00:12:30.000Z
|
# -*- coding: utf-8 -*-
# common
import matplotlib.pyplot as plt
_LALLOWED_AXESTYPES = [
'cross', 'hor',
'matrix',
'timetrace',
'profile1d',
'image',
'misc'
]
# #############################################################################
# #############################################################################
# Utilities
# #############################################################################
def _check_var(
var,
varname,
types=None,
default=None,
allowed=None,
excluded=None,
):
# set to default
if var is None:
var = default
if var is None and allowed is not None and len(allowed) == 1:
var = allowed[0]
# check type
if types is not None:
if not isinstance(var, types):
msg = (
f"Arg {varname} must be of type {types}!\n"
f"Provided: {type(var)}"
)
raise Exception(msg)
# check if allowed
if allowed is not None:
if var not in allowed:
msg = (
f"Arg {varname} must be in {allowed}!\n"
f"Provided: {var}"
)
raise Exception(msg)
# check if excluded
if excluded is not None:
if var in excluded:
msg = (
f"Arg {varname} must not be in {excluded}!\n"
f"Provided: {var}"
)
raise Exception(msg)
return var
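
# Illustrative usage sketch (not part of the original module), with a
# hypothetical keyword argument being validated:
#
#     units = _check_var(
#         units, 'units',
#         types=str,
#         default='m',
#         allowed=['m', 'cm', 'mm'],
#     )
#
# None falls back to the default; a value of the wrong type or outside
# `allowed` raises an Exception with a descriptive message.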
# #############################################################################
# #############################################################################
# Utilities for plotting
# #############################################################################
def _check_dax(dax=None, main=None):
# None
if dax is None:
return dax
# Axes
if issubclass(dax.__class__, plt.Axes):
if main is None:
            msg = (
                "Arg main must be provided when dax is a single Axes "
                "instance (it is used as the key for that axes)!"
            )
raise Exception(msg)
else:
return {main: dax}
# dict
c0 = (
isinstance(dax, dict)
and all([
isinstance(k0, str)
and (
(
k0 in _LALLOWED_AXESTYPES
and issubclass(v0.__class__, plt.Axes)
)
or (
isinstance(v0, dict)
and issubclass(v0.get('ax').__class__, plt.Axes)
and v0.get('type') in _LALLOWED_AXESTYPES
)
)
for k0, v0 in dax.items()
])
)
if not c0:
        msg = (
            "Arg dax must be a dict whose values are either Axes "
            "(with keys in _LALLOWED_AXESTYPES) or dicts of the form "
            "{'ax': Axes, 'type': str} with 'type' in _LALLOWED_AXESTYPES!"
        )
raise Exception(msg)
for k0, v0 in dax.items():
if issubclass(v0.__class__, plt.Axes):
dax[k0] = {'ax': v0, 'type': k0}
return dax
| 23.621849 | 79 | 0.37958 |
f766bf62eca60e16c5e19bed5d688451495b4bb4
| 1,255 |
py
|
Python
|
tests/commands/test_help.py
|
febuiles/two1-python
|
88704487dba7715f97a0980781d4c0efb2ea7fc4
|
[
"BSD-2-Clause-FreeBSD"
] | 415 |
2016-06-10T00:46:55.000Z
|
2021-10-16T00:56:06.000Z
|
tests/commands/test_help.py
|
febuiles/two1-python
|
88704487dba7715f97a0980781d4c0efb2ea7fc4
|
[
"BSD-2-Clause-FreeBSD"
] | 25 |
2016-06-11T13:48:59.000Z
|
2021-01-05T11:19:30.000Z
|
tests/commands/test_help.py
|
febuiles/two1-python
|
88704487dba7715f97a0980781d4c0efb2ea7fc4
|
[
"BSD-2-Clause-FreeBSD"
] | 109 |
2016-06-11T05:17:05.000Z
|
2021-12-22T11:02:22.000Z
|
"""Help command unit tests """
import importlib
import pytest
import two1.cli
@pytest.mark.unit
def test_help_text_format():
"""Confirm each command's help ends with a period and is <45 chars.
This test uses metaprogramming to generate the list of functions
of the form two1.commands.buy.buy, asserting that each of them has
a first line in their docstring which fits in click's docstring
limits (45 chars). This prevents inadvertent ellipses in the
autogenerated help for `21 help`, as shown below:
Commands:
search Search for apps listed on the 21 Marketplace...
sell Sell containerized apps for bitcoin...
send Send a specified address some satoshis.
"""
command_names = sorted(two1.cli.main.commands.keys())
for command_name in command_names:
module = importlib.import_module('two1.commands.' + command_name)
command = getattr(module, command_name)
intro = command.help.split("\n")[0]
MAX_CLICK_COMMAND_INTRO_LENGTH = 45
assert intro.endswith('.')
assert len(intro) <= MAX_CLICK_COMMAND_INTRO_LENGTH, \
"Shorten intro to two1.commands.%s.%s to 45 chars" % \
(command_name, command_name)
| 35.857143 | 73 | 0.679681 |
f766c69ee14e81bb14eb3b54c3f7baf35788211b
| 6,495 |
py
|
Python
|
src/pipelines/weather/weather_pipeline.py
|
harrisonzhu508/data
|
a3b95ced4abad6653d20f67f3f285abeeb0c2b25
|
[
"Apache-2.0"
] | null | null | null |
src/pipelines/weather/weather_pipeline.py
|
harrisonzhu508/data
|
a3b95ced4abad6653d20f67f3f285abeeb0c2b25
|
[
"Apache-2.0"
] | null | null | null |
src/pipelines/weather/weather_pipeline.py
|
harrisonzhu508/data
|
a3b95ced4abad6653d20f67f3f285abeeb0c2b25
|
[
"Apache-2.0"
] | null | null | null |
import re
import sys
import math
from random import shuffle
from functools import partial
from typing import Any, Dict, List, Tuple
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool as Pool
import numpy
from tqdm.contrib import concurrent
from pandas import DataFrame, Series, Int64Dtype, merge, read_csv, concat, isna
from lib.cast import safe_int_cast
from lib.pipeline import DataPipeline, DefaultPipeline, PipelineChain
from lib.time import datetime_isoformat
from lib.utils import ROOT
class WeatherPipeline(DefaultPipeline):
# A bit of a circular dependency but we need the latitude and longitude to compute weather
def fetch(self, cache: Dict[str, str], **fetch_opts) -> List[str]:
return [ROOT / "output" / "tables" / "geography.csv"]
@staticmethod
def haversine_distance(
stations: DataFrame, lat: float, lon: float, radius: float = 6373.0
) -> Series:
""" Compute the distance between two <latitude, longitude> pairs in kilometers """
# Compute the pairwise deltas
lat_diff = stations.lat - lat
lon_diff = stations.lon - lon
# Apply Haversine formula
a = numpy.sin(lat_diff / 2) ** 2
a += math.cos(lat) * numpy.cos(stations.lat) * numpy.sin(lon_diff / 2) ** 2
c = numpy.arctan2(numpy.sqrt(a), numpy.sqrt(1 - a)) * 2
return radius * c
@staticmethod
def nearest_station(stations, lat: float, lon: float):
# Compute the distance with each station
distances = WeatherPipeline.haversine_distance(stations, lat, lon)
# Return the closest station and its distance
idxmin = distances.idxmin()
return distances.loc[idxmin], stations.loc[idxmin]
@staticmethod
def fix_temp(value: int):
value = safe_int_cast(value)
return None if value is None else "%.1f" % (value / 10.0)
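    # Illustrative note (not from the original file): GHCN-Daily reports
    # TMIN/TMAX in tenths of a degree Celsius, so e.g. fix_temp(215) -> "21.5".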
@staticmethod
def station_records(station_cache: Dict[str, DataFrame], stations: DataFrame, location: Series):
# Get the nearest station from our list of stations given lat and lon
distance, nearest = WeatherPipeline.nearest_station(stations, location.lat, location.lon)
# Query the cache and pull data only if not already cached
if nearest.id not in station_cache:
# Read the records from the nearest station
station_url = (
"https://www.ncei.noaa.gov/data"
"/global-historical-climatology-network-daily/access/{}.csv"
).format(nearest.id)
column_mapping = {
"DATE": "date",
"STATION": "noaa_station",
"TMIN": "minimum_temperature",
"TMAX": "maximum_temperature",
"PRCP": "rainfall",
"SNOW": "snowfall",
}
data = read_csv(station_url, usecols=lambda column: column in column_mapping.keys())
data = data.rename(columns=column_mapping)
# Convert temperature to correct values
data["minimum_temperature"] = data["minimum_temperature"].apply(
WeatherPipeline.fix_temp
)
data["maximum_temperature"] = data["maximum_temperature"].apply(
WeatherPipeline.fix_temp
)
# Get only data for 2020 and add location values
data = data[data.date > "2019-12-31"]
# Save into the cache
station_cache[nearest.id] = data
# Get station records from the cache
data = station_cache[nearest.id].copy()
# Return all the available data from the records
output_columns = [
"date",
"key",
"noaa_station",
"noaa_distance",
"minimum_temperature",
"maximum_temperature",
"rainfall",
"snowfall",
]
data["key"] = location.key
data["noaa_distance"] = "%.03f" % distance
return data[[col for col in output_columns if col in data.columns]]
def parse_dataframes(
self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
):
# Get all the weather stations with data up until 2020
stations_url = "https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd-inventory.txt"
stations = read_csv(
stations_url,
sep=r"\s+",
names=("id", "lat", "lon", "measurement", "year_start", "year_end"),
)
stations = stations[stations.year_end == 2020][["id", "lat", "lon", "measurement"]]
# Filter stations that at least provide max and min temps
measurements = ["TMIN", "TMAX"]
stations = stations.groupby(["id", "lat", "lon"]).agg(lambda x: "|".join(x))
stations = stations[stations.measurement.apply(lambda x: all(m in x for m in measurements))]
stations = stations.reset_index()
# Get all the POI from metadata and go through each key
metadata = dataframes[0][["key", "latitude", "longitude"]].dropna()
# Convert all coordinates to radians
stations["lat"] = stations.lat.apply(math.radians)
stations["lon"] = stations.lon.apply(math.radians)
metadata["lat"] = metadata.latitude.apply(math.radians)
metadata["lon"] = metadata.longitude.apply(math.radians)
# Use a cache to avoid having to query the same station multiple times
station_cache: Dict[str, DataFrame] = {}
# Make sure the stations and the cache are sent to each function call
map_func = partial(WeatherPipeline.station_records, station_cache, stations)
# We don't care about the index while iterating over each metadata item
map_iter = [record for _, record in metadata.iterrows()]
# Shuffle the iterables to try to make better use of the caching
shuffle(map_iter)
# Bottleneck is network so we can use lots of threads in parallel
records = concurrent.thread_map(map_func, map_iter, total=len(metadata))
return concat(records)
class WeatherPipelineChain(PipelineChain):
schema: Dict[str, type] = {
"date": str,
"key": str,
"noaa_station": str,
"noaa_distance": float,
"minimum_temperature": float,
"maximum_temperature": float,
"rainfall": float,
"snowfall": float,
}
pipelines: List[Tuple[DataPipeline, Dict[str, Any]]] = [(WeatherPipeline(), {})]
| 37.327586 | 100 | 0.627098 |
f766dc02c99c74fafe61b68c1e65cd0777853322
| 2,045 |
py
|
Python
|
amatino/internal/entity_update_arguments.py
|
Amatino-Code/amatino-python
|
6c5f66b2e61bede5bf9d3e6eee8130a16f511a5f
|
[
"MIT"
] | 2 |
2018-07-20T20:00:33.000Z
|
2020-10-08T15:49:06.000Z
|
amatino/internal/entity_update_arguments.py
|
Amatino-Code/amatino-python
|
6c5f66b2e61bede5bf9d3e6eee8130a16f511a5f
|
[
"MIT"
] | 1 |
2020-05-21T02:49:29.000Z
|
2020-05-21T02:49:29.000Z
|
amatino/internal/entity_update_arguments.py
|
Amatino-Code/amatino-python
|
6c5f66b2e61bede5bf9d3e6eee8130a16f511a5f
|
[
"MIT"
] | 3 |
2018-09-03T09:31:31.000Z
|
2020-05-21T05:30:00.000Z
|
"""
Amatino API Python Bindings
Entity Update Arguments
Author: [email protected]
"""
from amatino.internal.encodable import Encodable
from amatino.internal.entity_create_arguments import NewEntityArguments
from amatino.internal.constrained_string import ConstrainedString
from typing import Optional
class EntityUpdateArguments(Encodable):
"""
A set of arguments suitable for provision to the Amatino API for the
purpose of updating an existing Entity
"""
def __init__(
self,
entity_id: str,
name: str,
description: str,
owner_id: int,
permissions_graph: Optional[dict]
) -> None:
assert isinstance(entity_id, str)
self._entity_id = entity_id
self._name = ConstrainedString(
name,
'name',
NewEntityArguments.MAX_NAME_LENGTH
)
self._description = ConstrainedString(
description,
'description',
NewEntityArguments.MAX_DESCRIPTION_LENGTH
)
if not isinstance(owner_id, int):
raise TypeError('owner_id must be of type `int`')
self._owner_id = owner_id
self._permissions_graph = permissions_graph
if permissions_graph is None:
return
if not isinstance(permissions_graph, dict):
raise TypeError('permissions_graph must be of type `dict`')
if False in [isinstance(k, str) for k in permissions_graph]:
raise TypeError('permissions_graph keys must be of type `str`')
upg = permissions_graph
if False in [isinstance(upg[k], dict) for k in upg]:
raise TypeError('permissions_graph values must be of type `dict`')
return
def serialise(self):
data = {
'entity_id': self._entity_id,
'name': str(self._name),
'description': str(self._description),
'owner_id': self._owner_id,
'permissions_graph': self._permissions_graph
}
return data
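
# Illustrative sketch only (not part of the original module); all argument
# values below are hypothetical:
#
#     arguments = EntityUpdateArguments(
#         entity_id='entity_xxx',
#         name='Updated name',
#         description='Updated description',
#         owner_id=42,
#         permissions_graph=None,
#     )
#     payload = arguments.serialise()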
| 28.013699 | 78 | 0.632274 |
f766e0e63f77f6d33944033364b887b207981671
| 64 |
py
|
Python
|
outlookdisablespamfilter/__init__.py
|
jan-janssen/outlook-disable-spam-filter
|
264f5fb650408abbfa5736f33ae098aefc0e7fac
|
[
"BSD-3-Clause"
] | null | null | null |
outlookdisablespamfilter/__init__.py
|
jan-janssen/outlook-disable-spam-filter
|
264f5fb650408abbfa5736f33ae098aefc0e7fac
|
[
"BSD-3-Clause"
] | null | null | null |
outlookdisablespamfilter/__init__.py
|
jan-janssen/outlook-disable-spam-filter
|
264f5fb650408abbfa5736f33ae098aefc0e7fac
|
[
"BSD-3-Clause"
] | null | null | null |
from outlookdisablespamfilter.shared import transfer_spam_emails
| 64 | 64 | 0.9375 |
f766f0faf3cd4875039cd35c9dc82786fee31c98
| 449 |
py
|
Python
|
build/j2n6s300_ikfast/catkin_generated/pkg.develspace.context.pc.py
|
FProgrammerLIU/caster_man_ros
|
a75b503fad3a470f985072a2b3953e89074f3223
|
[
"MIT"
] | null | null | null |
build/j2n6s300_ikfast/catkin_generated/pkg.develspace.context.pc.py
|
FProgrammerLIU/caster_man_ros
|
a75b503fad3a470f985072a2b3953e89074f3223
|
[
"MIT"
] | null | null | null |
build/j2n6s300_ikfast/catkin_generated/pkg.develspace.context.pc.py
|
FProgrammerLIU/caster_man_ros
|
a75b503fad3a470f985072a2b3953e89074f3223
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "moveit_core;pluginlib;roscpp;tf_conversions".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "j2n6s300_ikfast"
PROJECT_SPACE_DIR = "/home/caster/ros_ws/caster/devel/.private/j2n6s300_ikfast"
PROJECT_VERSION = "0.0.0"
| 49.888889 | 88 | 0.739421 |
f7671e91db5aaea8ae21a0a7e49fe16822ac3587
| 3,132 |
py
|
Python
|
scripts/run_sentence_indexer.py
|
MetaphorExtractionTools/metaphor-search
|
ca0ba6cda8d83eb9274eb2b9d7977df4a5374b18
|
[
"MIT"
] | 2 |
2018-05-25T01:47:50.000Z
|
2021-11-26T19:03:07.000Z
|
scripts/run_sentence_indexer.py
|
MetaphorExtractionTools/metaphor-search
|
ca0ba6cda8d83eb9274eb2b9d7977df4a5374b18
|
[
"MIT"
] | null | null | null |
scripts/run_sentence_indexer.py
|
MetaphorExtractionTools/metaphor-search
|
ca0ba6cda8d83eb9274eb2b9d7977df4a5374b18
|
[
"MIT"
] | 1 |
2020-02-06T16:04:07.000Z
|
2020-02-06T16:04:07.000Z
|
#!/usr/bin/env python
# coding: utf-8
# Author: Vladimir M. Zaytsev <[email protected]>
import os
import numpy
import shutil
import logging
import argparse
from sear.index import InvertedIndex # The index itself.
from sear.utils import IndexingPipeline # Utility which will control indexing process.
from sear.storage import LdbStorage # Storage for raw indexed documents.
from sear.lexicon import DictLexicon # Term lexicon backend.
from metaphor.lfsent import LFSentenceParser # High level LF sentences parser.
from metaphor.lfsent import LFSentenceStream # Class which does low-level LF sentences parsing.
from metaphor.lfsent import LFSentenceIndexer # Class which knows how to index parsed LF sentences.
logging.basicConfig(level=logging.INFO)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-t", "--test", type=int, choices=(0, 1), default=0)
arg_parser.add_argument("-s", "--test_size", type=str, choices=("tiny", "medium", "large"), default="tiny")
arg_parser.add_argument("-l", "--language", type=str, choices=("rus", "spa", "eng"), default=None)
arg_parser.add_argument("-i", "--input", type=str)
arg_parser.add_argument("-o", "--output", type=str)
arguments = arg_parser.parse_args()
logging.info("Initializing output directory structure.")
if arguments.test == 1:
input_path = os.path.join(
arguments.input,
"test_data",
arguments.test_size,
arguments.language,
"sentence.txt"
)
output_path = os.path.join(
arguments.output,
"test_out",
arguments.test_size,
arguments.language,
"sentence"
)
else:
input_path = arguments.input
output_path = arguments.output
logging.info("Input: %s" % input_path)
logging.info("Output: %s" % output_path)
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.makedirs(output_path)
logging.info("Initializing lexicon.")
lexicon = DictLexicon(output_path)
logging.info("Initializing storage.")
storage = LdbStorage(output_path)
storage.init_db()
storage.open_db()
logging.info("Initializing index.")
index = InvertedIndex(output_path, field_properties=[
("sentence_id", numpy.int32),
])
index.init_index()
index.open()
logging.info("Initializing sentence stream and its parser.")
sentence_stream = LFSentenceStream(input_path, language=arguments.language)
sentence_parser = LFSentenceParser()
sentence_indexer = LFSentenceIndexer(lexicon)
logging.info("Initializing indexing pipeline.")
indexing_pipeline = IndexingPipeline(lexicon, index, storage)
logging.info("Start indexing file: %s" % input_path)
input_mb_size = float(os.path.getsize(input_path)) / (1024 ** 2)
logging.info("Input size: %.2fMB" % input_mb_size)
indexing_pipeline.index_stream(sentence_stream, sentence_parser, sentence_indexer)
logging.info("Closing index.")
index.close()
logging.info("Closing storage.")
storage.close_db()
logging.info("Dumping lexicon.")
lexicon.dump()
logging.info("No way, it's done!")
| 27 | 111 | 0.707854 |
f767299211252b3099abb79bfe1a89b16ca79677
| 1,713 |
py
|
Python
|
gradescrape/login.py
|
guineawheek/gradescrape
|
88245c2a26decc499db2e8c5b75229b67fb6696b
|
[
"BSD-2-Clause"
] | null | null | null |
gradescrape/login.py
|
guineawheek/gradescrape
|
88245c2a26decc499db2e8c5b75229b67fb6696b
|
[
"BSD-2-Clause"
] | null | null | null |
gradescrape/login.py
|
guineawheek/gradescrape
|
88245c2a26decc499db2e8c5b75229b67fb6696b
|
[
"BSD-2-Clause"
] | null | null | null |
# requires: selenium, geckodriver
# Somewhat deprecated. Only use if desperate.
import json
__all__ = ["interactive_school_login"]
def interactive_school_login(school="berkeley"):
"""
Uses Selenium to interactively grab tokens from an interactive saml login.
Returns the cookies obtained.
You can save the returned cookies as json file and read them back into get_tokens.
example:
cookies = attempt_school_login()
with open("cookies.json", "w") as f:
json.dump(cookies, f, indent=4)
# ...
with open("cookies.json") as f:
cookies = login.get_tokens(json.load(f))
ses = session.Session(cookies)
"""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
driver = webdriver.Firefox()
print("Log into your Calnet ID account.")
driver.get(f"https://gradescope.com/auth/saml/{school}?remember_me=1")
while True:
wait = WebDriverWait(driver, 120)
try:
wait.until(lambda driver: driver.current_url.startswith("https://www.gradescope.com") and "saml" not in driver.current_url)
break
except TimeoutException:
pass
print("Successful login detected, dumping cookies")
cookies = driver.get_cookies()
driver.close()
ret = {}
for cookie in cookies:
#if cookie['name'] in ("signed_token", "remember_me", "_gradescope_session"):
ret[cookie['name']] = cookie['value']
return ret
if __name__ == "__main__":
cookies = interactive_school_login()
with open("cookies.json", "w") as f:
json.dump(cookies, f, indent=4)
| 30.589286 | 135 | 0.664915 |
f76781418ef1e423ed98b97890791ef7aca02b31
| 8,015 |
py
|
Python
|
wienerschnitzelgemeinschaft/src/shai/fastai/other_ensemble_scripts/enstw41.py
|
guitarmind/HPA-competition-solutions
|
547d53aaca148fdb5f4585526ad7364dfa47967d
|
[
"MIT"
] | null | null | null |
wienerschnitzelgemeinschaft/src/shai/fastai/other_ensemble_scripts/enstw41.py
|
guitarmind/HPA-competition-solutions
|
547d53aaca148fdb5f4585526ad7364dfa47967d
|
[
"MIT"
] | null | null | null |
wienerschnitzelgemeinschaft/src/shai/fastai/other_ensemble_scripts/enstw41.py
|
guitarmind/HPA-competition-solutions
|
547d53aaca148fdb5f4585526ad7364dfa47967d
|
[
"MIT"
] | null | null | null |
# individual nan corrected
# Final nan matches highest probable label (optional)
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
SAMPLE = '../input/sample_submission.csv'
label_names = {
0: "Nucleoplasm",
1: "Nuclear membrane",
2: "Nucleoli",
3: "Nucleoli fibrillar center",
4: "Nuclear speckles",
5: "Nuclear bodies",
6: "Endoplasmic reticulum",
7: "Golgi apparatus",
8: "Peroxisomes",
9: "Endosomes",
10: "Lysosomes",
11: "Intermediate filaments",
12: "Actin filaments",
13: "Focal adhesion sites",
14: "Microtubules",
15: "Microtubule ends",
16: "Cytokinetic bridge",
17: "Mitotic spindle",
18: "Microtubule organizing center",
19: "Centrosome",
20: "Lipid droplets",
21: "Plasma membrane",
22: "Cell junctions",
23: "Mitochondria",
24: "Aggresome",
25: "Cytosol",
26: "Cytoplasmic bodies",
27: "Rods & rings"
}
column_sum = []
sub_name = []
def expand(csv):
sub = pd.read_csv(csv)
print(csv, sub.isna().sum())
sub = sub.replace(pd.np.nan, '101')
sub[f'target_vec'] = sub['Predicted'].map(lambda x: list(map(int, x.strip().split())))
for i in range(28):
sub[f'{label_names[i]}'] = sub['Predicted'].map(
lambda x: 1 if str(i) in x.strip().split() else 0)
sub = sub.values
sub = np.delete(sub, [1, 2], axis=1)
a = sub[:, 1:]
unique, counts = np.unique(a, return_counts=True)
print('Unique counts:',np.asarray((unique, counts)).T)
print('Total labels:{} Class-wise:{}'.format(a.sum(), a.sum(axis=0)))
column_sum.append( a.sum(axis=0))
sub_name.append(csv)
return sub
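
# Illustrative note (not from the original script): for a row whose 'Predicted'
# string is "0 25", expand() sets the "Nucleoplasm" and "Cytosol" columns to 1
# and all other label columns to 0; NaN predictions are first mapped to the
# dummy label '101', which matches none of the 28 classes and so yields an
# all-zero row.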
#======================================================================================================================
# Input submissions
#====================================================================================================================
sub_dir = 'sub_dir_team/'
#enstw39b
df_1 = expand('sub_dir_team/leak_brian_tommy_en_res34swa_re50xt_re101xtswa_wrn_4.8_562.csv') #1 +1
df_2 = expand( 'sub_dir_team/Christof_blend_4_580.csv') #2 +3
df_3 = expand('sub_dir_team/ens85bd_russ_616.csv') # 3 +3
df_4 = expand('sub_dir_team/enspreds103_12mdl_512-256_wtth0.45_leak_shai_593.csv') # 2+2
df_5 = expand('sub_dir_team/hill_m94d_dmytro_627.csv') # 3
df_6 = expand('sub_dir_team/voted_5_d_kevin_602.csv') #2 +2
df_7 = expand('sub_dir_team/hill_b93d_l2_615update.csv') #1
df_8 = expand('sub_dir_team/submission_loss_5fold_mean_2_GAP_chrs_602.csv') #2
df_9 = expand('sub_dir_team/hill_b92d_l2_615.csv') #1
df_10 = expand('sub_dir_team/hill_m92d_dmytro_617.csv') # 3
# enstw36
# df_1 = expand('sub_dir_team/leak_brian_tommy_en_res34swa_re50xt_re101xtswa_wrn_4.8_562.csv') #1
# df_2 = expand( 'sub_dir_team/Christof_blend_4_580.csv') #3
# df_3 = expand('sub_dir_team/ens85bd_russ_616.csv') #3
# df_4 = expand('sub_dir_team/enspreds103_12mdl_512-256_wtth0.45_leak_shai_593.csv') #2
# df_5 = expand('sub_dir_team/hill_m92d_dmytro_617.csv') # 3
# df_6 = expand('sub_dir_team/voted_5_d_kevin_602.csv') #2
# df_7 = expand('sub_dir_team/hill_b92d_l2_615.csv') #1
#=======================================================================================================================
# Visualize distribution
#=======================================================================================================================
# list =[0,1,2,3,4,5,6,7]
# colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'orange']
# w=0
# for i in list:
# x = np.arange(0, 28, 1)
# plt.bar(x+w, column_sum[i],width = 0.08, color = colors[i], label=sub_name[i], )
# w=w+0.09
# plt.legend()
# plt.grid(True)
# plt.yscale('log')
# plt.show()
#=======================================================================================================================
#=======================================================================================================================
sum = df_1[:, 1:]*2 + \
df_2[:, 1:]*5 + \
df_3[:, 1:]*6 + \
df_4[:, 1:]*4 + \
df_5[:, 1:]*3 + \
df_6[:, 1:]*4 + \
df_7[:, 1:]*1 + \
df_8[:, 1:]*2 + \
df_9[:, 1:]*1 + \
df_10[:, 1:]*3
vote = 15 #7 15/31
#=======================================================================================================================
# Selecting most probable label for nan rows
#=======================================================================================================================
# sum_tmp = sum.copy()
# for i,row in enumerate(sum):
# #print (str(row))
# #print(max(row))
# #print(row.argmax(axis=0))
# row_max_idx = row.argmax(axis=0)
# if max(row)<vote:
# #row[row_max_idx] = vote
# sum[i,row_max_idx] = vote
# #print(str(row))
# diff = sum-sum_tmp
#=======================================================================================================================
vote_sub0 = np.where(sum[:,0] >= vote, 1, 0) #high
vote_sub1 = np.where(sum[:,1] >= vote, 1, 0)
vote_sub2 = np.where(sum[:,2] >= vote, 1, 0)
vote_sub3 = np.where(sum[:,3] >= vote, 1, 0)
vote_sub4 = np.where(sum[:,4] >= vote, 1, 0)
vote_sub5 = np.where(sum[:,5] >= vote, 1, 0)
vote_sub6 = np.where(sum[:,6] >= vote, 1, 0)
vote_sub7 = np.where(sum[:,7] >= vote, 1, 0)
vote_sub8 = np.where(sum[:,8] >= vote, 1, 0) #low
vote_sub9 = np.where(sum[:,9] >= vote, 1, 0) #low
vote_sub10 = np.where(sum[:,10] >= vote, 1, 0) #low
vote_sub11 = np.where(sum[:,11] >= vote, 1, 0)
vote_sub12 = np.where(sum[:,12] >= vote, 1, 0)
vote_sub13 = np.where(sum[:,13] >= vote, 1, 0)
vote_sub14 = np.where(sum[:,14] >= vote, 1, 0)
vote_sub15 = np.where(sum[:,15] >= vote + 2, 1, 0) #low
vote_sub16 = np.where(sum[:,16] >= vote, 1, 0)
vote_sub17 = np.where(sum[:,17] >= vote, 1, 0)
vote_sub18 = np.where(sum[:,18] >= vote, 1, 0)
vote_sub19 = np.where(sum[:,19] >= vote, 1, 0)
vote_sub20 = np.where(sum[:,20] >= vote, 1, 0)
vote_sub21 = np.where(sum[:,21] >= vote, 1, 0)
vote_sub22 = np.where(sum[:,22] >= vote, 1, 0)
vote_sub23 = np.where(sum[:,23] >= vote, 1, 0)
vote_sub24 = np.where(sum[:,24] >= vote, 1, 0)
vote_sub25 = np.where(sum[:,25] >= vote, 1, 0) #high
vote_sub26 = np.where(sum[:,26] >= vote, 1, 0)
vote_sub27 = np.where(sum[:,27] >= vote, 1, 0) #low
vote_sub = np.column_stack((vote_sub0, vote_sub1, vote_sub2, vote_sub3,
vote_sub4, vote_sub5, vote_sub6, vote_sub7,
vote_sub8, vote_sub9, vote_sub10, vote_sub11,
vote_sub12, vote_sub13, vote_sub14, vote_sub15,
vote_sub16, vote_sub17, vote_sub18, vote_sub19,
vote_sub20, vote_sub21, vote_sub22, vote_sub23,
vote_sub24, vote_sub25, vote_sub26, vote_sub27)
)
#======================================================================================================================
# prepare submission format
#======================================================================================================================
submit = pd.read_csv(SAMPLE)
prediction = []
for row in tqdm(range(submit.shape[0])):
str_label = ''
for col in range(vote_sub.shape[1]):
if (vote_sub[row, col] < 1):
str_label += ''
else:
str_label += str(col) + ' '
prediction.append(str_label.strip())
submit['Predicted'] = np.array(prediction)
#submit.to_csv('sub_dir_team/test.csv', index=False)
submit.to_csv('sub_dir_team/enstw40_642blend.csv', index=False)
#=======================================================================================================================
| 41.102564 | 120 | 0.483968 |
f767ae544f325588028db406518572d93b2dae39
| 244 |
py
|
Python
|
nevergrad/functions/powersystems/__init__.py
|
kwonhur/nevergrad
|
7821ff2ddf46ed7ece61529449f770d32179433c
|
[
"MIT"
] | 3,217 |
2018-12-20T05:41:46.000Z
|
2022-03-31T10:22:54.000Z
|
nevergrad/functions/powersystems/__init__.py
|
kwonhur/nevergrad
|
7821ff2ddf46ed7ece61529449f770d32179433c
|
[
"MIT"
] | 590 |
2018-12-20T21:03:38.000Z
|
2022-03-31T04:38:45.000Z
|
nevergrad/functions/powersystems/__init__.py
|
kwonhur/nevergrad
|
7821ff2ddf46ed7ece61529449f770d32179433c
|
[
"MIT"
] | 333 |
2018-12-20T08:38:03.000Z
|
2022-03-28T06:23:53.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .core import PowerSystem as PowerSystem
| 34.857143 | 71 | 0.77459 |
f767b5a02834bc43fda4cafc3ce2038fc3321d25
| 830 |
py
|
Python
|
lambda_function.py
|
Moesif/moesif-aws-lambda-python
|
b588bc819568e08eec2f8eb9d2bd6a1e15f945a5
|
[
"Apache-2.0"
] | 1 |
2021-01-21T10:38:33.000Z
|
2021-01-21T10:38:33.000Z
|
lambda_function.py
|
Moesif/moesif-aws-lambda-python
|
b588bc819568e08eec2f8eb9d2bd6a1e15f945a5
|
[
"Apache-2.0"
] | 2 |
2019-12-21T01:14:19.000Z
|
2020-02-20T02:03:23.000Z
|
lambda_function.py
|
Moesif/moesif-aws-lambda-python
|
b588bc819568e08eec2f8eb9d2bd6a1e15f945a5
|
[
"Apache-2.0"
] | 1 |
2021-01-21T10:38:35.000Z
|
2021-01-21T10:38:35.000Z
|
from moesif_aws_lambda.middleware import *
import os
import requests
import json
moesif_options = {
'LOG_BODY': True,
'DEBUG': True,
}
@MoesifLogger(moesif_options)
def lambda_handler(event, context):
# Outgoing API call to third parties like Github / Stripe or to your own dependencies
start_capture_outgoing(moesif_options)
third_party = requests.get('https://httpbin.org/ip', json=json.dumps({'test': 2}),
headers={"content-type": "text", "Authorization": "Bearer sdf4854wer"},
auth=('Basic', "testauth"))
return {
'statusCode': 200,
'isBase64Encoded': False,
'body': {
'msg': 'Hello from Lambda!'
},
'headers': {
'Content-Type': 'application/json'
}
}
| 27.666667 | 102 | 0.580723 |
f767b7366fdf2c2f742dc6da7890800845ed5799
| 471 |
py
|
Python
|
tests/validators/test_interface.py
|
funkypenguin/connaisseur
|
37d85332e519454a017b02ecb1cd77913f51f70a
|
[
"Apache-2.0"
] | 281 |
2020-08-07T16:21:07.000Z
|
2022-03-31T11:08:42.000Z
|
tests/validators/test_interface.py
|
funkypenguin/connaisseur
|
37d85332e519454a017b02ecb1cd77913f51f70a
|
[
"Apache-2.0"
] | 445 |
2020-09-02T10:41:18.000Z
|
2022-03-31T10:19:29.000Z
|
tests/validators/test_interface.py
|
funkypenguin/connaisseur
|
37d85332e519454a017b02ecb1cd77913f51f70a
|
[
"Apache-2.0"
] | 38 |
2020-10-28T08:05:04.000Z
|
2022-03-09T02:06:40.000Z
|
import pytest
import connaisseur.validators.interface as vi
def test_init():
assert vi.ValidatorInterface("")
@pytest.mark.asyncio
async def test_validate():
with pytest.raises(NotImplementedError):
assert await vi.ValidatorInterface("").validate(None)
def test_healthy():
with pytest.raises(NotImplementedError):
assert vi.ValidatorInterface("").healthy is True
def test_str():
assert str(vi.ValidatorInterface("test")) == "test"
| 21.409091 | 61 | 0.728238 |
f767d5332f355479a0656200357eba773a7d3227
| 2,553 |
py
|
Python
|
tests/test_engine/test_projection/test_projection_slice.py
|
gitter-badger/MontyDB
|
849d03dc2cfed35739481e9acb1ff0bd8095c91b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_projection/test_projection_slice.py
|
gitter-badger/MontyDB
|
849d03dc2cfed35739481e9acb1ff0bd8095c91b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_projection/test_projection_slice.py
|
gitter-badger/MontyDB
|
849d03dc2cfed35739481e9acb1ff0bd8095c91b
|
[
"BSD-3-Clause"
] | null | null | null |
def test_projection_slice_1(monty_proj, mongo_proj):
docs = [
{"a": [{"b": 1}, {"b": 3}, {"b": 0}, {"b": 8}]}
]
spec = {"a.b": {"$gt": 2}}
proj = {"a.b": {"$slice": 2}}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_projection_slice_2(monty_proj, mongo_proj):
docs = [
{"a": [0, 1, 2, 5, 6]},
{"a": [8, 1, 5]},
{"a": [9, 0, 0, 2, 6]},
]
spec = {}
proj = {"a": {"$slice": [1, 4]}}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert mongo_c.count() == 3
assert monty_c.count() == mongo_c.count()
for i in range(3):
assert next(mongo_c) == next(monty_c)
def test_projection_slice_3(monty_proj, mongo_proj):
docs = [
{"a": [0, 1, 2, 5, 6]},
{"a": [8, 1, 5]},
{"a": [9, 0, 0, 2, 6]},
]
spec = {}
proj = {"a": {"$slice": -3}}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert mongo_c.count() == 3
assert monty_c.count() == mongo_c.count()
for i in range(3):
assert next(mongo_c) == next(monty_c)
def test_projection_slice_4(monty_proj, mongo_proj):
docs = [
{"a": [0, 1, 2, 3, 4, 5, 6, 7]}
]
spec = {}
proj = {"a": {"$slice": [5, 4]}}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_projection_slice_5(monty_proj, mongo_proj):
docs = [
{"a": [0, 1, 2, 3, 4, 5, 6, 7]}
]
spec = {}
proj = {"a": {"$slice": [-5, 4]}}
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_projection_slice_6(monty_proj, mongo_proj):
docs = [
{"a": [0, 1, 2, 3, 4, 5, 6, 7], "x": 100}
]
spec = {}
def run(proj):
monty_c = monty_proj(docs, spec, proj)
mongo_c = mongo_proj(docs, spec, proj)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
proj = {"a": {"$slice": [-5, 4]}, "x": 1}
run(proj)
proj = {"a": {"$slice": [-5, 4]}, "x": 0}
run(proj)
| 24.786408 | 55 | 0.527223 |
f7681adf5c3a92dc85f321c2470189af8a01603a
| 1,105 |
py
|
Python
|
demo/test.py
|
tkerola/matrix_calculus
|
21c4f262b501337996453280d75d06a47a9fd135
|
[
"MIT"
] | 1 |
2019-04-18T03:48:08.000Z
|
2019-04-18T03:48:08.000Z
|
demo/test.py
|
tkerola/matrix_calculus
|
21c4f262b501337996453280d75d06a47a9fd135
|
[
"MIT"
] | null | null | null |
demo/test.py
|
tkerola/matrix_calculus
|
21c4f262b501337996453280d75d06a47a9fd135
|
[
"MIT"
] | null | null | null |
from matrix_calculus import *
from matrix_calculus.matrix_massage import massage2canonical
from matrix_calculus.show_latex import show_latex
def main():
A = Variable("A")
B = Variable("B")
C = Variable("C")
D = Variable("D")
Y = Variable("Y")
X = Variable("X")
expr = Tr(A*X*B)
wrt = X
print("Jacobian")
dX = show_derivation(expr, wrt)
show_latex(dX, expr, wrt)
expr = Tr(A*X.T*B*X*C)
wrt = X
print("Jacobian")
dX = show_derivation(expr, wrt)
show_latex(dX, expr, wrt)
expr = Tr((Y-D*X).T*(Y-D*X))
wrt = D
print("Jacobian")
dX = show_derivation(expr, wrt)
show_latex(dX, expr, wrt)
print("Hessian")
dX = show_derivation(expr, wrt, hessian=True)
show_latex(dX, expr, wrt)
show_latex(dX, expr, wrt, hessian=True)
def show_derivation(expr, wrt, hessian=False):
print("{} = ".format(DifferentialExpr(expr, wrt)))
dX = d(expr, wrt, hessian=hessian)
print(dX)
print("-->")
dX = massage2canonical(dX)
print(dX)
print("")
return dX
if __name__ == '__main__':
main()
| 21.25 | 60 | 0.60905 |
f7683fd845e86f68f741962cd7eba1909b6d17c6
| 4,561 |
py
|
Python
|
yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
|
rbbratta/yardstick
|
7d1ffcd8eb41e9e4a09ed469b00ae4cbf2715529
|
[
"Apache-2.0"
] | null | null | null |
yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
|
rbbratta/yardstick
|
7d1ffcd8eb41e9e4a09ed469b00ae4cbf2715529
|
[
"Apache-2.0"
] | null | null | null |
yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
|
rbbratta/yardstick
|
7d1ffcd8eb41e9e4a09ed469b00ae4cbf2715529
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
import logging
import subprocess
import traceback
import yardstick.ssh as ssh
from yardstick.benchmark.scenarios.availability.attacker.baseattacker import \
BaseAttacker
LOG = logging.getLogger(__name__)
def _execute_shell_command(command, stdin=None):
"""execute shell script with error handling"""
exitcode = 0
output = []
try:
output = subprocess.check_output(command, stdin=stdin, shell=True)
except Exception:
exitcode = -1
output = traceback.format_exc()
LOG.error("exec command '%s' error:\n ", command)
LOG.error(traceback.format_exc())
return exitcode, output
class BaremetalAttacker(BaseAttacker):
__attacker_type__ = 'bare-metal-down'
def setup(self):
LOG.debug("config:%s context:%s", self._config, self._context)
host = self._context.get(self._config['host'], None)
self.connection = ssh.SSH.from_node(host, defaults={"user": "root"})
self.connection.wait(timeout=600)
LOG.debug("ssh host success!")
self.host_ip = host['ip']
self.ipmi_ip = host.get("ipmi_ip", None)
self.ipmi_user = host.get("ipmi_user", "root")
self.ipmi_pwd = host.get("ipmi_pwd", None)
self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down')
self.check_script = self.get_script_fullpath(
self.fault_cfg['check_script'])
self.recovery_script = self.get_script_fullpath(
self.fault_cfg['recovery_script'])
if self.check():
self.setup_done = True
def check(self):
with open(self.check_script, "r") as stdin_file:
exit_status, stdout, stderr = self.connection.execute(
"/bin/sh -s {0} -W 10".format(self.host_ip),
stdin=stdin_file)
LOG.debug("check ret: %s out:%s err:%s",
exit_status, stdout, stderr)
if not stdout or "running" not in stdout:
LOG.info("the host (ipmi_ip:%s) is not running!", self.ipmi_ip)
return False
return True
def inject_fault(self):
exit_status, stdout, stderr = self.connection.execute(
"shutdown -h now")
LOG.debug("inject fault ret: %s out:%s err:%s",
exit_status, stdout, stderr)
if not exit_status:
LOG.info("inject fault success")
def recover(self):
jump_host_name = self._config.get("jump_host", None)
self.jump_connection = None
if jump_host_name is not None:
host = self._context.get(jump_host_name, None)
LOG.debug("jump_host ip:%s user:%s", host['ip'], host['user'])
self.jump_connection = ssh.SSH.from_node(
host,
# why do we allow pwd for password?
defaults={"user": "root", "password": host.get("pwd")}
)
self.jump_connection.wait(timeout=600)
LOG.debug("ssh jump host success!")
if self.jump_connection is not None:
with open(self.recovery_script, "r") as stdin_file:
self.jump_connection.execute(
"/bin/bash -s {0} {1} {2} {3}".format(
self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"),
stdin=stdin_file)
else:
_execute_shell_command(
"/bin/bash -s {0} {1} {2} {3}".format(
self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"),
stdin=open(self.recovery_script, "r"))
def _test(): # pragma: no cover
host = {
"ipmi_ip": "10.20.0.5",
"ipmi_user": "root",
"ipmi_pwd": "123456",
"ip": "10.20.0.5",
"user": "root",
"key_filename": "/root/.ssh/id_rsa"
}
context = {"node1": host}
attacker_cfg = {
'fault_type': 'bear-metal-down',
'host': 'node1',
}
ins = BaremetalAttacker(attacker_cfg, context)
ins.setup()
ins.inject_fault()
if __name__ == '__main__': # pragma: no cover
_test()
| 34.55303 | 78 | 0.578601 |
f7684320cbacbbc0509f0ffb9327527104eb6e74
| 5,381 |
py
|
Python
|
backend/src/baserow/contrib/database/api/rows/serializers.py
|
ericderace/baserow
|
7b35e81f75166d914d07ef4ad0c30c625b6bb396
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/api/rows/serializers.py
|
ericderace/baserow
|
7b35e81f75166d914d07ef4ad0c30c625b6bb396
|
[
"MIT"
] | 6 |
2021-04-08T22:03:06.000Z
|
2022-01-13T03:38:17.000Z
|
backend/src/baserow/contrib/database/api/rows/serializers.py
|
ericderace/baserow
|
7b35e81f75166d914d07ef4ad0c30c625b6bb396
|
[
"MIT"
] | null | null | null |
import logging
from rest_framework import serializers
from baserow.api.utils import get_serializer_class
from baserow.api.serializers import get_example_pagination_serializer_class
from baserow.core.utils import model_default_values, dict_to_object
from baserow.contrib.database.fields.registries import field_type_registry
logger = logging.getLogger(__name__)
class RowSerializer(serializers.ModelSerializer):
class Meta:
fields = ('id', 'order',)
extra_kwargs = {
'id': {'read_only': True},
'order': {'read_only': True}
}
def get_row_serializer_class(model, base_class=None, is_response=False,
field_ids=None):
"""
Generates a Django rest framework model serializer based on the available fields
that belong to this model. For each table field, used to generate this serializer,
a serializer field will be added via the `get_serializer_field` method of the field
type.
:param model: The model for which to generate a serializer.
:type model: Model
:param base_class: The base serializer class that will be extended when
generating the serializer. By default this is a regular ModelSerializer.
:type base_class: ModelSerializer
:param is_response: Indicates if the serializer is going to be used for a response
instead of handling input data. If that is the case other serializer fields
might be used depending on the field type.
:type is_response: bool
:param field_ids: If provided only the field ids in the list will be included in
the serializer. By default all the fields of the model are going to be
included. Note that the field id must exist in the model in order to work.
:type field_ids: list or None
:return: The generated serializer.
:rtype: ModelSerializer
"""
field_objects = model._field_objects
field_names = [
field['name']
for field in field_objects.values()
if field_ids is None or field['field'].id in field_ids
]
field_overrides = {
field['name']:
field['type'].get_response_serializer_field(field['field'])
if is_response else
field['type'].get_serializer_field(field['field'])
for field in field_objects.values()
if field_ids is None or field['field'].id in field_ids
}
return get_serializer_class(model, field_names, field_overrides, base_class)
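
# Illustrative usage sketch (not part of the original module), assuming `model`
# is a generated table model with its `_field_objects` populated:
#
#     serializer_class = get_row_serializer_class(
#         model, base_class=RowSerializer, is_response=True
#     )
#     serialized_row = serializer_class(row_instance).data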
def get_example_row_serializer_class(add_id=False):
"""
Generates a serializer containing a field for each field type. It is only used for
example purposes in the openapi documentation.
:param add_id: Indicates whether the id field should be added. This could for
example differ for request or response documentation.
:type add_id: bool
:return: Generated serializer containing a field for each field type.
:rtype: Serializer
"""
if not hasattr(get_example_row_serializer_class, 'cache'):
get_example_row_serializer_class.cache = {}
class_name = (
'ExampleRowResponseSerializer'
if add_id else
'ExampleRowRequestSerializer'
)
if class_name in get_example_row_serializer_class.cache:
return get_example_row_serializer_class.cache[class_name]
fields = {}
if add_id:
fields['id'] = serializers.IntegerField(
read_only=True,
help_text='The unique identifier of the row in the table.'
)
fields['order'] = serializers.DecimalField(
max_digits=40, decimal_places=20, required=False,
help_text='Indicates the position of the row, lowest first and highest '
'last.'
)
field_types = field_type_registry.registry.values()
if len(field_types) == 0:
logger.warning('The field types appear to be empty. This module is probably '
'imported before the fields have been registered.')
for i, field_type in enumerate(field_types):
# In order to generate a serializer we need a model instance. This method is
# called before Django has been loaded so it will result in errors when
# creating an instance. Therefore we create an object containing the default
# field values of the model. With the object we can generate the example
# serializer.
defaults = model_default_values(field_type.model_class)
instance = dict_to_object(defaults)
kwargs = {
'help_text': f'This field represents the `{field_type.type}` field. The '
f'number in field_{i + 1} is in a normal request or response '
f'the id of the field. '
f'{field_type.get_serializer_help_text(instance)}'
}
get_field_method = \
'get_response_serializer_field' if add_id else 'get_serializer_field'
serializer_field = getattr(field_type, get_field_method)(instance, **kwargs)
fields[f'field_{i + 1}'] = serializer_field
class_object = type(class_name, (serializers.Serializer,), fields)
get_example_row_serializer_class.cache[class_name] = class_object
return class_object
example_pagination_row_serializer_class = get_example_pagination_serializer_class(
get_example_row_serializer_class(True)
)
| 39.566176 | 87 | 0.688162 |
f7686beb9cf80d0dc4ae040c107009ad91602ab7
| 534 |
py
|
Python
|
sanity/management/commands/update_index.py
|
TIBHannover/arxiv_preserver
|
aefdacaa9d1c4c13b51e1812e6218c0025ab0a11
|
[
"MIT"
] | null | null | null |
sanity/management/commands/update_index.py
|
TIBHannover/arxiv_preserver
|
aefdacaa9d1c4c13b51e1812e6218c0025ab0a11
|
[
"MIT"
] | null | null | null |
sanity/management/commands/update_index.py
|
TIBHannover/arxiv_preserver
|
aefdacaa9d1c4c13b51e1812e6218c0025ab0a11
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand, CommandError
import sanity.models as models
import re
import time
import datetime
import os
from django.db.models import Max
class Command(BaseCommand):
help = "Closes the specified poll for voting"
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
start = time.time()
models.PaperIndex.generate()
end = time.time()
self.stdout.write(self.style.SUCCESS("Index created in {}s".format(end - start)))
| 22.25 | 89 | 0.692884 |
f768f53ed3d818d4f87f83c7c8808f40808edebc
| 2,296 |
py
|
Python
|
test/lmp/script/gen_txt/test_parse_args.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
test/lmp/script/gen_txt/test_parse_args.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
test/lmp/script/gen_txt/test_parse_args.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
"""Test parsing arguments.
Test target:
- :py:meth:`lmp.script.gen_txt.parse_args`.
"""
import lmp.infer
import lmp.script.gen_txt
from lmp.infer import Top1Infer, TopKInfer, TopPInfer
def test_top_1_parse_results(ckpt: int, exp_name: str, max_seq_len: int, seed: int) -> None:
"""Must correctly parse all arguments for :py:class:`lmp.infer.Top1Infer`."""
txt = 'Hello world'
args = lmp.script.gen_txt.parse_args(
argv=[
Top1Infer.infer_name,
'--ckpt',
str(ckpt),
'--exp_name',
exp_name,
'--max_seq_len',
str(max_seq_len),
'--seed',
str(seed),
'--txt',
txt,
]
)
assert args.ckpt == ckpt
assert args.exp_name == exp_name
assert args.infer_name == Top1Infer.infer_name
assert args.max_seq_len == max_seq_len
assert args.seed == seed
assert args.txt == txt
def test_top_k_parse_results(ckpt: int, exp_name: str, max_seq_len: int, seed: int) -> None:
"""Must correctly parse all arguments for :py:class:`lmp.infer.TopKInfer`."""
k = 5
txt = 'Hello world'
args = lmp.script.gen_txt.parse_args(
argv=[
TopKInfer.infer_name,
'--ckpt',
str(ckpt),
'--exp_name',
exp_name,
'--k',
str(k),
'--max_seq_len',
str(max_seq_len),
'--seed',
str(seed),
'--txt',
txt,
]
)
assert args.ckpt == ckpt
assert args.exp_name == exp_name
assert args.infer_name == TopKInfer.infer_name
assert args.k == k
assert args.max_seq_len == max_seq_len
assert args.seed == seed
assert args.txt == txt
def test_top_p_parse_results(ckpt: int, exp_name: str, max_seq_len: int, seed: int) -> None:
"""Must correctly parse all arguments for :py:class:`lmp.infer.TopPInfer`."""
p = 0.9
txt = 'Hello world'
args = lmp.script.gen_txt.parse_args(
argv=[
TopPInfer.infer_name,
'--ckpt',
str(ckpt),
'--exp_name',
exp_name,
'--max_seq_len',
str(max_seq_len),
'--p',
str(p),
'--seed',
str(seed),
'--txt',
txt,
]
)
assert args.ckpt == ckpt
assert args.exp_name == exp_name
assert args.infer_name == TopPInfer.infer_name
assert args.max_seq_len == max_seq_len
assert args.p == p
assert args.seed == seed
assert args.txt == txt
| 23.916667 | 92 | 0.616289 |
f768f579d3e2279e1691f6fe0d0a4fc720a5ba60
| 50,104 |
py
|
Python
|
stage/test_cdh_stages.py
|
streamsets/datacollector-tests-external
|
6f255b5e7496deeef333b57a5e9df4911ba3ef00
|
[
"Apache-2.0"
] | 1 |
2020-04-14T03:01:51.000Z
|
2020-04-14T03:01:51.000Z
|
stage/test_cdh_stages.py
|
streamsets/test
|
1ead70179ee92a4acd9cfaa33c56a5a9e233bf3d
|
[
"Apache-2.0"
] | null | null | null |
stage/test_cdh_stages.py
|
streamsets/test
|
1ead70179ee92a4acd9cfaa33c56a5a9e233bf3d
|
[
"Apache-2.0"
] | 1 |
2019-09-14T08:28:36.000Z
|
2019-09-14T08:28:36.000Z
|
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import string
import time
from datetime import datetime
from decimal import Decimal
import pytest
import sqlalchemy
from streamsets.testframework.markers import cluster, sdc_min_version
from streamsets.testframework.utils import get_random_string, Version
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Spark executor was renamed in SDC-10697, so we need to reference it by name.
SPARK_EXECUTOR_STAGE_NAME = 'com_streamsets_datacollector_pipeline_executor_spark_SparkDExecutor'
# Specify a port for SDC RPC stages to use.
SDC_RPC_PORT = 20000
SNAPSHOT_TIMEOUT_SEC = 120
DEFAULT_IMPALA_DB = 'default'
DEFAULT_KUDU_PORT = 7051
@cluster('cdh')
def test_kudu_destination(sdc_builder, sdc_executor, cluster):
"""Simple Dev Raw Data Source to Kudu pipeline.
dev_raw_data_source >> kudu
"""
if not hasattr(cluster, 'kudu'):
pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
# Generate some data.
tour_de_france_contenders = [dict(favorite_rank=1, name='Chris Froome', wins=3),
dict(favorite_rank=2, name='Greg LeMond', wins=3),
dict(favorite_rank=4, name='Vincenzo Nibali', wins=1),
dict(favorite_rank=3, name='Nairo Quintana', wins=0)]
raw_data = ''.join([json.dumps(contender) for contender in tour_de_france_contenders])
# For a little more coverage, we'll map the "favorite_rank" record field to the "rank" column in Kudu.
# These rankings are Dima's opinion and not reflective of the views of StreamSets, Inc.
field_to_column_mapping = [dict(field='/favorite_rank', columnName='rank')]
kudu_table_name = get_random_string(string.ascii_letters, 10)
kudu_master_address = '{}:{}'.format(cluster.server_host, DEFAULT_KUDU_PORT)
# Build the pipeline.
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
kudu = builder.add_stage('Kudu',
type='destination').set_attributes(table_name='{}.{}'.format('impala::default',
kudu_table_name),
default_operation='INSERT',
field_to_column_mapping=field_to_column_mapping)
dev_raw_data_source >> kudu
pipeline = builder.build().configure_for_environment(cluster)
pipeline.delivery_guarantee = 'AT_MOST_ONCE'
# We want to write data once and then stop, but Dev Raw Data Source will keep looping, so we set the rate limit to
# a low value and will rely upon pipeline metrics to know when to stop the pipeline.
pipeline.rate_limit = 4
metadata = sqlalchemy.MetaData()
tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
metadata,
sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String),
sqlalchemy.Column('wins', sqlalchemy.Integer),
impala_partition_by='HASH PARTITIONS 16',
impala_stored_as='KUDU',
impala_table_properties={
'kudu.master_addresses': kudu_master_address,
'kudu.num_tablet_replicas': '1'
})
try:
logger.info('Creating Kudu table %s ...', kudu_table_name)
engine = cluster.kudu.engine
tdf_contenders_table.create(engine)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_batch_count(len(tour_de_france_contenders))
sdc_executor.stop_pipeline(pipeline)
connection = engine.connect()
result = connection.execute(sqlalchemy.sql.select([tdf_contenders_table]).order_by('rank'))
assert list(result) == [tuple([item['favorite_rank'], item['name'], item['wins']])
for item in sorted(tour_de_france_contenders, key=lambda key: key['favorite_rank'])]
finally:
logger.info('Dropping Kudu table %s ...', kudu_table_name)
tdf_contenders_table.drop(engine)
@cluster('cdh')
def test_kudu_destination_unixtime_micro_datatype(sdc_builder, sdc_executor, cluster):
"""
Test Kudu's UNIXTIME_MICRO data type support.
dev_raw_data_source >> kudu
"""
if not hasattr(cluster, 'kudu'):
pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
if Version(cluster.version) < Version('cdh5.12.0'):
pytest.skip('Test requires CDH 5.12.0+ to run')
    # Generate some data. Kudu does not store microseconds here, so set them to 0.
now = datetime.now().replace(microsecond=0)
now_millisecond = time.mktime(now.timetuple()) * 1000
input_data = [dict(id=1, time=now_millisecond)]
raw_data = ''.join([json.dumps(contender) for contender in input_data])
field_to_column_mapping = [dict(field='/id', columnName='id'),
dict(field='/time', columnName='unixtime_micro')]
kudu_table_name = get_random_string(string.ascii_letters, 10)
kudu_master_address = f'{cluster.server_host}:{DEFAULT_KUDU_PORT}'
# Build the pipeline.
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
kudu = builder.add_stage('Kudu',
type='destination').set_attributes(table_name='{}.{}'.format('impala::default',
kudu_table_name),
default_operation='INSERT',
field_to_column_mapping=field_to_column_mapping)
dev_raw_data_source >> kudu
pipeline = builder.build().configure_for_environment(cluster)
pipeline.delivery_guarantee = 'AT_MOST_ONCE'
# We want to write data once and then stop, but Dev Raw Data Source will keep looping, so we set the rate limit to
# a low value and will rely upon pipeline metrics to know when to stop the pipeline.
pipeline.rate_limit = 4
metadata = sqlalchemy.MetaData()
test_table = sqlalchemy.Table(kudu_table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('unixtime_micro', sqlalchemy.TIMESTAMP),
impala_partition_by='HASH PARTITIONS 16',
impala_stored_as='KUDU',
impala_table_properties={
'kudu.master_addresses': kudu_master_address,
'kudu.num_tablet_replicas': '1'
})
try:
logger.info('Creating Kudu table %s ...', kudu_table_name)
engine = cluster.kudu.engine
test_table.create(engine)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_batch_count(len(input_data))
sdc_executor.stop_pipeline(pipeline)
connection = engine.connect()
result = connection.execute(sqlalchemy.sql.select([test_table])).fetchone()
assert list(result) == [1, now]
finally:
logger.info('Dropping Kudu table %s ...', kudu_table_name)
test_table.drop(engine)
@cluster('cdh')
@sdc_min_version('3.6.0')
def test_kudu_destination_decimal_type(sdc_builder, sdc_executor, cluster):
"""Simple Dev Raw Data Source to Kudu pipeline inserting column of decimal type and checking later on
decimal type is correctly stored by querying Kudu database
dev_raw_data_source >> kudu
"""
if not hasattr(cluster, 'kudu'):
pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
if not cluster.kudu.version >= '1.7.0':
pytest.skip('Test only designed to run on Kudu version >= 1.7.0')
# Generate some data.
tour_de_france_contenders = [dict(favorite_rank=1, name='Chris Froome', wins=3, weight=153.22),
dict(favorite_rank=2, name='Greg LeMond', wins=3, weight=158.73),
dict(favorite_rank=4, name='Vincenzo Nibali', wins=1, weight=144),
dict(favorite_rank=3, name='Nairo Quintana', wins=0, weight=165.34)]
raw_data = '\n'.join([json.dumps(contender) for contender in tour_de_france_contenders])
field_to_column_mapping = [dict(field='/favorite_rank', columnName='rank'),
dict(field='/name', columnName='name'),
dict(field='/wins', columnName='wins'),
dict(field='/weight', columnName='weight')]
kudu_table_name = get_random_string(string.ascii_letters, 10)
kudu_master_address = '{}:{}'.format(cluster.server_host, DEFAULT_KUDU_PORT)
# Build the pipeline.
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
kudu = builder.add_stage('Kudu',
type='destination').set_attributes(table_name='{}.{}'.format('impala::default',
kudu_table_name),
default_operation='INSERT',
field_to_column_mapping=field_to_column_mapping)
dev_raw_data_source >> kudu
pipeline = builder.build().configure_for_environment(cluster)
pipeline.delivery_guarantee = 'AT_MOST_ONCE'
# We want to write data once and then stop, but Dev Raw Data Source will keep looping, so we set the rate limit to
# a low value and will rely upon pipeline metrics to know when to stop the pipeline.
pipeline.rate_limit = 4
metadata = sqlalchemy.MetaData()
tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
metadata,
sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String),
sqlalchemy.Column('wins', sqlalchemy.Integer),
sqlalchemy.Column('weight', sqlalchemy.DECIMAL(5, 2)),
impala_partition_by='HASH PARTITIONS 16',
impala_stored_as='KUDU',
impala_table_properties={
'kudu.master_addresses': kudu_master_address,
'kudu.num_tablet_replicas': '1'
})
try:
logger.info('Creating Kudu table %s ...', kudu_table_name)
engine = cluster.kudu.engine
tdf_contenders_table.create(engine)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_batch_count(len(tour_de_france_contenders))
sdc_executor.stop_pipeline(pipeline)
connection = engine.connect()
result = connection.execute(sqlalchemy.sql.select([tdf_contenders_table]).order_by('rank'))
result_list = list(result)
sorted_tour_de_france_contenders = [tuple([item['favorite_rank'], item['name'], item['wins'],
round(Decimal(item['weight']), 2)])
for item in sorted(tour_de_france_contenders,
key=lambda key: key['favorite_rank'])]
assert result_list == sorted_tour_de_france_contenders
finally:
logger.info('Dropping Kudu table %s ...', kudu_table_name)
tdf_contenders_table.drop(engine)
@cluster('cdh')
@sdc_min_version('2.7.0.0')
def test_kudu_lookup_apply_default(sdc_builder, sdc_executor, cluster):
"""
    Test the case where a row matching the primary key is found, but the column that the lookup
    processor needs to return has no value. When a default value is configured, it is applied.
dev_raw_data_source >> record_deduplicator >> kudu >> trash
record_deduplicator >> to_error
"""
if not hasattr(cluster, 'kudu'):
pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
tour_de_france_contenders = [dict(favorite_rank=1),
dict(favorite_rank=2)]
raw_data = ''.join([json.dumps(contender) for contender in tour_de_france_contenders])
key_columns_mapping = [dict(field='/favorite_rank', columnName='rank')]
column_to_output_field_mapping = [dict(columnName='name', field='/name', defaultValue=None),
dict(columnName='wins', field='/wins', defaultValue='0')]
kudu_table_name = get_random_string(string.ascii_letters, 10)
kudu_master_address = '{}:{}'.format(cluster.server_host, DEFAULT_KUDU_PORT)
# Build the pipeline.
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
kudu = builder.add_stage('Kudu Lookup',
type='processor').set_attributes(kudu_masters=kudu_master_address,
kudu_table_name='{}.{}'.format('impala::default',
kudu_table_name),
key_columns_mapping=key_columns_mapping,
column_to_output_field_mapping=column_to_output_field_mapping,
case_sensitive=True,
ignore_missing_value=True)
record_deduplicator = builder.add_stage('Record Deduplicator')
to_error = builder.add_stage('To Error')
trash = builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> kudu >> trash
record_deduplicator >> to_error
pipeline = builder.build().configure_for_environment(cluster)
sdc_executor.add_pipeline(pipeline)
metadata = sqlalchemy.MetaData()
tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
metadata,
sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String),
sqlalchemy.Column('wins', sqlalchemy.Integer),
impala_partition_by='HASH PARTITIONS 16',
impala_stored_as='KUDU',
impala_table_properties={
'kudu.master_addresses': kudu_master_address,
'kudu.num_tablet_replicas': '1'
})
try:
logger.info('Creating Kudu table %s ...', kudu_table_name)
engine = cluster.kudu.engine
tdf_contenders_table.create(engine)
conn = engine.connect()
conn.execute(tdf_contenders_table.insert(), [
{'rank': 1, 'name': None, 'wins': None},
{'rank': 2, 'name': None, 'wins': None}])
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
for result in snapshot[kudu.instance_name].output:
if Version(sdc_executor.version) >= Version('3.2.0.0'):
assert 'name' not in result.field
else:
assert result.field['name'].value == 'None'
assert int(result.field['wins'].value) == 0
finally:
logger.info('Dropping Kudu table %s ...', kudu_table_name)
tdf_contenders_table.drop(engine)
@cluster('cdh')
@sdc_min_version('2.7.0.0')
def test_kudu_lookup_case_sensitive(sdc_builder, sdc_executor, cluster):
"""
    Test the case-sensitive option. This pipeline should fail when the case-sensitive option is false,
    because the random table name contains both uppercase and lowercase characters and the stage then
    converts the table name to all lowercase, so the table won't be found.
dev_raw_data_source >> kudu lookup >> trash
"""
if not hasattr(cluster, 'kudu'):
pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
# Generate some data.
tour_de_france_contenders = [dict(favorite_rank=1),
dict(favorite_rank=2)]
raw_data = ''.join([json.dumps(contender) for contender in tour_de_france_contenders])
key_columns_mapping = [dict(field='/favorite_rank', columnName='rank')]
column_to_output_field_mapping = [dict(columnName='name', field='/name'),
dict(columnName='wins', field='/wins')]
kudu_table_name = get_random_string(string.ascii_letters, 10)
kudu_master_address = '{}:{}'.format(cluster.server_host, DEFAULT_KUDU_PORT)
# Build the pipeline.
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
kudu = builder.add_stage('Kudu Lookup',
type='processor').set_attributes(kudu_masters=kudu_master_address,
kudu_table_name='{}.{}'.format('impala::default',
kudu_table_name),
key_columns_mapping=key_columns_mapping,
column_to_output_field_mapping=column_to_output_field_mapping,
case_sensitive=False,
ignore_missing_value=True)
trash = builder.add_stage('Trash')
dev_raw_data_source >> kudu >> trash
pipeline = builder.build().configure_for_environment(cluster)
pipeline.configuration["shouldRetry"] = False
sdc_executor.add_pipeline(pipeline)
metadata = sqlalchemy.MetaData()
tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
metadata,
sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String),
sqlalchemy.Column('wins', sqlalchemy.Integer),
impala_partition_by='HASH PARTITIONS 16',
impala_stored_as='KUDU',
impala_table_properties={
'kudu.master_addresses': kudu_master_address,
'kudu.num_tablet_replicas': '1'
})
try:
logger.info('Creating Kudu table %s ...', kudu_table_name)
engine = cluster.kudu.engine
tdf_contenders_table.create(engine)
sdc_executor.start_pipeline(pipeline, wait=False)
sdc_executor.get_pipeline_status(pipeline).wait_for_status('START_ERROR')
# Test will fail if the pipeline doesn't stop at START_ERROR
finally:
logger.info('Dropping Kudu table %s ...', kudu_table_name)
tdf_contenders_table.drop(engine)
@cluster('cdh')
@sdc_min_version('2.7.0.0')
def test_kudu_lookup_data_types(sdc_builder, sdc_executor, cluster):
"""
Tests if outgoing records have correct data types and values.
This test uses a table with a compound key.
dev_raw_data_source >> record_deduplicator >> kudu >> trash
record_deduplicator >> to_error
"""
if not hasattr(cluster, 'kudu'):
pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
# Generate some data.
test_data = [dict(rank=1, name='Chris Froome', wins=100, consecutive=True,
prize=1232354385, total_miles=27454, average_speed=536.1),
dict(rank=2, name='Greg LeMond', wins=50, consecutive=False,
prize=23423958, total_miles=25326, average_speed=500.1),
dict(rank=4, name='Vincenzo Nibali', wins=40, consecutive=False,
prize=987245, total_miles=13534, average_speed=356.9),
dict(rank=3, name='Nairo Quintana', wins=30, consecutive=True,
prize=875432, total_miles=13545, average_speed=289.15)]
tour_de_france_contenders = [dict(favorite_rank=1, name='Chris Froome'),
dict(favorite_rank=2, name='Greg LeMond'),
dict(favorite_rank=4, name='Vincenzo Nibali'),
dict(favorite_rank=3, name='Nairo Quintana'),
dict(favorite_rank=5, name='StreamSets,Inc')] # This should go to error record
raw_data = ''.join([json.dumps(contender) for contender in tour_de_france_contenders])
key_columns_mapping = [dict(field='/favorite_rank', columnName='rank'),
dict(field='/name', columnName='name')]
# Generate different field names than column names in Kudu
column_to_output_field_mapping = [dict(columnName='wins', field='/wins', defaultValue='0'),
dict(columnName='consecutive', field='/consecutive_2017', defaultValue='false'),
dict(columnName='prize', field='/prize_2017', defaultValue='0'),
dict(columnName='total_miles', field='/total_miles_2017', defaultValue='0'),
dict(columnName='average_speed', field='/avg_speed_2017', defaultValue='0')]
kudu_table_name = get_random_string(string.ascii_letters, 10)
kudu_master_address = '{}:{}'.format(cluster.server_host, DEFAULT_KUDU_PORT)
# Build the pipeline.
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
kudu = builder.add_stage('Kudu Lookup',
type='processor').set_attributes(kudu_masters=kudu_master_address,
kudu_table_name='{}.{}'.format('impala::default',
kudu_table_name),
key_columns_mapping=key_columns_mapping,
column_to_output_field_mapping=column_to_output_field_mapping,
case_sensitive=True,
ignore_missing_value=True)
record_deduplicator = builder.add_stage('Record Deduplicator')
to_error = builder.add_stage('To Error')
trash = builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> kudu >> trash
record_deduplicator >> to_error
pipeline = builder.build().configure_for_environment(cluster)
pipeline.delivery_guarantee = 'AT_MOST_ONCE'
sdc_executor.add_pipeline(pipeline)
metadata = sqlalchemy.MetaData()
# Impala cannot create a kudu table with decimal and unixtime_micros
tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
metadata,
sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String, primary_key=True),
sqlalchemy.Column('wins', sqlalchemy.Integer),
sqlalchemy.Column('consecutive', sqlalchemy.Boolean),
sqlalchemy.Column('prize', sqlalchemy.BigInteger),
sqlalchemy.Column('total_miles', sqlalchemy.SmallInteger),
sqlalchemy.Column('average_speed', sqlalchemy.Float),
impala_partition_by='HASH PARTITIONS 16',
impala_stored_as='KUDU',
impala_table_properties={
'kudu.master_addresses': kudu_master_address,
'kudu.num_tablet_replicas': '1'
})
try:
logger.info('Creating Kudu table %s ...', kudu_table_name)
engine = cluster.kudu.engine
tdf_contenders_table.create(engine)
conn = engine.connect()
conn.execute(tdf_contenders_table.insert(), test_data)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Check the data type and value
for actual, expected in zip(snapshot[kudu.instance_name].output, test_data):
assert actual.field['name'].value == expected['name']
assert actual.field['wins'].type == 'INTEGER'
assert int(actual.field['wins'].value) == expected['wins']
assert actual.field['consecutive_2017'].type == 'BOOLEAN'
assert bool(actual.field['consecutive_2017'].value) == expected['consecutive']
assert actual.field['prize_2017'].type == 'LONG'
# Integer is long in Python3
assert int(actual.field['prize_2017'].value) == expected['prize']
assert actual.field['total_miles_2017'].type == 'SHORT'
assert int(actual.field['total_miles_2017'].value) == expected['total_miles']
assert actual.field['avg_speed_2017'].type == 'FLOAT'
assert float(actual.field['avg_speed_2017'].value) == expected['average_speed']
assert len(snapshot[kudu.instance_name].error_records) == 1
finally:
logger.info('Dropping Kudu table %s ...', kudu_table_name)
tdf_contenders_table.drop(engine)
@cluster('cdh')
@sdc_min_version('2.7.0.0')
def test_kudu_lookup_ignore_missing(sdc_builder, sdc_executor, cluster):
"""
    Test the ignore-missing-value option. The default is true; when set to false, a record is sent to
    error when a row is found but its lookup column has no value (null). This test sets the option to
    false and checks the error records.
dev_raw_data_source >> record_deduplicator >> kudu >> trash
record_deduplicator >> to_error
"""
if not hasattr(cluster, 'kudu'):
pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
tour_de_france_contenders = [dict(favorite_rank=1),
dict(favorite_rank=2)]
raw_data = ''.join([json.dumps(contender) for contender in tour_de_france_contenders])
key_columns_mapping = [dict(field='/favorite_rank', columnName='rank')]
column_to_output_field_mapping = [dict(columnName='name', field='/name'),
dict(columnName='wins', field='/wins')]
kudu_table_name = get_random_string(string.ascii_letters, 10)
kudu_master_address = '{}:{}'.format(cluster.server_host, DEFAULT_KUDU_PORT)
# Build the pipeline.
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
kudu = builder.add_stage('Kudu Lookup',
type='processor').set_attributes(kudu_masters=kudu_master_address,
kudu_table_name='{}.{}'.format('impala::default',
kudu_table_name),
key_columns_mapping=key_columns_mapping,
column_to_output_field_mapping=column_to_output_field_mapping,
case_sensitive=True,
ignore_missing_value=False)
record_deduplicator = builder.add_stage('Record Deduplicator')
to_error = builder.add_stage('To Error')
trash = builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> kudu >> trash
record_deduplicator >> to_error
pipeline = builder.build().configure_for_environment(cluster)
sdc_executor.add_pipeline(pipeline)
metadata = sqlalchemy.MetaData()
tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
metadata,
sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String),
sqlalchemy.Column('wins', sqlalchemy.Integer),
impala_partition_by='HASH PARTITIONS 16',
impala_stored_as='KUDU',
impala_table_properties={
'kudu.master_addresses': kudu_master_address,
'kudu.num_tablet_replicas': '1'
})
try:
logger.info('Creating Kudu table %s ...', kudu_table_name)
engine = cluster.kudu.engine
tdf_contenders_table.create(engine)
conn = engine.connect()
conn.execute(tdf_contenders_table.insert(), [
{'rank': 1, 'name': 'Chris Froome', 'wins': None},
{'rank': 2, 'name': 'Greg LeMond', 'wins': None}])
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[kudu.instance_name].error_records) == 2
finally:
logger.info('Dropping Kudu table %s ...', kudu_table_name)
tdf_contenders_table.drop(engine)
@cluster('cdh')
@sdc_min_version('3.1.0.0')
def test_kudu_lookup_missing_primary_keys(sdc_builder, sdc_executor, cluster):
"""
    Test that the lookup can be performed on a column that is not a primary key.
dev_raw_data_source >> record_deduplicator >> kudu lookup >> trash
record_deduplicator >> to_error
"""
if not hasattr(cluster, 'kudu'):
pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
    # Perform a lookup by the column 'name', which is not a primary key, and see if other columns can be retrieved.
tour_de_france_contenders = [dict(name='Chris Froome'),
dict(name='Greg LeMond')]
raw_data = ''.join([json.dumps(contender) for contender in tour_de_france_contenders])
key_columns_mapping = [dict(field='/name', columnName='name')]
column_to_output_field_mapping = [dict(field='/favorite_rank', columnName='rank'),
dict(field='/wins', columnName='wins')]
kudu_table_name = get_random_string(string.ascii_letters, 10)
# Build the pipeline.
kudu_master_address = f'{cluster.server_host}:{DEFAULT_KUDU_PORT}'
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
kudu = builder.add_stage('Kudu Lookup',
type='processor').set_attributes(kudu_masters=kudu_master_address,
kudu_table_name='{}.{}'.format('impala::default',
kudu_table_name),
key_columns_mapping=key_columns_mapping,
column_to_output_field_mapping=column_to_output_field_mapping,
case_sensitive=True,
ignore_missing_value=False)
record_deduplicator = builder.add_stage('Record Deduplicator')
to_error = builder.add_stage('To Error')
trash = builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> kudu >> trash
record_deduplicator >> to_error
pipeline = builder.build().configure_for_environment(cluster)
pipeline.configuration["shouldRetry"] = False
sdc_executor.add_pipeline(pipeline)
metadata = sqlalchemy.MetaData()
tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
metadata,
sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String),
sqlalchemy.Column('wins', sqlalchemy.Integer),
impala_partition_by='HASH PARTITIONS 16',
impala_stored_as='KUDU',
impala_table_properties={
'kudu.master_addresses': kudu_master_address,
'kudu.num_tablet_replicas': '1'
})
try:
logger.info('Creating Kudu table %s ...', kudu_table_name)
engine = cluster.kudu.engine
tdf_contenders_table.create(engine)
conn = engine.connect()
sample_data = [
{'rank': 1, 'name': 'Chris Froome', 'wins': 5},
{'rank': 2, 'name': 'Greg LeMond', 'wins': 10}]
conn.execute(tdf_contenders_table.insert(), sample_data)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Check the returned values
for actual, expected in zip(snapshot[kudu.instance_name].output, sample_data):
assert actual.field['favorite_rank'].value == expected['rank']
assert actual.field['wins'].value == expected['wins']
assert actual.field['name'].value == str(expected['name'])
finally:
logger.info('Dropping Kudu table %s ...', kudu_table_name)
tdf_contenders_table.drop(engine)
@cluster('cdh')
@sdc_min_version('3.6.0')
def test_kudu_lookup_decimal_type(sdc_builder, sdc_executor, cluster):
"""
    After inserting rows into a Kudu table containing a decimal-type column, check that the decimal
    column is correctly retrieved by the Kudu Lookup processor.
dev_raw_data_source >> kudu >> trash
"""
if not hasattr(cluster, 'kudu'):
pytest.skip('Kudu tests only run against clusters with the Kudu service present.')
if not cluster.kudu.version >= '1.7.0':
pytest.skip('Test only designed to run on Kudu version >= 1.7.0')
tour_de_france_contenders = [dict(rank=1, weight=150.58),
dict(rank=2, weight=140.11)]
raw_data = ''.join([json.dumps(contender) for contender in tour_de_france_contenders])
key_columns_mapping = [dict(field='/rank', columnName='rank')]
column_to_output_field_mapping = [dict(columnName='rank', field='/rank'),
dict(columnName='weight', field='/weight', defaultValue='0')]
kudu_table_name = get_random_string(string.ascii_letters, 10)
kudu_master_address = '{}:{}'.format(cluster.server_host, DEFAULT_KUDU_PORT)
# Build the pipeline.
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
kudu = builder.add_stage('Kudu Lookup',
type='processor').set_attributes(kudu_masters=kudu_master_address,
kudu_table_name='{}.{}'.format('impala::default',
kudu_table_name),
key_columns_mapping=key_columns_mapping,
column_to_output_field_mapping=column_to_output_field_mapping,
case_sensitive=True,
ignore_missing_value=True)
trash = builder.add_stage('Trash')
dev_raw_data_source >> kudu >> trash
pipeline = builder.build().configure_for_environment(cluster)
sdc_executor.add_pipeline(pipeline)
metadata = sqlalchemy.MetaData()
tdf_contenders_table = sqlalchemy.Table(kudu_table_name,
metadata,
sqlalchemy.Column('rank', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('weight', sqlalchemy.DECIMAL(5,2)),
impala_partition_by='HASH PARTITIONS 16',
impala_stored_as='KUDU',
impala_table_properties={
'kudu.master_addresses': kudu_master_address,
'kudu.num_tablet_replicas': '1'
})
try:
logger.info('Creating Kudu table %s ...', kudu_table_name)
engine = cluster.kudu.engine
tdf_contenders_table.create(engine)
conn = engine.connect()
conn.execute(tdf_contenders_table.insert(), tour_de_france_contenders)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
        for i, result in enumerate(snapshot[kudu.instance_name].output):
            assert result.field['weight'].value == tour_de_france_contenders[i]['weight']
finally:
logger.info('Dropping Kudu table %s ...', kudu_table_name)
tdf_contenders_table.drop(engine)
@cluster('cdh')
def test_hive_query_executor(sdc_builder, sdc_executor, cluster):
"""Test Hive query executor stage. This is acheived by using a deduplicator which assures us that there is
only one successful ingest. The pipeline would look like:
dev_raw_data_source >> record_deduplicator >> hive_query
>> trash
"""
hive_table_name = get_random_string(string.ascii_letters, 10)
hive_cursor = cluster.hive.client.cursor()
sql_queries = ["CREATE TABLE ${record:value('/text')} (id int, name string)"]
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='TEXT',
raw_data=hive_table_name)
record_deduplicator = builder.add_stage('Record Deduplicator')
trash = builder.add_stage('Trash')
hive_query = builder.add_stage('Hive Query', type='executor').set_attributes(sql_queries=sql_queries)
dev_raw_data_source >> record_deduplicator >> hive_query
record_deduplicator >> trash
pipeline = builder.build(title='Hive query executor pipeline').configure_for_environment(cluster)
sdc_executor.add_pipeline(pipeline)
try:
# assert successful query execution of the pipeline
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert (snapshot[hive_query.instance_name].event_records[0].header['values']['sdc.event.type'] ==
'successful-query')
# assert Hive table creation
assert hive_cursor.table_exists(hive_table_name)
# Re-running the same query to create Hive table should fail the query. So assert the failure.
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert snapshot[hive_query.instance_name].event_records[0].header['values']['sdc.event.type'] == 'failed-query'
finally:
# drop the Hive table
hive_cursor.execute(f'DROP TABLE `{hive_table_name}`')
@cluster('cdh')
def test_mapreduce_executor(sdc_builder, sdc_executor, cluster):
"""Test MapReduce executor stage. This is acheived by using a deduplicator which assures us that there is
only one successful ingest and that we ingest to HDFS. The executor then triggers MapReduce job which should
convert the ingested HDFS Avro data to Parquet. The pipeline would look like:
dev_raw_data_source >> record_deduplicator >> hadoop_fs >= mapreduce
>> trash
"""
hdfs_directory = '/tmp/out/{}'.format(get_random_string(string.ascii_letters, 10))
product_data = [dict(name='iphone', price=649.99),
dict(name='pixel', price=649.89)]
raw_data = ''.join([json.dumps(product) for product in product_data])
avro_schema = ('{ "type" : "record", "name" : "STF", "fields" : '
'[ { "name" : "name", "type" : "string" }, { "name" : "price", "type" : "double" } ] }')
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
raw_data=raw_data)
record_deduplicator = builder.add_stage('Record Deduplicator')
trash = builder.add_stage('Trash')
hadoop_fs = builder.add_stage('Hadoop FS', type='destination')
    # max_records_in_file causes the file to be closed (and the corresponding event to be generated) after each record
hadoop_fs.set_attributes(avro_schema=avro_schema, avro_schema_location='INLINE', data_format='AVRO',
directory_template=hdfs_directory, files_prefix='sdc-${sdc:id()}', max_records_in_file=1)
mapreduce = builder.add_stage('MapReduce', type='executor')
mapreduce.job_type = 'AVRO_PARQUET'
mapreduce.output_directory = hdfs_directory
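    # In the pipeline graph below, `>>` connects record lanes while `>=` connects the
    # event lane, so the MapReduce executor is triggered by Hadoop FS file-closure
    # events (one per file, see max_records_in_file above) rather than by the records.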
dev_raw_data_source >> record_deduplicator >> hadoop_fs >= mapreduce
record_deduplicator >> trash
pipeline = builder.build(title='MapReduce executor pipeline').configure_for_environment(cluster)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# assert events (MapReduce) generated
assert len(snapshot[mapreduce.instance_name].event_records) == len(product_data)
# make sure MapReduce job is done and is successful
for event in snapshot[mapreduce.instance_name].event_records:
job_id = event.field['job-id'].value
assert cluster.yarn.wait_for_job_to_end(job_id) == 'SUCCEEDED'
# assert parquet data is same as what is ingested
for event in snapshot[hadoop_fs.instance_name].event_records:
file_path = event.field['filepath'].value
hdfs_parquet_file_path = '{}.parquet'.format(file_path)
hdfs_data = cluster.hdfs.get_data_from_parquet(hdfs_parquet_file_path)
assert hdfs_data[0] in product_data
finally:
# remove HDFS files
cluster.hdfs.client.delete(hdfs_directory, recursive=True)
@cluster('cdh')
def test_spark_executor(sdc_builder, sdc_executor, cluster):
"""Test Spark executor stage. This is acheived by using 2 pipelines. The 1st pipeline would generate the
application resource file (Python in this case) which will be used by the 2nd pipeline for spark-submit. Spark
executor will do the spark-submit and we assert that it has submitted the job to Yarn. The pipelines would
look like:
dev_raw_data_source >> local_fs >= pipeline_finisher_executor
dev_raw_data_source >> record_deduplicator >> spark_executor
record_deduplicator >> trash
"""
python_data = 'print("Hello World!")'
tmp_directory = '/tmp/out/{}'.format(get_random_string(string.ascii_letters, 10))
python_suffix = 'py'
application_name = ''.join(['stf_', get_random_string(string.ascii_letters, 10)])
# build the 1st pipeline - file generator
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='TEXT',
raw_data=python_data)
local_fs = builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT', directory_template=tmp_directory,
files_prefix='sdc-${sdc:id()}', files_suffix=python_suffix, max_records_in_file=1)
    # we use the finisher so that local_fs can generate an event containing the generated file path
pipeline_finisher_executor = builder.add_stage('Pipeline Finisher Executor')
dev_raw_data_source >> local_fs >= pipeline_finisher_executor
pipeline = builder.build(title='To File pipeline').configure_for_environment(cluster)
sdc_executor.add_pipeline(pipeline)
# run the pipeline and capture the file path
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
file_path = snapshot[local_fs.instance_name].event_records[0].field['filepath'].value
# build the 2nd pipeline - spark executor
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source').set_attributes(data_format='TEXT',
raw_data='dummy')
record_deduplicator = builder.add_stage('Record Deduplicator')
trash = builder.add_stage('Trash')
spark_executor = builder.add_stage(name=SPARK_EXECUTOR_STAGE_NAME)
spark_executor.set_attributes(cluster_manager='YARN',
minimum_number_of_worker_nodes=1,
maximum_number_of_worker_nodes=1,
application_name=application_name,
deploy_mode='CLUSTER',
driver_memory='10m',
executor_memory='10m',
application_resource=file_path,
language='PYTHON')
dev_raw_data_source >> record_deduplicator >> spark_executor
record_deduplicator >> trash
pipeline = builder.build(title='Spark executor pipeline').configure_for_environment(cluster)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_batch_count(1)
sdc_executor.stop_pipeline(pipeline)
# assert Spark executor has triggered the YARN job
assert cluster.yarn.wait_for_app_to_register(application_name)
| 53.245484 | 124 | 0.584644 |
f768fbd890aa858308d2d0a97bcac43e5ac6f865 | 593 | py | Python | workalendar/europe/luxembourg.py | vanadium23/workalendar | 4c67b5a7900fa56d7a93b767c6cbd8f1cc6b70a7 | ["MIT"] | null | null | null | workalendar/europe/luxembourg.py | vanadium23/workalendar | 4c67b5a7900fa56d7a93b767c6cbd8f1cc6b70a7 | ["MIT"] | null | null | null | workalendar/europe/luxembourg.py | vanadium23/workalendar | 4c67b5a7900fa56d7a93b767c6cbd8f1cc6b70a7 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.registry import iso_register
@iso_register('LU')
class Luxembourg(WesternCalendar, ChristianMixin):
name = 'Luxembourg'
include_easter_monday = True
include_ascension = True
include_whit_monday = True
include_all_saints = True
include_assumption = True
include_boxing_day = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Labour Day"),
(6, 23, "Luxembourg National Holiday"),
)
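# Usage sketch (illustrative, not part of the upstream module): assuming the
# standard workalendar calendar API, the registered holidays can be listed as
# follows.
if __name__ == "__main__":
    from datetime import date
    cal = Luxembourg()
    for day, label in cal.holidays(2020):
        print("{} {}".format(day, label))
    # The national holiday (June 23rd) should not be a working day.
    print(cal.is_working_day(date(2020, 6, 23)))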
| 26.954545 | 60 | 0.725126 |
f7692d603bd9297bc2e47fabac253b03fa341830 | 418 | py | Python | src/kill_nodes.py | srikanteswartalluri/container-orchestrator | 3cd07989e28ece0038cb0b7c6bbdc83c1c0ba7a9 | ["Apache-2.0"] | null | null | null | src/kill_nodes.py | srikanteswartalluri/container-orchestrator | 3cd07989e28ece0038cb0b7c6bbdc83c1c0ba7a9 | ["Apache-2.0"] | null | null | null | src/kill_nodes.py | srikanteswartalluri/container-orchestrator | 3cd07989e28ece0038cb0b7c6bbdc83c1c0ba7a9 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
from log_utils import logger
from cli_utils import CLIUtils
from constants import *
(out, err) = CLIUtils.run(LIST_NODES_CMD)
logger.debug('list of containers: {}'.format(out))
lines = out.strip().split("\n")
for i in range(1, len(lines)):
container_id = lines[i].split()[0]
(out, err) = CLIUtils.run(KILL_NODE_CMD.format(container_id))
logger.info("{} killed".format(container_id))
| 24.588235 | 65 | 0.705742 |
f76946044e36c57a8f33127e2b7dffb2b1b1feb9 | 6,791 | py | Python | recipes/windnoise/model/CRN.py | wangwei2009/speechbrain | ebbac4561a9c9101786e0ab0b1105017eb655fc8 | ["Apache-2.0"] | null | null | null | recipes/windnoise/model/CRN.py | wangwei2009/speechbrain | ebbac4561a9c9101786e0ab0b1105017eb655fc8 | ["Apache-2.0"] | null | null | null | recipes/windnoise/model/CRN.py | wangwei2009/speechbrain | ebbac4561a9c9101786e0ab0b1105017eb655fc8 | ["Apache-2.0"] | null | null | null |
"""
Single-channel speech enhancement for wind noise reduction.
Refer to
"A Convolutional Recurrent Neural Network for Real-Time Speech Enhancement".
Authors
* Wang Wei 2021
"""
import torch
import torch.nn as nn
class CNN_Block(torch.nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=[3, 3],
stride=(1,2),
padding=(1,0)) -> None:
super().__init__()
self.layers = torch.nn.ModuleList()
self.layers.append(nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding))
self.layers.append(nn.BatchNorm2d(out_channels))
self.layers.append(nn.ELU())
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class RNN_Block(torch.nn.Module):
def __init__(self,
input_size=1792,
hidden_size=1792,
num_layers=2,
rnn_type='LSTM',
dropout=0.2) -> None:
super().__init__()
self.rnn_type = rnn_type
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = dropout
if self.rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(self.input_size,
self.hidden_size, self.num_layers,
batch_first=True, dropout=self.dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError("""An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(self.input_size, self.hidden_size, self.num_layers, nonlinearity=nonlinearity, dropout=self.dropout)
# self.hidden = self.init_hidden(batch_size)
    def init_hidden(self, batch_size=1):
        # Hidden state shape is (num_layers, batch, hidden_size) for this
        # unidirectional RNN; allocate it on the same device as the weights.
        device = next(self.rnn.parameters()).device
        if self.rnn_type == 'GRU':
            return torch.zeros(self.num_layers, batch_size, self.hidden_size, device=device)
        elif self.rnn_type == 'LSTM':
            return (
                torch.zeros(self.num_layers, batch_size, self.hidden_size, device=device),
                torch.zeros(self.num_layers, batch_size, self.hidden_size, device=device))
        else:
            raise Exception('Unknown rnn_type. Valid options: "GRU", "LSTM"')
def forward(self, x):
self.rnn.flatten_parameters()
x, _ = self.rnn(x)
return x
class DeCNN_Block(torch.nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=[3, 3],
stride=(1,2),
padding=(1,0),
output_padding=0) -> None:
super().__init__()
self.layers = torch.nn.ModuleList()
self.layers.append(
nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding))
self.layers.append(nn.BatchNorm2d(out_channels))
self.layers.append(nn.ELU())
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
class Encoder(torch.nn.Module):
def __init__(self, in_channels=1, channels=16, layers=5, scale=2) -> None:
super().__init__()
self.cnn_b1 = CNN_Block(1, channels)
self.cnn_b2 = CNN_Block(channels, channels*2)
self.cnn_b3 = CNN_Block(channels*2, channels*4)
self.cnn_b4 = CNN_Block(channels*4, channels*8)
self.cnn_b5 = CNN_Block(channels*8, channels*16)
def forward(self, x):
o1 = self.cnn_b1(x)
o2 = self.cnn_b2(o1)
o3 = self.cnn_b3(o2)
o4 = self.cnn_b4(o3)
o5 = self.cnn_b5(o4)
return o1, o2, o3, o4, o5
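# Note: the Decoder below mirrors the Encoder and concatenates each upsampled
# feature map with the matching Encoder output (skip connections), so every
# DeCNN_Block receives twice the channel count of the map it is decoding
# (e.g. 256 + 256 = 512 at the bottleneck when the Encoder uses channels=16).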
class Decoder(torch.nn.Module):
def __init__(self, in_channels=512, layers=5, scale=2) -> None:
super().__init__()
self.decnn_b5 = DeCNN_Block(512, 128)
self.decnn_b4 = DeCNN_Block(256, 64)
self.decnn_b3 = DeCNN_Block(128, 32)
self.decnn_b2 = DeCNN_Block(64, 16, output_padding=(0,1))
self.decnn_b1 = DeCNN_Block(32, 1)
def forward(self, x, decoder_o5, decoder_o4, decoder_o3, decoder_o2, decoder_o1):
o5 = self.decnn_b5(torch.cat((x, decoder_o5), 1))
o4 = self.decnn_b4(torch.cat((o5, decoder_o4), 1))
o3 = self.decnn_b3(torch.cat((o4, decoder_o3), 1))
o2 = self.decnn_b2(torch.cat((o3, decoder_o2), 1))
o = self.decnn_b1(torch.cat((o2, decoder_o1), 1))
return o
class crn(torch.nn.Module):
"""Basic RNN model with projection layers between RNN layers.
Arguments
---------
input_size : int
Size of the expected input in the 3rd dimension.
rnn_size : int
Number of neurons to use in rnn (for each direction -> and <-).
projection : int
Number of neurons in projection layer.
layers : int
Number of RNN layers to use.
"""
def __init__(self, input_size=161, contex=0, bidir=False, rnn_size=128, projection=64, layers=2):
super().__init__()
self.layers = torch.nn.ModuleList()
if input_size == 257:
rnn_size = 1792
elif input_size == 161:
rnn_size = 1024
self.encoder = Encoder()
self.rnn = RNN_Block(input_size=rnn_size, hidden_size=rnn_size)
self.decoder = Decoder()
def forward(self, x: torch.Tensor):
"""model forward
Args:
x (tensor): input tenosr, [N,T,F]
Returns:
[type]: [description]
"""
# N, T, F = x.size()
if len(x.shape)==3:
x = x.unsqueeze(1) # [N,T,F] to [N, 1, T, F]
N, C, T, F = x.size()
o1, o2, o3, o4, o5 = self.encoder(x)
embeded_ch = o5.size(1)
rnn_in = o5.transpose(1, 2)
rnn_in = rnn_in.reshape(N, T, -1)
rnn_out = self.rnn(rnn_in)
rnn_out = rnn_out.unsqueeze(1)
decoder_in = rnn_out.reshape(N, embeded_ch, T, -1)
decoder_out = self.decoder(decoder_in, o5, o4, o3, o2, o1)
return decoder_out.squeeze(1)
if __name__ == "__main__":
N, C, T, F = 10, 1, 100, 257
data = torch.rand((N, T,F))
print(data.shape)
model = crn(input_size=F)
output = model(data)
print(output.shape)
# input_size = 257
# contex = 3
# model = CustomModel(input_size, contex=contex)
# # input_data = torch.rand(100, 20, input_size)
from torchsummary import summary
summary(model, (1, 100, 257))
| 31.009132 | 130 | 0.581799 |
f7698d5b9fc03824409bc00b255d8a1eadcba365 | 62,024 | py | Python | cla-backend/cla/routes.py | CareyLF/easycla | f251867abb012f98acb7451bdf21b1bc64ffe77e | ["Apache-2.0", "CC-BY-4.0", "MIT"] | 1 | 2019-09-26T10:34:01.000Z | 2019-09-26T10:34:01.000Z | cla-backend/cla/routes.py | CareyLF/easycla | f251867abb012f98acb7451bdf21b1bc64ffe77e | ["Apache-2.0", "CC-BY-4.0", "MIT"] | null | null | null | cla-backend/cla/routes.py | CareyLF/easycla | f251867abb012f98acb7451bdf21b1bc64ffe77e | ["Apache-2.0", "CC-BY-4.0", "MIT"] | null | null | null |
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
"""
The entry point for the CLA service. Lays out all routes and controller functions.
"""
import hug
from falcon import HTTP_401
from hug.middleware import LogMiddleware
import cla
import cla.auth
import cla.controllers.company
import cla.controllers.gerrit
import cla.controllers.github
import cla.controllers.project
import cla.controllers.project_logo
import cla.controllers.repository
import cla.controllers.repository_service
import cla.controllers.signature
import cla.controllers.signing
import cla.controllers.user
import cla.hug_types
import cla.salesforce
from cla.utils import get_supported_repository_providers, \
get_supported_document_content_types, \
get_session_middleware
#
# Middleware
#
# Session Middleware
# hug.API('cla/routes').http.add_middleware(get_session_middleware())
# CORS Middleware
@hug.response_middleware()
def process_data(request, response, resource):
# response.set_header('Access-Control-Allow-Origin', cla.conf['ALLOW_ORIGIN'])
response.set_header('Access-Control-Allow-Origin', '*')
response.set_header('Access-Control-Allow-Credentials', 'true')
response.set_header('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS')
response.set_header('Access-Control-Allow-Headers', 'Content-Type, Authorization')
# The custom 404 handler below is commented out so that hug's default 404 behaviour is used.
# Custom 404.
#
# @hug.not_found()
# def not_found():
# """Custom 404 handler to hide the default hug behaviour of displaying all available routes."""
# return {'error': {'status': status.HTTP_NOT_FOUND,
# 'description': 'URL is invalid.'
# }
# }
@hug.directive()
def check_auth(request=None, **kwargs):
"""Returns the authenticated user"""
return request and cla.auth.authenticate_user(request.headers)
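# (hug resolves this directive wherever a route parameter is annotated with
# `check_auth`, e.g. `def get_signature(auth_user: check_auth, ...)` below; the
# directive runs before the handler and its return value is injected, so the
# value cannot be supplied directly by the client.)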
@hug.exception(cla.auth.AuthError)
def handle_auth_error(exception, response=None, **kwargs):
"""Handles authentication errors"""
response.status = HTTP_401
return exception.response
#
# Health check route.
#
@hug.get('/health', versions=2)
def get_health(request):
"""
GET: /health
Returns a basic health check on the CLA system.
"""
cla.salesforce.get_projects(request, '')
request.context['session']['health'] = 'up'
return request.headers
#
# User routes.
#
# @hug.get('/user', versions=1)
# def get_users():
# """
# GET: /user
# Returns all CLA users.
# """
# # staff_verify(user)
# return cla.controllers.user.get_users()
@hug.get('/user/{user_id}', versions=2)
def get_user(request,
user_id: hug.types.uuid):
"""
GET: /user/{user_id}
Returns the requested user data based on ID.
"""
try:
auth_user = check_auth(request)
except cla.auth.AuthError as auth_err:
if auth_err.response == 'missing authorization header':
cla.log.info('getting github user: {}'.format(user_id))
else:
raise auth_err
return cla.controllers.user.get_user(user_id=user_id)
# @hug.get('/user/email/{user_email}', versions=1)
# def get_user_email(user_email: cla.hug_types.email, auth_user: check_auth):
# """
# GET: /user/email/{user_email}
# Returns the requested user data based on user email.
# TODO: Need to look into whether this has to be locked down more (by staff maybe?). Would that
# break the user flow from GitHub?
# """
# return cla.controllers.user.get_user(user_email=user_email)
@hug.post('/user/gerrit', versions=1)
def post_or_get_user_gerrit(auth_user: check_auth):
"""
    POST: /user/gerrit
    For a Gerrit user, a user with an LFID may already exist in the database.
    This endpoint gets the user ID for Gerrit, creating the user first if it does not yet exist.
"""
return cla.controllers.user.get_or_create_user(auth_user).to_dict()
# @hug.get('/user/github/{user_github_id}', versions=1)
# def get_user_github(user_github_id: hug.types.number, user: cla_user):
# """
# GET: /user/github/{user_github_id}
# Returns the requested user data based on user GitHub ID.
# TODO: Should this be locked down more? Staff only?
# """
# return cla.controllers.user.get_user(user_github_id=user_github_id)
# @hug.post('/user', versions=1,
# examples=" - {'user_email': '[email protected]', 'user_name': 'User Name', \
# 'user_company_id': '<org-id>', 'user_github_id': 12345)")
# def post_user(user: cla_user, user_email: cla.hug_types.email, user_name=None,
# user_company_id=None, user_github_id=None):
# """
# POST: /user
# DATA: {'user_email': '[email protected]', 'user_name': 'User Name',
# 'user_company_id': '<org-id>', 'user_github_id': 12345}
# Returns the data of the newly created user.
# """
# # staff_verify(user) # Only staff can create users.
# return cla.controllers.user.create_user(user_email=user_email,
# user_name=user_name,
# user_company_id=user_company_id,
# user_github_id=user_github_id)
# @hug.put('/user', versions=1,
# examples=" - {'user_id': '<user-id>', 'user_github_id': 23456)")
# def put_user(user: cla_user, user_id: hug.types.uuid, user_email=None, user_name=None,
# user_company_id=None, user_github_id=None):
# """
# PUT: /user
# DATA: {'user_id': '<user-id>', 'user_github_id': 23456}
# Supports all the same fields as the POST equivalent.
# Returns the data of the updated user.
# TODO: Should the user be able to update their own CLA data?
# """
# return cla.controllers.user.update_user(user_id,
# user_email=user_email,
# user_name=user_name,
# user_company_id=user_company_id,
# user_github_id=user_github_id)
# @hug.delete('/user/{user_id}', versions=1)
# def delete_user(user: cla_user, user_id: hug.types.uuid):
# """
# DELETE: /user/{user_id}
# Deletes the specified user.
# """
# # staff_verify(user)
# return cla.controllers.user.delete_user(user_id)
@hug.get('/user/{user_id}/signatures', versions=1)
def get_user_signatures(auth_user: check_auth, user_id: hug.types.uuid):
"""
GET: /user/{user_id}/signatures
Returns a list of signatures associated with a user.
"""
return cla.controllers.user.get_user_signatures(user_id)
@hug.get('/users/company/{user_company_id}', versions=1)
def get_users_company(auth_user: check_auth, user_company_id: hug.types.uuid):
"""
GET: /users/company/{user_company_id}
    Returns a list of users associated with a company.
TODO: Should probably not simply be auth only - need some role check?
"""
return cla.controllers.user.get_users_company(user_company_id)
@hug.post('/user/{user_id}/request-company-whitelist/{company_id}', versions=2)
def request_company_whitelist(user_id: hug.types.uuid, company_id: hug.types.uuid,
user_email: cla.hug_types.email, project_id: hug.types.uuid, message=None):
"""
POST: /user/{user_id}/request-company-whitelist/{company_id}
DATA: {'user_email': <email-selection>, 'message': 'custom message to manager'}
    Performs the necessary actions (i.e. send email to manager) when the specified user requests to
    be added to the specified company's whitelist.
"""
return cla.controllers.user.request_company_whitelist(user_id, str(company_id), str(user_email), str(project_id),
message)
@hug.post('/user/{user_id}/invite-company-admin', versions=2)
def invite_company_admin(user_id: hug.types.uuid,
user_email: cla.hug_types.email,
admin_name: hug.types.text,
admin_email: cla.hug_types.email,
project_name: hug.types.text):
"""
POST: /user/{user_id}/invite-company-admin
DATA: {
'admin_name': John Doe,
'admin_email': [email protected],
'user_email': [email protected],
'project_name': Project Name
}
Sends an Email to the user's admin to sign up through the ccla console.
"""
return cla.controllers.user.invite_company_admin(user_id, str(user_email), str(admin_name), str(admin_email),
project_name)
@hug.post('/user/{user_id}/request-company-ccla', versions=2)
def request_company_ccla(user_id: hug.types.uuid,
user_email: cla.hug_types.email,
company_id: hug.types.uuid,
project_id: hug.types.uuid):
"""
POST: /user/{user_id}/request_company_ccla
Sends an Email to an admin of an existing company to sign a CCLA.
"""
return cla.controllers.user.request_company_ccla(str(user_id), str(user_email), str(company_id), str(project_id))
@hug.post('/user/{user_id}/company/{company_id}/request-access', versions=2)
def request_company_admin_access(user_id: hug.types.uuid,
company_id: hug.types.uuid):
"""
POST: /user/{user_id}/company/{company_id}/request-access
Sends an Email for a user requesting access to be on Company ACL.
"""
return cla.controllers.user.request_company_admin_access(str(user_id), str(company_id))
@hug.get('/user/{user_id}/active-signature', versions=2)
def get_user_active_signature(user_id: hug.types.uuid):
"""
GET: /user/{user_id}/active-signature
Returns all metadata associated with a user's active signature.
{'user_id': <user-id>,
'project_id': <project-id>,
'repository_id': <repository-id>,
'pull_request_id': <PR>,
'return_url': <url-where-user-initiated-signature-from>'}
Returns null if the user does not have an active signature.
"""
return cla.controllers.user.get_active_signature(user_id)
@hug.get('/user/{user_id}/project/{project_id}/last-signature', versions=2)
def get_user_project_last_signature(user_id: hug.types.uuid, project_id: hug.types.uuid):
"""
GET: /user/{user_id}/project/{project_id}/last-signature
Returns the user's latest ICLA signature for the project specified.
"""
return cla.controllers.user.get_user_project_last_signature(user_id, project_id)
@hug.get('/user/{user_id}/project/{project_id}/last-signature/{company_id}', versions=1)
def get_user_project_company_last_signature(user_id: hug.types.uuid,
project_id: hug.types.uuid,
company_id: hug.types.uuid):
"""
GET: /user/{user_id}/project/{project_id}/last-signature/{company_id}
Returns the user's latest employee signature for the project and company specified.
"""
return cla.controllers.user.get_user_project_company_last_signature(user_id, project_id, company_id)
# #
# # Signature Routes.
# #
# @hug.get('/signature', versions=1)
# def get_signatures(auth_user: check_auth):
# """
# GET: /signature
# Returns all CLA signatures.
# """
# # staff_verify(user)
# return cla.controllers.signature.get_signatures()
@hug.get('/signature/{signature_id}', versions=1)
def get_signature(auth_user: check_auth, signature_id: hug.types.uuid):
"""
GET: /signature/{signature_id}
Returns the CLA signature requested by UUID.
"""
return cla.controllers.signature.get_signature(signature_id)
@hug.post('/signature', versions=1,
examples=" - {'signature_type': 'cla', 'signature_signed': true, \
'signature_approved': true, 'signature_sign_url': 'http://sign.com/here', \
'signature_return_url': 'http://cla-system.com/signed', \
'signature_project_id': '<project-id>', \
'signature_reference_id': '<ref-id>', \
'signature_reference_type': 'individual'}")
def post_signature(auth_user: check_auth, # pylint: disable=too-many-arguments
signature_project_id: hug.types.uuid,
signature_reference_id: hug.types.text,
signature_reference_type: hug.types.one_of(['company', 'user']),
signature_type: hug.types.one_of(['cla', 'dco']),
signature_signed: hug.types.smart_boolean,
signature_approved: hug.types.smart_boolean,
signature_return_url: cla.hug_types.url,
signature_sign_url: cla.hug_types.url,
signature_user_ccla_company_id=None):
"""
POST: /signature
DATA: {'signature_type': 'cla',
'signature_signed': true,
'signature_approved': true,
'signature_sign_url': 'http://sign.com/here',
'signature_return_url': 'http://cla-system.com/signed',
'signature_project_id': '<project-id>',
'signature_user_ccla_company_id': '<company-id>',
'signature_reference_id': '<ref-id>',
'signature_reference_type': 'individual'}
signature_reference_type is either 'individual' or 'corporate', depending on the CLA type.
signature_reference_id needs to reflect the user or company tied to this signature.
Returns a CLA signatures that was created.
"""
return cla.controllers.signature.create_signature(signature_project_id,
signature_reference_id,
signature_reference_type,
signature_type=signature_type,
signature_user_ccla_company_id=signature_user_ccla_company_id,
signature_signed=signature_signed,
signature_approved=signature_approved,
signature_return_url=signature_return_url,
signature_sign_url=signature_sign_url)
@hug.put('/signature', versions=1,
examples=" - {'signature_id': '01620259-d202-4350-8264-ef42a861922d', \
'signature_type': 'cla', 'signature_signed': true}")
def put_signature(auth_user: check_auth, # pylint: disable=too-many-arguments
signature_id: hug.types.uuid,
signature_project_id=None,
signature_reference_id=None,
signature_reference_type=None,
signature_type=None,
signature_signed=None,
signature_approved=None,
signature_return_url=None,
signature_sign_url=None,
domain_whitelist=None,
email_whitelist=None,
github_whitelist=None):
"""
PUT: /signature
DATA: {'signature_id': '<signature-id>',
'signature_type': 'cla', 'signature_signed': true}
Supports all the fields as the POST equivalent.
Returns the CLA signature that was just updated.
"""
return cla.controllers.signature.update_signature(
signature_id,
signature_project_id=signature_project_id,
signature_reference_id=signature_reference_id,
signature_reference_type=signature_reference_type,
signature_type=signature_type,
signature_signed=signature_signed,
signature_approved=signature_approved,
signature_return_url=signature_return_url,
signature_sign_url=signature_sign_url,
domain_whitelist=domain_whitelist,
email_whitelist=email_whitelist,
github_whitelist=github_whitelist)
@hug.delete('/signature/{signature_id}', versions=1)
def delete_signature(auth_user: check_auth, signature_id: hug.types.uuid):
"""
DELETE: /signature/{signature_id}
Deletes the specified signature.
"""
# staff_verify(user)
return cla.controllers.signature.delete_signature(signature_id)
@hug.get('/signatures/user/{user_id}', versions=1)
def get_signatures_user(auth_user: check_auth, user_id: hug.types.uuid):
"""
GET: /signatures/user/{user_id}
Get all signatures for user specified.
"""
return cla.controllers.signature.get_user_signatures(user_id)
@hug.get('/signatures/user/{user_id}/project/{project_id}', versions=1)
def get_signatures_user_project(auth_user: check_auth, user_id: hug.types.uuid, project_id: hug.types.uuid):
"""
GET: /signatures/user/{user_id}/project/{project_id}
Get all signatures for user, filtered by project_id specified.
"""
return cla.controllers.signature.get_user_project_signatures(user_id, project_id)
@hug.get('/signatures/user/{user_id}/project/{project_id}/type/{signature_type}', versions=1)
def get_signatures_user_project(auth_user: check_auth,
user_id: hug.types.uuid,
project_id: hug.types.uuid,
signature_type: hug.types.one_of(['individual', 'employee'])):
"""
GET: /signatures/user/{user_id}/project/{project_id}/type/[individual|corporate|employee]
Get all signatures for user, filtered by project_id and signature type specified.
"""
return cla.controllers.signature.get_user_project_signatures(user_id, project_id, signature_type)
@hug.get('/signatures/company/{company_id}', versions=1)
def get_signatures_company(auth_user: check_auth, company_id: hug.types.uuid):
"""
GET: /signatures/company/{company_id}
Get all signatures for company specified.
"""
return cla.controllers.signature.get_company_signatures_by_acl(auth_user.username, company_id)
@hug.get('/signatures/project/{project_id}', versions=1)
def get_signatures_project(auth_user: check_auth, project_id: hug.types.uuid):
"""
GET: /signatures/project/{project_id}
Get all signatures for project specified.
"""
return cla.controllers.signature.get_project_signatures(project_id)
@hug.get('/signatures/company/{company_id}/project/{project_id}', versions=1)
def get_signatures_project_company(company_id: hug.types.uuid, project_id: hug.types.uuid):
"""
GET: /signatures/company/{company_id}/project/{project_id}
Get all signatures for project specified and a company specified
"""
return cla.controllers.signature.get_project_company_signatures(company_id, project_id)
@hug.get('/signatures/company/{company_id}/project/{project_id}/employee', versions=1)
def get_project_employee_signatures(company_id: hug.types.uuid, project_id: hug.types.uuid):
"""
GET: /signatures/company/{company_id}/project/{project_id}
Get all employee signatures for project specified and a company specified
"""
return cla.controllers.signature.get_project_employee_signatures(company_id, project_id)
@hug.get('/signature/{signature_id}/manager', versions=1)
def get_cla_managers(auth_user: check_auth, signature_id: hug.types.uuid):
"""
    GET: /signature/{signature_id}/manager
Returns the CLA Managers from a CCLA's signature ACL.
"""
return cla.controllers.signature.get_cla_managers(auth_user.username, signature_id)
@hug.post('/signature/{signature_id}/manager', versions=1)
def add_cla_manager(auth_user: check_auth,
signature_id: hug.types.uuid,
lfid: hug.types.text):
"""
    POST: /signature/{signature_id}/manager
    Adds a CLA Manager to a CCLA's signature ACL and returns the new list of CLA managers.
"""
return cla.controllers.signature.add_cla_manager(auth_user, signature_id, lfid)
@hug.delete('/signature/{signature_id}/manager/{lfid}', versions=1)
def remove_cla_manager(auth_user: check_auth,
signature_id: hug.types.uuid,
lfid: hug.types.text):
"""
DELETE: /signature/{signature_id}/manager/{lfid}
Removes a CLA Manager from a CCLA's signature ACL and returns the modified list of CLA Managers.
"""
return cla.controllers.signature.remove_cla_manager(auth_user.username, signature_id, lfid)
#
# Repository Routes.
#
# @hug.get('/repository', versions=1)
# def get_repositories(auth_user: check_auth):
# """
# GET: /repository
# Returns all CLA repositories.
# """
# # staff_verify(user)
# return cla.controllers.repository.get_repositories()
@hug.get('/repository/{repository_id}', versions=1)
def get_repository(auth_user: check_auth, repository_id: hug.types.text):
"""
GET: /repository/{repository_id}
Returns the CLA repository requested by UUID.
"""
return cla.controllers.repository.get_repository(repository_id)
@hug.post('/repository', versions=1,
examples=" - {'repository_project_id': '<project-id>', \
'repository_external_id': 'repo1', \
'repository_name': 'Repo Name', \
'repository_organization_name': 'Organization Name', \
'repository_type': 'github', \
'repository_url': 'http://url-to-repo.com'}")
def post_repository(auth_user: check_auth, # pylint: disable=too-many-arguments
repository_project_id: hug.types.uuid,
repository_name: hug.types.text,
repository_organization_name: hug.types.text,
repository_type: hug.types.one_of(get_supported_repository_providers().keys()),
repository_url: cla.hug_types.url,
repository_external_id=None):
"""
POST: /repository
DATA: {'repository_project_id': '<project-id>',
'repository_external_id': 'repo1',
'repository_name': 'Repo Name',
'repository_organization_name': 'Organization Name',
'repository_type': 'github',
'repository_url': 'http://url-to-repo.com'}
repository_external_id is the ID of the repository given by the repository service provider.
It is used to redirect the user back to the appropriate location once signing is complete.
Returns the CLA repository that was just created.
"""
return cla.controllers.repository.create_repository(auth_user,
repository_project_id,
repository_name,
repository_organization_name,
repository_type,
repository_url,
repository_external_id)
@hug.put('/repository', versions=1,
examples=" - {'repository_id': '<repo-id>', \
                       'repository_url': 'http://new-url-to-repository.com'}")
def put_repository(auth_user: check_auth, # pylint: disable=too-many-arguments
repository_id: hug.types.text,
repository_project_id=None,
repository_name=None,
repository_type=None,
repository_url=None,
repository_external_id=None):
"""
PUT: /repository
DATA: {'repository_id': '<repo-id>',
'repository_url': 'http://new-url-to-repository.com'}
Returns the CLA repository that was just updated.
"""
return cla.controllers.repository.update_repository(
repository_id,
repository_project_id=repository_project_id,
repository_name=repository_name,
repository_type=repository_type,
repository_url=repository_url,
repository_external_id=repository_external_id)
@hug.delete('/repository/{repository_id}', versions=1)
def delete_repository(auth_user: check_auth, repository_id: hug.types.text):
"""
DELETE: /repository/{repository_id}
Deletes the specified repository.
"""
# staff_verify(user)
return cla.controllers.repository.delete_repository(repository_id)
# #
# # Company Routes.
# #
@hug.get('/company', versions=1)
def get_companies(auth_user: check_auth):
"""
GET: /company
Returns all CLA companies associated with user.
"""
cla.controllers.user.get_or_create_user(auth_user) # Find or Create user -- For first login
return cla.controllers.company.get_companies_by_user(auth_user.username)
@hug.get('/company', versions=2)
def get_all_companies():
"""
GET: /company
Returns all CLA companies.
"""
return cla.controllers.company.get_companies()
@hug.get('/company/{company_id}', versions=2)
def get_company(company_id: hug.types.text):
"""
GET: /company/{company_id}
Returns the CLA company requested by UUID.
"""
return cla.controllers.company.get_company(company_id)
@hug.get('/company/{company_id}/project/unsigned', versions=1)
def get_unsigned_projects_for_company(company_id: hug.types.text):
"""
GET: /company/{company_id}/project/unsigned
Returns a list of projects that the company has not signed CCLAs for.
"""
return cla.controllers.project.get_unsigned_projects_for_company(company_id)
@hug.post('/company', versions=1,
examples=" - {'company_name': 'Company Name', \
'company_manager_id': 'user-id'}")
def post_company(response,
auth_user: check_auth,
company_name: hug.types.text,
company_manager_user_name=None,
company_manager_user_email=None,
company_manager_id=None):
"""
POST: /company
DATA: {'company_name': 'Org Name',
'company_manager_id': <user-id>}
Returns the CLA company that was just created.
"""
create_resp = cla.controllers.company.create_company(
auth_user,
company_name=company_name,
company_manager_id=company_manager_id,
company_manager_user_name=company_manager_user_name,
company_manager_user_email=company_manager_user_email)
response.status = create_resp.get("status_code")
return create_resp.get("data")
@hug.put('/company', versions=1,
examples=" - {'company_id': '<company-id>', \
'company_name': 'New Company Name'}")
def put_company(auth_user: check_auth, # pylint: disable=too-many-arguments
company_id: hug.types.uuid,
company_name=None,
company_manager_id=None):
"""
PUT: /company
DATA: {'company_id': '<company-id>',
'company_name': 'New Company Name'}
Returns the CLA company that was just updated.
"""
return cla.controllers.company.update_company(
company_id,
company_name=company_name,
company_manager_id=company_manager_id,
username=auth_user.username)
@hug.delete('/company/{company_id}', versions=1)
def delete_company(auth_user: check_auth, company_id: hug.types.text):
"""
DELETE: /company/{company_id}
Deletes the specified company.
"""
# staff_verify(user)
return cla.controllers.company.delete_company(company_id, username=auth_user.username)
@hug.put('/company/{company_id}/import/whitelist/csv', versions=1)
def put_company_whitelist_csv(body, auth_user: check_auth, company_id: hug.types.uuid):
"""
PUT: /company/{company_id}/import/whitelist/csv
Imports a CSV file of whitelisted user emails.
Expects the first column to have a header in the first row and contain email addresses.
"""
# staff_verify(user) or company_manager_verify(user, company_id)
content = body.read().decode()
return cla.controllers.company.update_company_whitelist_csv(content, company_id, username=auth_user.username)
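# A minimal, illustrative sketch of calling the CSV import endpoint above from
# a client. The host name is a placeholder, the /v1 prefix assumes hug's
# default URL versioning, and authentication headers are omitted; the email
# addresses are made up. The raw CSV text (header row, then one email per
# line) is sent as the request body.
#
#   import requests
#   csv_body = 'Email\[email protected]\[email protected]\n'
#   requests.put(
#       'https://cla.example.org/v1/company/<company-id>/import/whitelist/csv',
#       data=csv_body)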
@hug.get('/companies/{manager_id}', versions=1)
def get_manager_companies(manager_id: hug.types.uuid):
"""
GET: /companies/{manager_id}
Returns a list of companies a manager is associated with
"""
return cla.controllers.company.get_manager_companies(manager_id)
# #
# # Project Routes.
# #
@hug.get('/project', versions=1)
def get_projects(auth_user: check_auth):
"""
GET: /project
Returns all CLA projects.
"""
# staff_verify(user)
projects = cla.controllers.project.get_projects()
# For public endpoint, don't show the project_external_id.
for project in projects:
if 'project_external_id' in project:
del project['project_external_id']
return projects
@hug.get('/project/{project_id}', versions=2)
def get_project(project_id: hug.types.uuid):
"""
GET: /project/{project_id}
Returns the CLA project requested by ID.
"""
project = cla.controllers.project.get_project(project_id)
# For public endpoint, don't show the project_external_id.
if 'project_external_id' in project:
del project['project_external_id']
return project
@hug.get('/project/{project_id}/manager', versions=1)
def get_project_managers(auth_user: check_auth, project_id: hug.types.uuid):
"""
GET: /project/{project_id}/managers
Returns the CLA project managers.
"""
return cla.controllers.project.get_project_managers(auth_user.username, project_id)
@hug.post('/project/{project_id}/manager', versions=1)
def add_project_manager(auth_user: check_auth,
project_id: hug.types.text,
lfid: hug.types.text):
"""
POST: /project/{project_id}/manager
Returns the new list of project managers
"""
return cla.controllers.project.add_project_manager(auth_user.username, project_id, lfid)
@hug.delete('/project/{project_id}/manager/{lfid}', versions=1)
def remove_project_manager(auth_user: check_auth,
project_id: hug.types.text,
lfid: hug.types.text):
"""
    DELETE: /project/{project_id}/manager/{lfid}
    Returns a success message if the manager was removed
"""
return cla.controllers.project.remove_project_manager(auth_user.username, project_id, lfid)
@hug.get('/project/external/{project_external_id}', versions=1)
def get_external_project(auth_user: check_auth, project_external_id: hug.types.text):
"""
GET: /project/external/{project_external_id}
    Returns the list of CLA projects matching the requested external ID.
"""
return cla.controllers.project.get_projects_by_external_id(project_external_id, auth_user.username)
@hug.post('/project', versions=1,
examples=" - {'project_name': 'Project Name'}")
def post_project(auth_user: check_auth, project_external_id: hug.types.text, project_name: hug.types.text,
project_icla_enabled: hug.types.boolean, project_ccla_enabled: hug.types.boolean,
project_ccla_requires_icla_signature: hug.types.boolean):
"""
POST: /project
DATA: {'project_external_id': '<proj-external-id>', 'project_name': 'Project Name',
'project_icla_enabled': True, 'project_ccla_enabled': True,
'project_ccla_requires_icla_signature': True}
Returns the CLA project that was just created.
"""
# staff_verify(user) or pm_verify_external_id(user, project_external_id)
return cla.controllers.project.create_project(project_external_id, project_name,
project_icla_enabled, project_ccla_enabled,
project_ccla_requires_icla_signature,
auth_user.username)
@hug.put('/project', versions=1,
examples=" - {'project_id': '<proj-id>', \
'project_name': 'New Project Name'}")
def put_project(auth_user: check_auth, project_id: hug.types.uuid, project_name=None,
project_icla_enabled=None, project_ccla_enabled=None,
project_ccla_requires_icla_signature=None):
"""
PUT: /project
DATA: {'project_id': '<project-id>',
'project_name': 'New Project Name'}
Returns the CLA project that was just updated.
"""
# staff_verify(user) or pm_verify(user, project_id)
return cla.controllers.project.update_project(project_id, project_name=project_name,
project_icla_enabled=project_icla_enabled,
project_ccla_enabled=project_ccla_enabled,
project_ccla_requires_icla_signature=project_ccla_requires_icla_signature,
username=auth_user.username)
@hug.delete('/project/{project_id}', versions=1)
def delete_project(auth_user: check_auth, project_id: hug.types.uuid):
"""
DELETE: /project/{project_id}
Deletes the specified project.
"""
# staff_verify(user)
return cla.controllers.project.delete_project(project_id, username=auth_user.username)
@hug.get('/project/{project_id}/repositories', versions=1)
def get_project_repositories(auth_user: check_auth, project_id: hug.types.uuid):
"""
GET: /project/{project_id}/repositories
Gets the specified project's repositories.
"""
return cla.controllers.project.get_project_repositories(auth_user, project_id)
@hug.get('/project/{project_id}/repositories_group_by_organization', versions=1)
def get_project_repositories_group_by_organization(auth_user: check_auth, project_id: hug.types.uuid):
"""
    GET: /project/{project_id}/repositories_group_by_organization
    Gets the specified project's repositories, grouped by organization name.
"""
return cla.controllers.project.get_project_repositories_group_by_organization(auth_user, project_id)
@hug.get('/project/{project_id}/configuration_orgs_and_repos', versions=1)
def get_project_configuration_orgs_and_repos(auth_user: check_auth, project_id: hug.types.uuid):
"""
GET: /project/{project_id}/configuration_orgs_and_repos
    Gets the repositories from the GitHub API.
    Gets all repositories for an SFDC project ID.
"""
return cla.controllers.project.get_project_configuration_orgs_and_repos(auth_user, project_id)
@hug.get('/project/{project_id}/document/{document_type}', versions=2)
def get_project_document(project_id: hug.types.uuid,
document_type: hug.types.one_of(['individual', 'corporate'])):
"""
GET: /project/{project_id}/document/{document_type}
Fetch a project's signature document.
"""
return cla.controllers.project.get_project_document(project_id, document_type)
@hug.get('/project/{project_id}/document/{document_type}/pdf', versions=2)
def get_project_document_raw(response, auth_user: check_auth, project_id: hug.types.uuid,
document_type: hug.types.one_of(['individual', 'corporate'])):
"""
GET: /project/{project_id}/document/{document_type}/pdf
Returns the PDF document matching the latest individual or corporate contract for that project.
"""
response.set_header('Content-Type', 'application/pdf')
return cla.controllers.project.get_project_document_raw(project_id, document_type)
@hug.get('/project/{project_id}/document/{document_type}/pdf/{document_major_version}/{document_minor_version}',
         versions=1)
def get_project_document_matching_version(response, auth_user: check_auth, project_id: hug.types.uuid,
document_type: hug.types.one_of(['individual', 'corporate']),
document_major_version: hug.types.number,
document_minor_version: hug.types.number):
"""
GET: /project/{project_id}/document/{document_type}/pdf/{document_major_version}/{document_minor_version}
Returns the PDF document version matching the individual or corporate contract for that project.
"""
response.set_header('Content-Type', 'application/pdf')
return cla.controllers.project.get_project_document_raw(project_id, document_type,
document_major_version=document_major_version,
document_minor_version=document_minor_version)
@hug.get('/project/{project_id}/companies', versions=2)
def get_project_companies(project_id: hug.types.uuid):
"""
GET: /project/{project_id}/companies
    Checks if the project exists and retrieves all companies.
"""
return cla.controllers.project.get_project_companies(project_id)
@hug.post('/project/{project_id}/document/{document_type}', versions=1,
examples=" - {'document_name': 'doc_name.pdf', \
'document_content_type': 'url+pdf', \
'document_content': 'http://url.com/doc.pdf', \
'new_major_version': true}")
def post_project_document(auth_user: check_auth,
project_id: hug.types.uuid,
document_type: hug.types.one_of(['individual', 'corporate']),
document_name: hug.types.text,
document_content_type: hug.types.one_of(get_supported_document_content_types()),
document_content: hug.types.text,
document_preamble=None,
document_legal_entity_name=None,
new_major_version=None):
"""
POST: /project/{project_id}/document/{document_type}
DATA: {'document_name': 'doc_name.pdf',
'document_content_type': 'url+pdf',
'document_content': 'http://url.com/doc.pdf',
'document_preamble': 'Preamble here',
'document_legal_entity_name': 'Legal entity name',
'new_major_version': false}
Creates a new CLA document for a specified project.
    Will create a new revision of the individual or corporate document. If new_major_version is set,
the document will have a new major version and this will force users to re-sign.
If document_content_type starts with 'storage+', the document_content is assumed to be base64
encoded binary data that will be saved in the CLA system's configured storage service.
"""
# staff_verify(user) or pm_verify(user, project_id)
return cla.controllers.project.post_project_document(
project_id=project_id,
document_type=document_type,
document_name=document_name,
document_content_type=document_content_type,
document_content=document_content,
document_preamble=document_preamble,
document_legal_entity_name=document_legal_entity_name,
new_major_version=new_major_version,
username=auth_user.username)
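# A minimal sketch of preparing 'storage+' content for the endpoint above, as
# its docstring describes: the binary document is base64 encoded before being
# sent as document_content with a 'storage+pdf' content type. The file name is
# a placeholder.
#
#   import base64
#   with open('icla.pdf', 'rb') as pdf_file:
#       document_content = base64.b64encode(pdf_file.read()).decode('utf-8')
#   # POST document_content with 'document_content_type': 'storage+pdf'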
@hug.post('/project/{project_id}/document/template/{document_type}', versions=1,
examples=" - {'document_name': 'doc_name.pdf', \
'document_preamble': 'Preamble here', \
'document_legal_entity_name': 'Legal entity name', \
'template_name': 'CNCFTemplate', \
'new_major_version': true}")
def post_project_document_template(auth_user: check_auth,
project_id: hug.types.uuid,
document_type: hug.types.one_of(['individual', 'corporate']),
document_name: hug.types.text,
document_preamble: hug.types.text,
document_legal_entity_name: hug.types.text,
template_name: hug.types.one_of([
'CNCFTemplate',
'OpenBMCTemplate',
'TungstenFabricTemplate',
'OpenColorIOTemplate',
'OpenVDBTemplate',
'ONAPTemplate',
'TektonTemplate'
]),
new_major_version=None):
"""
POST: /project/{project_id}/document/template/{document_type}
    DATA: {'document_name': 'doc_name.pdf',
           'document_preamble': 'Preamble here',
           'document_legal_entity_name': 'Legal entity name',
           'template_name': 'CNCFTemplate',
           'new_major_version': false}
    Creates a new CLA document from a template for a specified project.
    Will create a new revision of the individual or corporate document. If new_major_version is set,
    the document will have a new major version and this will force users to re-sign.
    The document_content_type is assumed to be 'storage+pdf', which means the document content will
    be saved in the CLA system's configured storage service.
    """
# staff_verify(user) or pm_verify(user, project_id)
return cla.controllers.project.post_project_document_template(
project_id=project_id,
document_type=document_type,
document_name=document_name,
document_preamble=document_preamble,
document_legal_entity_name=document_legal_entity_name,
template_name=template_name,
new_major_version=new_major_version,
username=auth_user.username)
@hug.delete('/project/{project_id}/document/{document_type}/{major_version}/{minor_version}', versions=1)
def delete_project_document(auth_user: check_auth,
project_id: hug.types.uuid,
document_type: hug.types.one_of(['individual', 'corporate']),
major_version: hug.types.number,
minor_version: hug.types.number):
# """
# DELETE: /project/{project_id}/document/{document_type}/{revision}
# Delete a project's signature document by revision.
# """
# # staff_verify(user)
return cla.controllers.project.delete_project_document(project_id,
document_type,
major_version,
minor_version,
username=auth_user.username)
# #
# # Document Signing Routes.
# #
@hug.post('/request-individual-signature', versions=2,
examples=" - {'project_id': 'some-proj-id', \
'user_id': 'some-user-uuid'}")
def request_individual_signature(project_id: hug.types.uuid,
user_id: hug.types.uuid,
return_url_type=None,
return_url=None):
"""
POST: /request-individual-signature
DATA: {'project_id': 'some-project-id',
'user_id': 'some-user-id',
'return_url_type': Gerrit/Github. Optional depending on presence of return_url
'return_url': <optional>}
Creates a new signature given project and user IDs. The user will be redirected to the
return_url once signature is complete.
Returns a dict of the format:
{'user_id': <user_id>,
'signature_id': <signature_id>,
'project_id': <project_id>,
'sign_url': <sign_url>}
User should hit the provided URL to initiate the signing process through the
signing service provider.
"""
return cla.controllers.signing.request_individual_signature(project_id, user_id, return_url_type, return_url)
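# An illustrative client-side sketch for the endpoint above. The host name is
# a placeholder and the /v2 prefix assumes hug's default URL versioning; the
# UUIDs are stand-ins. The caller redirects the user to the returned sign_url
# to start the signing flow.
#
#   import requests
#   resp = requests.post(
#       'https://cla.example.org/v2/request-individual-signature',
#       json={'project_id': '<project-uuid>', 'user_id': '<user-uuid>'})
#   sign_url = resp.json()['sign_url']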
@hug.post('/request-corporate-signature', versions=1,
examples=" - {'project_id': 'some-proj-id', \
'company_id': 'some-company-uuid'}")
def request_corporate_signature(auth_user: check_auth,
project_id: hug.types.uuid,
company_id: hug.types.uuid,
send_as_email=False,
authority_name=None,
authority_email=None,
return_url_type=None,
return_url=None):
"""
POST: /request-corporate-signature
DATA: {'project_id': 'some-project-id',
'company_id': 'some-company-id',
'send_as_email': 'boolean',
'authority_name': 'string',
'authority_email': 'string',
'return_url': <optional>}
Creates a new signature given project and company IDs. The manager will be redirected to the
return_url once signature is complete.
    The send_as_email flag determines whether to send the signing document because the signer
may not necessarily be a corporate authority with signing privileges.
Returns a dict of the format:
{'company_id': <user_id>,
'signature_id': <signature_id>,
'project_id': <project_id>,
'sign_url': <sign_url>}
Manager should hit the provided URL to initiate the signing process through the
signing service provider.
"""
# staff_verify(user) or company_manager_verify(user, company_id)
return cla.controllers.signing.request_corporate_signature(auth_user, project_id, company_id, send_as_email,
authority_name, authority_email, return_url_type,
return_url)
@hug.post('/request-employee-signature', versions=2)
def request_employee_signature(project_id: hug.types.uuid,
company_id: hug.types.uuid,
user_id: hug.types.uuid,
return_url_type: hug.types.text,
return_url=None):
"""
POST: /request-employee-signature
DATA: {'project_id': <project-id>,
'company_id': <company-id>,
'user_id': <user-id>,
'return_url': <optional>}
Creates a placeholder signature object that represents an employee of a company having confirmed
that they indeed work for company X which already has a CCLA with the project. This does not
require a full DocuSign signature process, which means the sign/callback URLs and document
versions may not be populated or reliable.
"""
return cla.controllers.signing.request_employee_signature(project_id, company_id, user_id, return_url_type,
return_url)
@hug.post('/check-prepare-employee-signature', versions=2)
def check_and_prepare_employee_signature(project_id: hug.types.uuid,
company_id: hug.types.uuid,
user_id: hug.types.uuid):
"""
POST: /check-employee-ccla-and-whitelist
DATA: {'project_id': <project-id>,
'company_id': <company-id>,
'user_id': <user-id>
}
Checks if an employee is ready to sign a CCLA for a company.
"""
return cla.controllers.signing.check_and_prepare_employee_signature(project_id, company_id, user_id)
@hug.post('/signed/individual/{installation_id}/{github_repository_id}/{change_request_id}', versions=2)
def post_individual_signed(body,
installation_id: hug.types.number,
github_repository_id: hug.types.number,
change_request_id: hug.types.number):
"""
POST: /signed/individual/{installation_id}/{github_repository_id}/{change_request_id}
TODO: Need to protect this endpoint somehow - at the very least ensure it's coming from
DocuSign and the data hasn't been tampered with.
Callback URL from signing service upon ICLA signature.
"""
content = body.read()
return cla.controllers.signing.post_individual_signed(content, installation_id, github_repository_id,
change_request_id)
@hug.post('/signed/gerrit/individual/{user_id}', versions=2)
def post_individual_signed_gerrit(body,
user_id: hug.types.uuid):
"""
    POST: /signed/gerrit/individual/{user_id}
Callback URL from signing service upon ICLA signature for a Gerrit user.
"""
content = body.read()
return cla.controllers.signing.post_individual_signed_gerrit(content, user_id)
@hug.post('/signed/corporate/{project_id}/{company_id}', versions=2)
def post_corporate_signed(body,
project_id: hug.types.uuid,
company_id: hug.types.uuid):
"""
POST: /signed/corporate/{project_id}/{company_id}
TODO: Need to protect this endpoint somehow - at the very least ensure it's coming from
DocuSign and the data hasn't been tampered with.
Callback URL from signing service upon CCLA signature.
"""
content = body.read()
return cla.controllers.signing.post_corporate_signed(content, project_id, company_id)
@hug.get('/return-url/{signature_id}', versions=2)
def get_return_url(signature_id: hug.types.uuid, event=None):
"""
GET: /return-url/{signature_id}
The endpoint the user will be redirected to upon completing signature. Will utilize the
signature's "signature_return_url" field to redirect the user to the appropriate location.
Will also capture the signing service provider's return GET parameters, such as DocuSign's
'event' flag that describes the redirect reason.
"""
return cla.controllers.signing.return_url(signature_id, event)
@hug.post('/send-authority-email', versions=2)
def send_authority_email(auth_user: check_auth,
company_name: hug.types.text,
project_name: hug.types.text,
authority_name: hug.types.text,
authority_email: cla.hug_types.email):
"""
POST: /send-authority-email
DATA: {
'authority_name': John Doe,
'authority_email': [email protected],
'company_id': <company_id>
'project_id': <project_id>
}
"""
return cla.controllers.signing.send_authority_email(company_name, project_name, authority_name, authority_email)
# #
# # Repository Provider Routes.
# #
@hug.get('/repository-provider/{provider}/sign/{installation_id}/{github_repository_id}/{change_request_id}',
versions=2)
def sign_request(provider: hug.types.one_of(get_supported_repository_providers().keys()),
installation_id: hug.types.text,
github_repository_id: hug.types.text,
change_request_id: hug.types.text,
request):
"""
GET: /repository-provider/{provider}/sign/{installation_id}/{repository_id}/{change_request_id}
The endpoint that will initiate a CLA signature for the user.
"""
return cla.controllers.repository_service.sign_request(provider,
installation_id,
github_repository_id,
change_request_id,
request)
@hug.get('/repository-provider/{provider}/oauth2_redirect', versions=2)
def oauth2_redirect(auth_user: check_auth, # pylint: disable=too-many-arguments
provider: hug.types.one_of(get_supported_repository_providers().keys()),
state: hug.types.text,
code: hug.types.text,
repository_id: hug.types.text,
change_request_id: hug.types.text,
request=None):
"""
GET: /repository-provider/{provider}/oauth2_redirect
TODO: This has been deprecated in favor of GET:/github/installation for GitHub Apps.
Handles the redirect from an OAuth2 provider when initiating a signature.
"""
# staff_verify(user)
return cla.controllers.repository_service.oauth2_redirect(provider,
state,
code,
repository_id,
change_request_id,
request)
@hug.post('/repository-provider/{provider}/activity', versions=2)
def received_activity(body,
provider: hug.types.one_of(get_supported_repository_providers().keys())):
"""
POST: /repository-provider/{provider}/activity
TODO: Need to secure this endpoint somehow - maybe use GitHub's Webhook secret option.
Acts upon a code repository provider's activity.
"""
return cla.controllers.repository_service.received_activity(provider,
body)
#
# GitHub Routes.
#
@hug.get('/github/organizations', versions=1)
def get_github_organizations(auth_user: check_auth):
"""
GET: /github/organizations
Returns all CLA Github Organizations.
"""
return cla.controllers.github.get_organizations()
@hug.get('/github/organizations/{organization_name}', versions=1)
def get_github_organization(auth_user: check_auth, organization_name: hug.types.text):
"""
GET: /github/organizations/{organization_name}
Returns the CLA Github Organization requested by Name.
"""
return cla.controllers.github.get_organization(organization_name)
@hug.get('/github/organizations/{organization_name}/repositories', versions=1)
def get_github_organization_repos(auth_user: check_auth, organization_name: hug.types.text):
"""
GET: /github/organizations/{organization_name}/repositories
Returns a list of Repositories selected under this organization.
"""
return cla.controllers.github.get_organization_repositories(organization_name)
@hug.get('/sfdc/{sfid}/github/organizations', versions=1)
def get_github_organization_by_sfid(auth_user: check_auth, sfid: hug.types.text):
"""
    GET: /sfdc/{sfid}/github/organizations
Returns a list of Github Organizations under this SFDC ID.
"""
return cla.controllers.github.get_organization_by_sfid(auth_user, sfid)
@hug.post('/github/organizations', versions=1,
examples=" - {'organization_sfid': '<organization-sfid>', \
'organization_name': 'org-name'}")
def post_github_organization(auth_user: check_auth, # pylint: disable=too-many-arguments
organization_name: hug.types.text,
organization_sfid: hug.types.text):
"""
POST: /github/organizations
DATA: { 'auth_user' : AuthUser to verify user permissions
'organization_sfid': '<sfid-id>',
'organization_name': 'org-name'}
Returns the CLA GitHub Organization that was just created.
"""
return cla.controllers.github.create_organization(auth_user,
organization_name,
organization_sfid)
@hug.delete('/github/organizations/{organization_name}', versions=1)
def delete_organization(auth_user: check_auth, organization_name: hug.types.text):
"""
DELETE: /github/organizations/{organization_name}
Deletes the specified Github Organization.
"""
# staff_verify(user)
return cla.controllers.github.delete_organization(auth_user, organization_name)
@hug.get('/github/installation', versions=2)
def github_oauth2_callback(code, state, request):
"""
GET: /github/installation
TODO: Need to secure this endpoint - possibly with GitHub's Webhook secrets.
    GitHub will send the user to this endpoint when a new OAuth2 handshake occurs.
This needs to match the callback used when users install the app as well (below).
"""
return cla.controllers.github.user_oauth2_callback(code, state, request)
@hug.post('/github/installation', versions=2)
def github_app_installation(body, request, response):
"""
POST: /github/installation
TODO: Need to secure this endpoint - possibly with GitHub's Webhook secret.
    GitHub will fire off this webhook when a new installation of our CLA app occurs.
"""
return cla.controllers.github.user_authorization_callback(body)
@hug.post('/github/activity', versions=2)
def github_app_activity(body, request, response):
"""
POST: /github/activity
TODO: Need to secure this endpoint with GitHub's Webhook secret.
Acts upon any events triggered by our app installed in someone's organization.
"""
# Verify that Webhook Signature is valid
# valid_request = cla.controllers.github.webhook_secret_validation(request.headers.get('X-HUB-SIGNATURE'), request.stream.read())
# cla.log.info(valid_request)
# if valid_request:
return cla.controllers.github.activity(body)
# else:
# response.status = HTTP_403
# return {'status': 'Not Authorized'}
@hug.post('/github/validate', versions=1)
def github_organization_validation(body):
"""
POST: /github/validate
TODO: Need to secure this endpoint with GitHub's Webhook secret.
"""
return cla.controllers.github.validate_organization(body)
@hug.get('/github/check/namespace/{namespace}', versions=1)
def github_check_namespace(namespace):
"""
GET: /github/check/namespace/{namespace}
Returns True if the namespace provided is a valid GitHub account.
"""
return cla.controllers.github.check_namespace(namespace)
@hug.get('/github/get/namespace/{namespace}', versions=1)
def github_get_namespace(namespace):
"""
GET: /github/get/namespace/{namespace}
Returns info on the GitHub account provided.
"""
return cla.controllers.github.get_namespace(namespace)
#
# Gerrit instance routes
#
@hug.get('/project/{project_id}/gerrits', versions=1)
def get_project_gerrit_instance(project_id: hug.types.uuid):
"""
GET: /project/{project_id}/gerrits
Returns all CLA Gerrit instances for this project.
"""
return cla.controllers.gerrit.get_gerrit_by_project_id(project_id)
@hug.get('/gerrit/{gerrit_id}', versions=2)
def get_gerrit_instance(gerrit_id: hug.types.uuid):
"""
    GET: /gerrit/{gerrit_id}
Returns Gerrit instance with the given gerrit id.
"""
return cla.controllers.gerrit.get_gerrit(gerrit_id)
@hug.post('/gerrit', versions=1)
def create_gerrit_instance(project_id: hug.types.uuid,
gerrit_name: hug.types.text,
gerrit_url: cla.hug_types.url,
group_id_icla=None,
group_id_ccla=None):
"""
POST: /gerrit
Creates a gerrit instance
"""
return cla.controllers.gerrit.create_gerrit(project_id, gerrit_name, gerrit_url, group_id_icla, group_id_ccla)
@hug.delete('/gerrit/{gerrit_id}', versions=1)
def delete_gerrit_instance(gerrit_id: hug.types.uuid):
"""
DELETE: /gerrit/{gerrit_id}
Deletes the specified gerrit instance.
"""
return cla.controllers.gerrit.delete_gerrit(gerrit_id)
@hug.get('/gerrit/{gerrit_id}/{contract_type}/agreementUrl.html', versions=2, output=hug.output_format.html)
def get_agreement_html(gerrit_id: hug.types.uuid, contract_type: hug.types.text):
"""
GET: /gerrit/{gerrit_id}/{contract_type}/agreementUrl.html
Generates an appropriate HTML file for display in the Gerrit console.
"""
return cla.controllers.gerrit.get_agreement_html(gerrit_id, contract_type)
# The following routes are only provided for project and cla manager
# permission management, and are not to be called by the UI Consoles.
@hug.get('/project/logo/{project_sfdc_id}', versions=1)
def upload_logo(auth_user: check_auth,
project_sfdc_id: hug.types.text):
return cla.controllers.project_logo.create_signed_logo_url(auth_user, project_sfdc_id)
@hug.post('/project/permission', versions=1)
def add_project_permission(auth_user: check_auth,
username: hug.types.text,
project_sfdc_id: hug.types.text):
return cla.controllers.project.add_permission(auth_user, username, project_sfdc_id)
@hug.delete('/project/permission', versions=1)
def remove_project_permission(auth_user: check_auth,
username: hug.types.text,
project_sfdc_id: hug.types.text):
return cla.controllers.project.remove_permission(auth_user, username, project_sfdc_id)
@hug.post('/company/permission', versions=1)
def add_company_permission(auth_user: check_auth,
username: hug.types.text,
company_id: hug.types.text):
return cla.controllers.company.add_permission(auth_user, username, company_id)
@hug.delete('/company/permission', versions=1)
def remove_company_permission(auth_user: check_auth,
username: hug.types.text,
company_id: hug.types.text):
return cla.controllers.company.remove_permission(auth_user, username, company_id)
# Session Middleware
__hug__.http.add_middleware(get_session_middleware())
__hug__.http.add_middleware(LogMiddleware(logger=cla.log))
| 38.357452 | 133 | 0.643412 |
f769f45a2f46f64a9eda8925d21c7b6218e1e3c8
| 2,240 |
py
|
Python
|
backend/schedule_worker/utils/przystanki.py
|
evemorgen/GdzieJestMojTramwajProject
|
65a090ae4222053a2a0a1b145df5196f3658065c
|
[
"MIT"
] | null | null | null |
backend/schedule_worker/utils/przystanki.py
|
evemorgen/GdzieJestMojTramwajProject
|
65a090ae4222053a2a0a1b145df5196f3658065c
|
[
"MIT"
] | null | null | null |
backend/schedule_worker/utils/przystanki.py
|
evemorgen/GdzieJestMojTramwajProject
|
65a090ae4222053a2a0a1b145df5196f3658065c
|
[
"MIT"
] | null | null | null |
import logging
import os
import json
import networkx as nx
from utils import Singleton
class Przystanki(metaclass=Singleton):
def __init__(self, path=None):
if path is None:
self.path = os.environ['TRAM_ROOT'] + "/data/przystanki_0_159.json"
else:
self.path = path
with open(self.path, 'r') as cfg_file:
self.cfg = json.load(cfg_file)
self.graph = nx.read_yaml(os.environ['TRAM_ROOT'] + "/data/graph.yaml")
self.petle = {k: v for k, v in self.cfg.items() if self.cfg[k]['petla'] is True}
self.skrzyzowania = {k: v for k, v in self.cfg.items() if self.cfg[k]['skrzyzowanie'] is True}
self.przystanki = {k: v for k, v in self.cfg.items() if self.cfg[k]['przystanek'] is True}
self.wszystkie = {**self.petle, **self.skrzyzowania, **self.przystanki}
logging.info('Przystanki initialised')
def get_edge(self, node1, node2):
edges = self.graph.edges(data=True)
for edge in edges:
if (edge[0] == node1 and edge[1] == node2) or (edge[0] == node2 and edge[1] == node1):
return edge
def set_queue(self, node1, node2, queue_name, queue):
self.graph[node1][node2][queue_name] = queue
def get_edges(self, line=None):
edges = self.graph.edges(data=True)
logging.info(edges)
res = []
for edge in edges:
coords = [{'latitude': self.wszystkie[edge[0]]['x'], 'longitude': self.wszystkie[edge[0]]['y']},
{'latitude': self.wszystkie[edge[1]]['x'], 'longitude': self.wszystkie[edge[1]]['y']},
]
if line is not None:
if str(line) not in edge[2]['linie']:
continue
res.append((coords, edge[2]['odleglosc']))
return res
def get(self, item, default=None):
return self.cfg.get(item, default)
def set(self, key, value):
self.cfg[key] = value
def dump(self):
with open(self.path, 'w') as out_file:
json.dump(self.cfg, out_file, indent=4)
def __getitem__(self, key):
return self.cfg.__getitem__(key)
def __contains__(self, key):
return key in self.cfg
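# A brief usage sketch, assuming TRAM_ROOT points at a checkout containing the
# data/ files loaded above; Przystanki is a Singleton, so repeated
# instantiation returns the same object.
#
#   przystanki = Przystanki()
#   edges_for_line_8 = przystanki.get_edges(line=8)  # list of (coords, distance) pairs
#   wszystkie = przystanki.wszystkie                 # every node keyed by name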
| 34.461538 | 108 | 0.578571 |
f769f8f1a9d12106fd0291e9a5a6b9bc646e223b
| 116,388 |
py
|
Python
|
salt/state.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
salt/state.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
salt/state.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
The module used to execute states in salt. A state is unlike a module
execution in that instead of just executing a command it ensures that a
certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
'name': '<the name argument passed to all states>'
'argn': '<arbitrary argument, can have many of these>'
}
'''
# Import python libs
import os
import sys
import copy
import site
import fnmatch
import logging
import traceback
import datetime
# Import salt libs
import salt.utils
import salt.loader
import salt.minion
import salt.pillar
import salt.fileclient
import salt.utils.event
import salt.syspaths as syspaths
from salt.utils import context, immutabletypes
from salt._compat import string_types
from salt.template import compile_template, compile_template_str
from salt.exceptions import SaltRenderError, SaltReqTimeoutError, SaltException
from salt.utils.odict import OrderedDict, DefaultOrderedDict
log = logging.getLogger(__name__)
STATE_INTERNAL_KEYWORDS = frozenset([
# These are keywords passed to state module functions which are to be used
# by salt in this state module and not on the actual state module function
'fun',
'order',
'state',
'watch',
'watch_in',
'prereq',
'prereq_in',
'require',
'require_in',
'onfail',
'fail_hard',
'reload_modules',
'saltenv',
'__id__',
'__sls__',
'__env__',
'__pub_user',
'__pub_arg',
'__pub_jid',
'__pub_fun',
'__pub_tgt',
'__pub_ret',
'__pub_tgt_type',
])
def split_low_tag(tag):
'''
Take a low tag and split it back into the low dict that it came from
'''
state, id_, name, fun = tag.split('_|-')
return {'state': state,
'__id__': id_,
'name': name,
'fun': fun}
def _gen_tag(low):
'''
Generate the running dict tag string from the low data structure
'''
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
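# A worked example of the tag format produced by _gen_tag (the values are
# illustrative): the low chunk
#   {'state': 'file', '__id__': 'httpd_conf',
#    'name': '/etc/httpd/conf/httpd.conf', 'fun': 'managed'}
# yields the running-dict tag
#   'file_|-httpd_conf_|-/etc/httpd/conf/httpd.conf_|-managed'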
def trim_req(req):
'''
Trim any function off of a requisite
'''
reqfirst = next(iter(req))
if '.' in reqfirst:
return {reqfirst.split('.')[0]: req[reqfirst]}
return req
def state_args(id_, state, high):
'''
Return a set of the arguments passed to the named state
'''
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args
def find_name(name, state, high):
'''
Scan high data for the id referencing the given name
'''
ext_id = ''
if name in high:
ext_id = name
else:
# We need to scan for the name
for nid in high:
if state in high[nid]:
if isinstance(
high[nid][state],
list):
for arg in high[nid][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if arg[next(iter(arg))] == name:
ext_id = nid
return ext_id
def format_log(ret):
'''
Format the state into a log message
'''
msg = ''
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if 'changes' in ret:
# Yep, looks like a valid state return
chg = ret['changes']
if not chg:
if ret['comment']:
msg = ret['comment']
else:
msg = 'No changes made for {0[name]}'.format(ret)
elif isinstance(chg, dict):
if 'diff' in chg:
if isinstance(chg['diff'], string_types):
msg = 'File changed:\n{0}'.format(chg['diff'])
if all([isinstance(x, dict) for x in chg.values()]):
if all([('old' in x and 'new' in x)
for x in chg.values()]):
# This is the return data from a package install
msg = 'Installed Packages:\n'
for pkg in chg:
old = chg[pkg]['old'] or 'absent'
new = chg[pkg]['new'] or 'absent'
msg += '{0} changed from {1} to ' \
'{2}\n'.format(pkg, old, new)
if not msg:
msg = str(ret['changes'])
if ret['result'] is True or ret['result'] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(str(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
'''
Compile the master side low state data, and build the hidden state file
'''
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate()
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
class StateError(Exception):
'''
Custom exception class.
'''
pass
class Compiler(object):
'''
Class used to compile and manage the High Data structure
'''
def __init__(self, opts):
self.opts = opts
self.rend = salt.loader.render(self.opts, {})
def render_template(self, template, **kwargs):
'''
Enforce the states in a template
'''
high = compile_template(
template, self.rend, self.opts['renderer'], **kwargs)
if not high:
return high
return self.pad_funcs(high)
def pad_funcs(self, high):
'''
Turns dot delimited function refs into function strings
'''
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], string_types):
                    # Is this a short state? It needs to be padded!
if '.' in high[name]:
comps = high[name].split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
high[name] = {
#'__sls__': template,
#'__env__': None,
comps[0]: [comps[1]]
}
continue
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith('_'):
continue
if not isinstance(high[name][key], list):
continue
if '.' in key:
comps = key.split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high
def verify_high(self, high):
'''
Verify that the high data is viable and follows the data structure
'''
errors = []
if not isinstance(high, dict):
errors.append('High data is not a dictionary and is invalid')
reqs = {}
for name, body in high.items():
if name.startswith('__'):
continue
if not isinstance(name, string_types):
errors.append(
'ID {0!r} in SLS {1!r} is not formed as a string, but is '
'a {2}'.format(name, body['__sls__'], type(name).__name__)
)
if not isinstance(body, dict):
err = ('The type {0} in {1} is not formatted as a dictionary'
.format(name, body))
errors.append(err)
continue
for state in body:
if state.startswith('__'):
continue
if not isinstance(body[state], list):
errors.append(
'State {0!r} in SLS {1!r} is not formed as a list'
.format(name, body['__sls__'])
)
else:
fun = 0
if '.' in state:
fun += 1
for arg in body[state]:
if isinstance(arg, string_types):
fun += 1
if ' ' in arg.strip():
errors.append(('The function "{0}" in state '
'"{1}" in SLS "{2}" has '
'whitespace, a function with whitespace is '
'not supported, perhaps this is an argument '
'that is missing a ":"').format(
arg,
name,
body['__sls__']))
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst in ('require', 'watch', 'prereq'):
if not isinstance(arg[argfirst], list):
errors.append(('The {0}'
' statement in state {1!r} in SLS {2!r} '
'needs to be formed as a list').format(
argfirst,
name,
body['__sls__']
))
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {'state': state}
for req in arg[argfirst]:
if not isinstance(req, dict):
err = ('Requisite declaration {0}'
' in SLS {1} is not formed as a'
' single key dictionary').format(
req,
body['__sls__'])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if '.' in req_key:
errors.append((
'Invalid requisite type {0!r} '
'in state {1!r}, in SLS '
'{2!r}. Requisite types must '
'not contain dots, did you '
'mean {3!r}?'.format(
req_key,
name,
body['__sls__'],
req_key[:req_key.find('.')]
)
))
if not ishashable(req_val):
errors.append((
'Illegal requisite "{0}", '
                                    'in SLS {1}\n'
).format(
str(req_val),
body['__sls__']))
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if reqs[req_val]['state'] == reqs[name][req_val]:
err = ('A recursive '
'requisite was found, SLS '
'"{0}" ID "{1}" ID "{2}"'
).format(
body['__sls__'],
name,
req_val
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(('Multiple dictionaries '
'defined in argument of state {0!r} in SLS'
' {1!r}').format(
name,
body['__sls__']))
if not fun:
if state == 'require' or state == 'watch':
continue
errors.append(('No function declared in state {0!r} in'
' SLS {1!r}').format(state, body['__sls__']))
elif fun > 1:
errors.append(
'Too many functions declared in state {0!r} in '
'SLS {1!r}'.format(state, body['__sls__'])
)
return errors
def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if 'name_order' in chunk:
chunk_order = chunk_order + chunk['name_order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
else:
if isinstance(chunk['order'], int) and 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order')
if not isinstance(chunk['order'], int):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
else:
chunk['order'] = cap
elif isinstance(chunk['order'], int) and chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
return chunks
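    # A worked example of the ordering rules above (chunk names are
    # illustrative): given chunks A(order=5), B(no order), C(order='last')
    # and D(order=-1), the cap becomes 5 + 100 = 105, so B is assigned 105,
    # C becomes 105 + 1000000 = 1000105 and D becomes 105 + 1000000 - 1 =
    # 1000104; the resulting execution order is A, B, D, C.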
def compile_high_data(self, high):
'''
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
'''
chunks = []
for name, body in high.items():
if name.startswith('__'):
continue
for state, run in body.items():
funcs = set()
names = set()
if state.startswith('__'):
continue
chunk = {'state': state,
'name': name}
if '__sls__' in body:
chunk['__sls__'] = body['__sls__']
if '__env__' in body:
chunk['__env__'] = body['__env__']
chunk['__id__'] = name
for arg in run:
if isinstance(arg, string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in arg.items():
if key == 'names':
names.update(val)
continue
else:
chunk.update(arg)
if names:
name_order = 1
for low_name in names:
live = copy.deepcopy(chunk)
live['name'] = low_name
live['name_order'] = name_order
name_order = name_order + 1
for fun in funcs:
live['fun'] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live['fun'] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = exc.keys()[0]
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
            # There are sls excludes, find the associated ids
for name, body in high.items():
if name.startswith('__'):
continue
if body.get('__sls__', '') in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
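    # An illustrative __exclude__ structure consumed by apply_exclude (the sls
    # and id values are placeholders): plain string entries are treated as sls
    # names, while dict entries select explicitly by 'sls' or 'id', e.g. in an
    # SLS file:
    #   exclude:
    #     - base.edit
    #     - sls: base.packages
    #     - id: vim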
class State(object):
'''
Class used to execute salt states
'''
def __init__(self, opts, pillar=None, jid=None):
if 'grains' not in opts:
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
self._pillar_override = pillar
self.opts['pillar'] = self._gather_pillar()
self.state_con = {}
self.load_modules()
self.active = set()
self.mod_init = set()
self.pre = {}
self.__run_num = 0
self.jid = jid
def _gather_pillar(self):
'''
Whenever a state run starts, gather the pillar data fresh
'''
pillar = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
)
ret = pillar.compile_pillar()
if self._pillar_override and isinstance(self._pillar_override, dict):
ret.update(self._pillar_override)
return ret
def _mod_init(self, low):
'''
Check the module initialization function, if this is the first run
of a state package that has a mod_init function, then execute the
mod_init function in the state module.
'''
minit = '{0}.mod_init'.format(low['state'])
if low['state'] not in self.mod_init:
if minit in self.states:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low['state'])
def load_modules(self, data=None):
'''
Load the modules into the state
'''
log.info('Loading fresh modules for state activity')
self.functions = salt.loader.minion_mods(self.opts, self.state_con)
if isinstance(data, dict):
if data.get('provider', False):
if isinstance(data['provider'], str):
providers = [{data['state']: data['provider']}]
elif isinstance(data['provider'], list):
providers = data['provider']
else:
providers = {}
for provider in providers:
for mod in provider:
funcs = salt.loader.raw_mod(self.opts,
provider[mod],
self.functions)
if funcs:
for func in funcs:
f_key = '{0}{1}'.format(
mod,
func[func.rindex('.'):]
)
self.functions[f_key] = funcs[func]
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
def module_refresh(self):
'''
Refresh all the modules
'''
log.debug('Refreshing modules...')
if self.opts['grains'].get('os') != 'MacOS':
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
reload(site)
self.load_modules()
if not self.opts.get('local', False):
self.functions['saltutil.refresh_modules']()
def check_refresh(self, data, ret):
'''
        Check to see if the modules for this state instance need to be updated.
        Only update if the state is a file or a package and if it changed
        something. If the file function is managed, check to see if the file is a
possible module type, e.g. a python, pyx, or .so. Always refresh if the
function is recurse, since that can lay down anything.
'''
if data.get('reload_modules', False) is True:
# User explicitly requests a reload
self.module_refresh()
return
if not ret['changes']:
return
if data['state'] == 'file':
if data['fun'] == 'managed':
if data['name'].endswith(
('.py', '.pyx', '.pyo', '.pyc', '.so')):
self.module_refresh()
elif data['fun'] == 'recurse':
self.module_refresh()
elif data['fun'] == 'symlink':
if 'bin' in data['name']:
self.module_refresh()
elif data['state'] in ('pkg', 'ports'):
self.module_refresh()
def verify_ret(self, ret):
'''
Verify the state return data
'''
if not isinstance(ret, dict):
raise SaltException(
'Malformed state return, return must be a dict'
)
bad = []
for val in ['name', 'result', 'changes', 'comment']:
if val not in ret:
bad.append(val)
if bad:
raise SaltException(
('The following keys were not present in the state '
'return: {0}'
).format(','.join(bad)))
def verify_data(self, data):
'''
Verify the data, return an error statement if something is wrong
'''
errors = []
if 'state' not in data:
errors.append('Missing "state" data')
if 'fun' not in data:
errors.append('Missing "fun" data')
if 'name' not in data:
errors.append('Missing "name" data')
if data['name'] and not isinstance(data['name'], string_types):
errors.append(
'ID {0!r} in SLS {1!r} is not formed as a string, but is '
'a {2}'.format(
data['name'], data['__sls__'], type(data['name']).__name__)
)
if errors:
return errors
full = data['state'] + '.' + data['fun']
if full not in self.states:
if '__sls__' in data:
errors.append(
'State {0!r} found in SLS {1!r} is unavailable'.format(
full,
data['__sls__']
)
)
else:
errors.append(
'Specified state {0!r} is unavailable.'.format(
full
)
)
else:
# First verify that the parameters are met
aspec = salt.utils.get_function_argspec(self.states[full])
arglen = 0
deflen = 0
if isinstance(aspec.args, list):
arglen = len(aspec.args)
if isinstance(aspec.defaults, tuple):
deflen = len(aspec.defaults)
for ind in range(arglen - deflen):
if aspec.args[ind] not in data:
errors.append(
'Missing parameter {0} for state {1}'.format(
aspec.args[ind],
full
)
)
# If this chunk has a recursive require, then it will cause a
# recursive loop when executing, check for it
reqdec = ''
if 'require' in data:
reqdec = 'require'
if 'watch' in data:
# Check to see if the service has a mod_watch function, if it does
# not, then just require
# to just require extend the require statement with the contents
# of watch so that the mod_watch function is not called and the
# requisite capability is still used
if '{0}.mod_watch'.format(data['state']) not in self.states:
if 'require' in data:
data['require'].extend(data.pop('watch'))
else:
data['require'] = data.pop('watch')
reqdec = 'require'
else:
reqdec = 'watch'
if reqdec:
for req in data[reqdec]:
reqfirst = next(iter(req))
if data['state'] == reqfirst:
if (fnmatch.fnmatch(data['name'], req[reqfirst])
or fnmatch.fnmatch(data['__id__'], req[reqfirst])):
err = ('Recursive require detected in SLS {0} for'
' require {1} in ID {2}').format(
data['__sls__'],
req,
data['__id__'])
errors.append(err)
return errors
def verify_high(self, high):
'''
Verify that the high data is viable and follows the data structure
'''
errors = []
if not isinstance(high, dict):
errors.append('High data is not a dictionary and is invalid')
reqs = {}
for name, body in high.items():
if name.startswith('__'):
continue
if not isinstance(name, string_types):
errors.append(
'ID {0!r} in SLS {1!r} is not formed as a string, but '
'is a {2}'.format(
name, body['__sls__'], type(name).__name__)
)
if not isinstance(body, dict):
err = ('The type {0} in {1} is not formatted as a dictionary'
.format(name, body))
errors.append(err)
continue
for state in body:
if state.startswith('__'):
continue
if body[state] is None:
errors.append(
'ID {0!r} in SLS {1!r} contains a short declaration '
'({2}) with a trailing colon. When not passing any '
'arguments to a state, the colon must be omitted.'
.format(name, body['__sls__'], state)
)
continue
if not isinstance(body[state], list):
errors.append(
'State {0!r} in SLS {1!r} is not formed as a list'
.format(name, body['__sls__'])
)
else:
fun = 0
if '.' in state:
fun += 1
for arg in body[state]:
if isinstance(arg, string_types):
fun += 1
if ' ' in arg.strip():
errors.append(('The function "{0}" in state '
'"{1}" in SLS "{2}" has '
'whitespace, a function with whitespace is '
'not supported, perhaps this is an argument '
'that is missing a ":"').format(
arg,
name,
body['__sls__']))
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst == 'names':
if not isinstance(arg[argfirst], list):
errors.append(
'The \'names\' argument in state '
'{0!r} in SLS {1!r} needs to be '
'formed as a list'
.format(name, body['__sls__'])
)
if argfirst in ('require', 'watch', 'prereq'):
if not isinstance(arg[argfirst], list):
errors.append(
'The {0} statement in state {1!r} in '
'SLS {2!r} needs to be formed as a '
'list'.format(argfirst,
name,
body['__sls__'])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {'state': state}
for req in arg[argfirst]:
if not isinstance(req, dict):
err = ('Requisite declaration {0}'
' in SLS {1} is not formed as a'
' single key dictionary').format(
req,
body['__sls__'])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if '.' in req_key:
errors.append((
'Invalid requisite type {0!r} '
'in state {1!r}, in SLS '
'{2!r}. Requisite types must '
'not contain dots, did you '
'mean {3!r}?'.format(
req_key,
name,
body['__sls__'],
req_key[:req_key.find('.')]
)
))
if not ishashable(req_val):
errors.append((
'Illegal requisite "{0}", '
'please check your syntax.\n'
).format(str(req_val)))
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if reqs[req_val]['state'] == reqs[name][req_val]:
err = ('A recursive '
'requisite was found, SLS '
'"{0}" ID "{1}" ID "{2}"'
).format(
body['__sls__'],
name,
req_val
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
'Multiple dictionaries defined in '
'argument of state {0!r} in SLS {1!r}'
.format(name, body['__sls__'])
)
if not fun:
if state == 'require' or state == 'watch':
continue
errors.append(
'No function declared in state {0!r} in SLS {1!r}'
.format(state, body['__sls__'])
)
elif fun > 1:
errors.append(
'Too many functions declared in state {0!r} in '
'SLS {1!r}'.format(state, body['__sls__'])
)
return errors
def verify_chunks(self, chunks):
'''
Verify the chunks in a list of low data structures
'''
err = []
for chunk in chunks:
err += self.verify_data(chunk)
return err
def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if 'name_order' in chunk:
chunk_order = chunk_order + chunk['name_order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
else:
if isinstance(chunk['order'], int) and 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order')
if not isinstance(chunk['order'], int):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
else:
chunk['order'] = cap
elif isinstance(chunk['order'], int) and chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunks.sort(key=lambda k: (k['order'], '{0[state]}{0[name]}{0[fun]}'.format(k)))
return chunks
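    # Illustrative note (not part of the original module): given four chunks
    # with orders 1, (unset), -1 and 'last', the first loop above leaves cap
    # at 101, so the chunks end up sorting on 1 (explicit), 101 (unset ->
    # cap), 1000100 (-1 -> cap + 1000000 - 1) and 1000101 ('last' ->
    # cap + 1000000), with ties broken by the '{state}{name}{fun}' string key.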
def compile_high_data(self, high):
'''
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
'''
chunks = []
for name, body in high.items():
if name.startswith('__'):
continue
for state, run in body.items():
funcs = set()
names = set()
if state.startswith('__'):
continue
chunk = {'state': state,
'name': name}
if '__sls__' in body:
chunk['__sls__'] = body['__sls__']
if '__env__' in body:
chunk['__env__'] = body['__env__']
chunk['__id__'] = name
for arg in run:
if isinstance(arg, string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in arg.items():
if key == 'names':
names.update(val)
elif key == 'state':
# Don't pass down a state override
continue
elif (key == 'name' and
not isinstance(val, string_types)):
# Invalid name, fall back to ID
chunk[key] = name
else:
chunk[key] = val
if names:
name_order = 1
for low_name in names:
live = copy.deepcopy(chunk)
live['name'] = low_name
live['name_order'] = name_order
name_order = name_order + 1
for fun in funcs:
live['fun'] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live['fun'] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def reconcile_extend(self, high):
'''
Pull the extend data and add it to the respective high data
'''
errors = []
if '__extend__' not in high:
return high, errors
ext = high.pop('__extend__')
for ext_chunk in ext:
for name, body in ext_chunk.items():
if name not in high:
state_type = next(
x for x in body if not x.startswith('__')
)
# Check for a matching 'name' override in high data
id_ = find_name(name, state_type, high)
if id_:
name = id_
else:
errors.append(
'Cannot extend ID {0} in \'{1}:{2}\'. It is not '
'part of the high state.'.format(
name,
body.get('__env__', 'base'),
body.get('__sls__', 'base'))
)
continue
for state, run in body.items():
if state.startswith('__'):
continue
if state not in high[name]:
high[name][state] = run
continue
# high[name][state] is extended by run, both are lists
for arg in run:
update = False
for hind in range(len(high[name][state])):
if (isinstance(arg, string_types) and
isinstance(high[name][state][hind], string_types)):
# replacing the function, replace the index
high[name][state].pop(hind)
high[name][state].insert(hind, arg)
update = True
continue
if (isinstance(arg, dict) and
isinstance(high[name][state][hind], dict)):
# It is an option, make sure the options match
argfirst = next(iter(arg))
if (argfirst ==
next(iter(high[name][state][hind]))):
# They match, check if the option is a
# watch or require, append, otherwise
# replace
if (argfirst == 'require' or
argfirst == 'watch'):
# Extend the list
(high[name][state][hind][argfirst]
.extend(arg[argfirst]))
update = True
else:
# Replace the value
high[name][state][hind] = arg
update = True
if (argfirst == 'name' and
next(iter(high[name][state][hind])) == 'names'):
# If names are overwritten by name use the name
high[name][state][hind] = arg
if not update:
high[name][state].append(arg)
return high, errors
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = exc.keys()[0]
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in high.items():
if name.startswith('__'):
continue
sls = body.get('__sls__', '')
if not sls:
continue
for ex_ in ex_sls:
if fnmatch.fnmatch(sls, ex_):
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
def requisite_in(self, high):
'''
Extend the data reference with requisite_in arguments
'''
req_in = set([
'require_in',
'watch_in',
'onfail_in',
'onchanges_in',
'use',
'use_in',
'prereq',
'prereq_in',
])
req_in_all = req_in.union(
set([
'require',
'watch',
'onfail',
'onchanges',
]))
extend = {}
errors = []
for id_, body in high.items():
if not isinstance(body, dict):
continue
for state, run in body.items():
if state.startswith('__'):
continue
for arg in run:
if isinstance(arg, dict):
# It is not a function, verify that the arg is a
# requisite in statement
if len(arg) < 1:
# Empty arg dict
# How did we get this far?
continue
# Split out the components
key = next(iter(arg))
if key not in req_in:
continue
rkey = key.split('_')[0]
items = arg[key]
if isinstance(items, dict):
# Formatted as a single req_in
for _state, name in items.items():
# Not a use requisite_in
found = False
if name not in extend:
extend[name] = {}
if '.' in _state:
errors.append((
'Invalid requisite in {0}: {1} for '
'{2}, in SLS {3!r}. Requisites must '
'not contain dots, did you mean {4!r}?'
.format(
rkey,
_state,
name,
body['__sls__'],
_state[:_state.find('.')]
)
))
_state = _state.split(".")[0]
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
if isinstance(items, list):
# Formed as a list of requisite additions
for ind in items:
if not isinstance(ind, dict):
# Malformed req_in
continue
if len(ind) < 1:
continue
_state = next(iter(ind))
name = ind[_state]
if '.' in _state:
errors.append((
'Invalid requisite in {0}: {1} for '
'{2}, in SLS {3!r}. Requisites must '
'not contain dots, did you mean {4!r}?'
.format(
rkey,
_state,
name,
body['__sls__'],
_state[:_state.find('.')]
)
))
_state = _state.split(".")[0]
if key == 'prereq_in':
# Add prerequired to origin
if id_ not in extend:
extend[id_] = {}
if state not in extend[id_]:
extend[id_][state] = []
extend[id_][state].append(
{'prerequired': [{_state: name}]}
)
if key == 'prereq':
# Add prerequired to prereqs
ext_id = find_name(name, _state, high)
if not ext_id:
continue
if ext_id not in extend:
extend[ext_id] = {}
if _state not in extend[ext_id]:
extend[ext_id][_state] = []
extend[ext_id][_state].append(
{'prerequired': [{state: id_}]}
)
continue
if key == 'use_in':
# Add the running states args to the
# use_in states
ext_id = find_name(name, _state, high)
if not ext_id:
continue
ext_args = state_args(ext_id, _state, high)
if ext_id not in extend:
extend[ext_id] = {}
if _state not in extend[ext_id]:
extend[ext_id][_state] = []
ignore_args = req_in_all.union(ext_args)
for arg in high[id_][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if arg.keys()[0] == 'name':
continue
if arg.keys()[0] == 'names':
continue
extend[ext_id][_state].append(arg)
continue
if key == 'use':
# Add the use state's args to the
# running state
ext_id = find_name(name, _state, high)
if not ext_id:
continue
loc_args = state_args(id_, state, high)
if id_ not in extend:
extend[id_] = {}
if state not in extend[id_]:
extend[id_][state] = []
ignore_args = req_in_all.union(loc_args)
for arg in high[ext_id][_state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if arg.keys()[0] == 'name':
continue
if arg.keys()[0] == 'names':
continue
extend[id_][state].append(arg)
continue
found = False
if name not in extend:
extend[name] = {}
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
high['__extend__'] = []
for key, val in extend.items():
high['__extend__'].append({key: val})
req_in_high, req_in_errors = self.reconcile_extend(high)
errors.extend(req_in_errors)
return req_in_high, errors
def call(self, low, chunks=None, running=None):
'''
Call a state directly with the low data structure, verify data
before processing.
'''
log.info('Running state [{0}] at time {1}'.format(low['name'], datetime.datetime.now().time().isoformat()))
errors = self.verify_data(low)
if errors:
ret = {
'result': False,
'name': low['name'],
'changes': {},
'comment': '',
}
for err in errors:
ret['comment'] += '{0}\n'.format(err)
ret['__run_num__'] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
return ret
if not low.get('__prereq__'):
log.info(
'Executing state {0[state]}.{0[fun]} for {0[name]}'.format(
low
)
)
if 'provider' in low:
self.load_modules(low)
state_func_name = '{0[state]}.{0[fun]}'.format(low)
cdata = salt.utils.format_call(
self.states[state_func_name], low,
initial_ret={'full': state_func_name},
expected_extra_kws=STATE_INTERNAL_KEYWORDS
)
inject_globals = {
# Pass a copy of the running dictionary, the low state chunks and
# the current state dictionaries.
# We pass deep copies here because we don't want any misbehaving
# state module to change these at runtime.
'__low__': immutabletypes.freeze(low),
'__running__': immutabletypes.freeze(running) if running else {},
'__lowstate__': immutabletypes.freeze(chunks) if chunks else {}
}
if low.get('__prereq__'):
test = sys.modules[self.states[cdata['full']].__module__].__opts__['test']
sys.modules[self.states[cdata['full']].__module__].__opts__['test'] = True
try:
# Let's get a reference to the salt environment to use within this
# state call.
#
# If the state function accepts an 'env' keyword argument, it
# allows the state to be overridden(we look for that in cdata). If
# that's not found in cdata, we look for what we're being passed in
# the original data, namely, the special dunder __env__. If that's
# not found we default to 'base'
if 'saltenv' in low:
inject_globals['__env__'] = low['saltenv']
elif isinstance(cdata['kwargs'].get('env', None), string_types):
# User is using a deprecated env setting which was parsed by
# format_call.
# We check for a string type since module functions which
# allow setting the OS environ also make use of the "env"
# keyword argument, which is not a string
inject_globals['__env__'] = cdata['kwargs']['env']
elif '__env__' in low:
# The user is passing an alternative environment using __env__
# which is also not the appropriate choice, still, handle it
inject_globals['__env__'] = low['__env__']
else:
# Let's use the default environment
inject_globals['__env__'] = 'base'
with context.func_globals_inject(self.states[cdata['full']],
**inject_globals):
ret = self.states[cdata['full']](*cdata['args'],
**cdata['kwargs'])
self.verify_ret(ret)
except Exception:
trb = traceback.format_exc()
            # There are a number of ways the cdata might not be populated as
            # we expected, so be smart enough not to raise another KeyError
            # (the name is easily guessable) and in all cases fall back to
            # presenting the real exception to the user.
if len(cdata['args']) > 0:
name = cdata['args'][0]
elif 'name' in cdata['kwargs']:
name = cdata['kwargs']['name']
else:
name = low.get('name', low.get('__id__'))
ret = {
'result': False,
'name': name,
'changes': {},
'comment': 'An exception occurred in this state: {0}'.format(
trb)
}
finally:
if low.get('__prereq__'):
sys.modules[self.states[cdata['full']].__module__].__opts__[
'test'] = test
# If format_call got any warnings, let's show them to the user
if 'warnings' in cdata:
ret.setdefault('warnings', []).extend(cdata['warnings'])
if 'provider' in low:
self.load_modules()
if low.get('__prereq__'):
low['__prereq__'] = False
return ret
ret['__run_num__'] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
log.info('Completed state [{0}] at time {1}'.format(low['name'], datetime.datetime.now().time().isoformat()))
return ret
def call_chunks(self, chunks):
'''
Iterate over a list of chunks and call them, checking for requires.
'''
running = {}
for low in chunks:
if '__FAILHARD__' in running:
running.pop('__FAILHARD__')
return running
tag = _gen_tag(low)
if tag not in running:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(low, running):
return running
self.active = set()
return running
def check_failhard(self, low, running):
'''
Check if the low data chunk should send a failhard signal
'''
tag = _gen_tag(low)
if (low.get('failhard', False) or self.opts['failhard']
and tag in running):
return not running[tag]['result']
return False
def check_requisite(self, low, running, chunks, pre=False):
'''
Look into the running data to check the status of all requisite
states
'''
present = False
# If mod_watch is not available make it a require
if 'watch' in low:
if '{0}.mod_watch'.format(low['state']) not in self.states:
if 'require' in low:
low['require'].extend(low.pop('watch'))
else:
low['require'] = low.pop('watch')
else:
present = True
if 'require' in low:
present = True
if 'prerequired' in low:
present = True
if 'prereq' in low:
present = True
if 'onfail' in low:
present = True
if 'onchanges' in low:
present = True
if not present:
return 'met'
reqs = {
'require': [],
'watch': [],
'prereq': [],
'onfail': [],
'onchanges': []}
if pre:
reqs['prerequired'] = []
for r_state in reqs:
if r_state in low and low[r_state] is not None:
for req in low[r_state]:
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if (fnmatch.fnmatch(chunk['name'], req_val) or
fnmatch.fnmatch(chunk['__id__'], req_val)):
if chunk['state'] == req_key:
found = True
reqs[r_state].append(chunk)
elif req_key == 'sls':
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk['__sls__'], req_val):
found = True
reqs[r_state].append(chunk)
if not found:
return 'unmet'
fun_stats = set()
for r_state, chunks in reqs.items():
if r_state == 'prereq':
run_dict = self.pre
else:
run_dict = running
for chunk in chunks:
tag = _gen_tag(chunk)
if tag not in run_dict:
fun_stats.add('unmet')
continue
if r_state == 'onfail':
if run_dict[tag]['result'] is True:
fun_stats.add('fail')
continue
else:
if run_dict[tag]['result'] is False:
fun_stats.add('fail')
continue
if r_state == 'onchanges':
if not run_dict[tag]['changes']:
fun_stats.add('fail')
continue
if r_state == 'watch' and run_dict[tag]['changes']:
fun_stats.add('change')
continue
if r_state == 'prereq' and run_dict[tag]['result'] is None:
fun_stats.add('premet')
if r_state == 'prereq' and not run_dict[tag]['result'] is None:
fun_stats.add('pre')
else:
fun_stats.add('met')
if 'unmet' in fun_stats:
return 'unmet'
elif 'fail' in fun_stats:
return 'fail'
elif 'pre' in fun_stats:
if 'premet' in fun_stats:
return 'met'
return 'pre'
elif 'change' in fun_stats:
return 'change'
return 'met'
def event(self, chunk_ret, length):
'''
Fire an event on the master bus
'''
if not self.opts.get('local') and self.opts.get('state_events', True) and self.opts.get('master_uri'):
ret = {'ret': chunk_ret,
'len': length}
tag = salt.utils.event.tagify(
[self.jid, 'prog', self.opts['id'], str(chunk_ret['__run_num__'])], 'job'
)
preload = {'jid': self.jid}
self.functions['event.fire_master'](ret, tag, preload=preload)
def call_chunk(self, low, running, chunks):
'''
Check if a chunk has any requires, execute the requires and then
the chunk
'''
self._mod_init(low)
tag = _gen_tag(low)
if not low.get('prerequired'):
self.active.add(tag)
requisites = ['require', 'watch', 'prereq', 'onfail', 'onchanges']
if not low.get('__prereq__'):
requisites.append('prerequired')
status = self.check_requisite(low, running, chunks, True)
else:
status = self.check_requisite(low, running, chunks)
if status == 'unmet':
lost = {}
reqs = []
for requisite in requisites:
lost[requisite] = []
if requisite not in low:
continue
for req in low[requisite]:
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if (fnmatch.fnmatch(chunk['name'], req_val) or
fnmatch.fnmatch(chunk['__id__'], req_val)):
if chunk['state'] == req_key:
if requisite == 'prereq':
chunk['__prereq__'] = True
elif requisite == 'prerequired':
chunk['__prerequired__'] = True
reqs.append(chunk)
found = True
elif req_key == 'sls':
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk['__sls__'], req_val):
if requisite == 'prereq':
chunk['__prereq__'] = True
reqs.append(chunk)
found = True
if not found:
lost[requisite].append(req)
if lost['require'] or lost['watch'] or lost['prereq'] or lost['onfail'] or lost['onchanges'] or lost.get('prerequired'):
comment = 'The following requisites were not found:\n'
for requisite, lreqs in lost.items():
if not lreqs:
continue
comment += \
'{0}{1}:\n'.format(' ' * 19, requisite)
for lreq in lreqs:
req_key = next(iter(lreq))
req_val = lreq[req_key]
comment += \
'{0}{1}: {2}\n'.format(' ' * 23, req_key, req_val)
running[tag] = {'changes': {},
'result': False,
'comment': comment,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
self.event(running[tag], len(chunks))
return running
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
ctag = _gen_tag(chunk)
if ctag not in running:
if ctag in self.active:
if chunk.get('__prerequired__'):
                            # Prereq recursive, run this chunk with prereq on
if tag not in self.pre:
low['__prereq__'] = True
self.pre[ctag] = self.call(low, chunks, running)
return running
else:
return running
elif ctag not in running:
log.error('Recursive requisite found')
running[tag] = {
'changes': {},
'result': False,
'comment': 'Recursive requisite found',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
self.event(running[tag], len(chunks))
return running
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
if low.get('__prereq__'):
status = self.check_requisite(low, running, chunks)
self.pre[tag] = self.call(low, chunks, running)
if not self.pre[tag]['changes'] and status == 'change':
self.pre[tag]['changes'] = {'watch': 'watch'}
self.pre[tag]['result'] = None
else:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
elif status == 'met':
if low.get('__prereq__'):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
elif status == 'fail':
running[tag] = {'changes': {},
'result': False,
'comment': 'One or more requisite failed',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
elif status == 'change' and not low.get('__prereq__'):
ret = self.call(low, chunks, running)
if not ret['changes']:
low = low.copy()
low['sfun'] = low['fun']
low['fun'] = 'mod_watch'
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == 'pre':
pre_ret = {'changes': {},
'result': True,
'comment': 'No changes detected',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
running[tag] = pre_ret
self.pre[tag] = pre_ret
self.__run_num += 1
else:
if low.get('__prereq__'):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
if tag in running:
self.event(running[tag], len(chunks))
return running
def call_high(self, high):
'''
Process a high data call and ensure the defined states.
'''
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
errors += ext_errors
errors += self.verify_high(high)
if errors:
return errors
high, req_in_errors = self.requisite_in(high)
errors += req_in_errors
high = self.apply_exclude(high)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.compile_high_data(high)
# If there are extensions in the highstate, process them and update
# the low data chunks
if errors:
return errors
ret = self.call_chunks(chunks)
return ret
def render_template(self, high, template):
errors = []
if not high:
return high, errors
if not isinstance(high, dict):
errors.append(
'Template {0} does not render to a dictionary'.format(template)
)
return high, errors
invalid_items = ('include', 'exclude', 'extends')
for item in invalid_items:
if item in high:
errors.append(
'The \'{0}\' declaration found on \'{1}\' is invalid when '
'rendering single templates'.format(item, template)
)
return high, errors
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], string_types):
                    # If this is a short state, it needs to be padded
if '.' in high[name]:
comps = high[name].split('.')
high[name] = {
#'__sls__': template,
#'__env__': None,
comps[0]: [comps[1]]
}
continue
errors.append(
'ID {0} in template {1} is not a dictionary'.format(
name, template
)
)
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith('_'):
continue
if high[name][key] is None:
errors.append(
'ID {0!r} in template {1} contains a short '
'declaration ({2}) with a trailing colon. When not '
'passing any arguments to a state, the colon must be '
'omitted.'.format(name, template, key)
)
continue
if not isinstance(high[name][key], list):
continue
if '.' in key:
comps = key.split('.')
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
'ID {0!r} in template {1!r} contains multiple '
'state declarations of the same type'
.format(name, template)
)
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high, errors
def call_template(self, template):
'''
Enforce the states in a template
'''
high = compile_template(
template, self.rend, self.opts['renderer'])
if not high:
return high
high, errors = self.render_template(high, template)
if errors:
return errors
return self.call_high(high)
def call_template_str(self, template):
'''
Enforce the states in a template, pass the template as a string
'''
high = compile_template_str(
template, self.rend, self.opts['renderer'])
if not high:
return high
high, errors = self.render_template(high, '<template-str>')
if errors:
return errors
return self.call_high(high)
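# ---------------------------------------------------------------------------
# Illustrative sketch (not part of upstream Salt): driving the State compiler
# above for a single template string. ``minion_opts`` is assumed to be a
# fully loaded minion configuration dictionary.
def _example_enforce_template_str(minion_opts, sls_text):
    # Build a State object and enforce the states rendered from the template
    # string, returning either the state results or a list of render errors.
    st_ = State(minion_opts)
    return st_.call_template_str(sls_text)
# ---------------------------------------------------------------------------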
class BaseHighState(object):
'''
The BaseHighState is an abstract base class that is the foundation of
running a highstate, extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
'''
def __init__(self, opts):
self.opts = self.__gen_opts(opts)
self.iorder = 10000
self.avail = self.__gather_avail()
self.serial = salt.payload.Serial(self.opts)
self.building_highstate = {}
def __gather_avail(self):
'''
Gather the lists of available sls data from the master
'''
avail = {}
for saltenv in self._get_envs():
avail[saltenv] = self.client.list_states(saltenv)
return avail
def __gen_opts(self, opts):
'''
The options used by the High State object are derived from options
on the minion and the master, or just the minion if the high state
call is entirely local.
'''
# If the state is intended to be applied locally, then the local opts
# should have all of the needed data, otherwise overwrite the local
# data items with data from the master
if 'local_state' in opts:
if opts['local_state']:
return opts
mopts = self.client.master_opts()
if not isinstance(mopts, dict):
# An error happened on the master
opts['renderer'] = 'yaml_jinja'
opts['failhard'] = False
opts['state_top'] = 'salt://top.sls'
opts['nodegroups'] = {}
opts['file_roots'] = {'base': [syspaths.BASE_FILE_ROOTS_DIR]}
else:
opts['renderer'] = mopts['renderer']
opts['failhard'] = mopts.get('failhard', False)
if mopts['state_top'].startswith('salt://'):
opts['state_top'] = mopts['state_top']
elif mopts['state_top'].startswith('/'):
opts['state_top'] = os.path.join('salt://', mopts['state_top'][1:])
else:
opts['state_top'] = os.path.join('salt://', mopts['state_top'])
opts['nodegroups'] = mopts.get('nodegroups', {})
opts['state_auto_order'] = mopts.get(
'state_auto_order',
opts['state_auto_order'])
opts['file_roots'] = mopts['file_roots']
opts['state_events'] = mopts.get('state_events')
opts['jinja_lstrip_blocks'] = mopts.get('jinja_lstrip_blocks', False)
opts['jinja_trim_blocks'] = mopts.get('jinja_trim_blocks', False)
return opts
def _get_envs(self):
'''
Pull the file server environments out of the master options
'''
envs = set(['base'])
if 'file_roots' in self.opts:
envs.update(list(self.opts['file_roots']))
return envs
def get_tops(self):
'''
Gather the top files
'''
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
# Gather initial top files
if self.opts['environment']:
tops[self.opts['environment']] = [
compile_template(
self.client.cache_file(
self.opts['state_top'],
self.opts['environment']
),
self.state.rend,
self.state.opts['renderer'],
env=self.opts['environment']
)
]
else:
for saltenv in self._get_envs():
tops[saltenv].append(
compile_template(
self.client.cache_file(
self.opts['state_top'],
saltenv
),
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
# Search initial top files for includes
for saltenv, ctops in tops.items():
for ctop in ctops:
if 'include' not in ctop:
continue
for sls in ctop['include']:
include[saltenv].append(sls)
ctop.pop('include')
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in include.items():
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(
sls,
saltenv
).get('dest', False),
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops
def merge_tops(self, tops):
'''
Cleanly merge the top files
'''
top = DefaultOrderedDict(OrderedDict)
for ctops in tops.values():
for ctop in ctops:
for saltenv, targets in ctop.items():
if saltenv == 'include':
continue
try:
for tgt in targets:
if tgt not in top[saltenv]:
top[saltenv][tgt] = ctop[saltenv][tgt]
continue
matches = []
states = set()
for comp in top[saltenv][tgt]:
if isinstance(comp, dict):
matches.append(comp)
if isinstance(comp, string_types):
states.add(comp)
top[saltenv][tgt] = matches
top[saltenv][tgt].extend(list(states))
except TypeError:
raise SaltRenderError('Unable to render top file. No targets found.')
return top
def verify_tops(self, tops):
'''
Verify the contents of the top file data
'''
errors = []
if not isinstance(tops, dict):
errors.append('Top data was not formed as a dict')
# No further checks will work, bail out
return errors
for saltenv, matches in tops.items():
if saltenv == 'include':
continue
if not isinstance(saltenv, string_types):
errors.append(
'Environment {0} in top file is not formed as a '
'string'.format(saltenv)
)
if saltenv == '':
errors.append('Empty saltenv statement in top file')
if not isinstance(matches, dict):
errors.append(
'The top file matches for saltenv {0} are not '
'formatted as a dict'.format(saltenv)
)
for slsmods in matches.values():
if not isinstance(slsmods, list):
errors.append('Malformed topfile (state declarations not '
'formed as a list)')
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in slsmod.values():
if not val:
errors.append(
'Improperly formatted top file matcher '
'in saltenv {0}: {1} file'.format(
slsmod,
val
)
)
elif isinstance(slsmod, string_types):
# This is a sls module
if not slsmod:
errors.append(
'Environment {0} contains an empty sls '
'index'.format(saltenv)
)
return errors
def get_top(self):
'''
Returns the high data derived from the top file
'''
tops = self.get_tops()
return self.merge_tops(tops)
def top_matches(self, top):
'''
Search through the top high data for matches and return the states
that this minion needs to execute.
Returns:
{'saltenv': ['state1', 'state2', ...]}
'''
matches = {}
for saltenv, body in top.items():
if self.opts['environment']:
if saltenv != self.opts['environment']:
continue
for match, data in body.items():
def _filter_matches(_match, _data, _opts):
if isinstance(_data, string_types):
_data = [_data]
if self.matcher.confirm_top(
_match,
_data,
_opts
):
if saltenv not in matches:
matches[saltenv] = []
for item in _data:
if 'subfilter' in item:
_tmpdata = item.pop('subfilter')
for match, data in _tmpdata.items():
_filter_matches(match, data, _opts)
if isinstance(item, string_types):
matches[saltenv].append(item)
_filter_matches(match, data, self.opts['nodegroups'])
ext_matches = self.client.ext_nodes()
for saltenv in ext_matches:
if saltenv in matches:
matches[saltenv] = list(
set(ext_matches[saltenv]).union(matches[saltenv]))
else:
matches[saltenv] = ext_matches[saltenv]
return matches
def load_dynamic(self, matches):
'''
If autoload_dynamic_modules is True then automatically load the
dynamic modules
'''
if not self.opts['autoload_dynamic_modules']:
return
if self.opts.get('local', False):
syncd = self.state.functions['saltutil.sync_all'](list(matches), refresh=False)
else:
syncd = self.state.functions['saltutil.sync_all'](list(matches))
if syncd['grains']:
self.opts['grains'] = salt.loader.grains(self.opts)
self.state.opts['pillar'] = self.state._gather_pillar()
self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches):
'''
Render a state file and retrieve all of the include states
'''
err = ''
errors = []
state_data = self.client.get_state(sls, saltenv)
fn_ = state_data.get('dest', False)
if not fn_:
errors.append(
'Specified SLS {0} in saltenv {1} is not '
'available on the salt master'.format(sls, saltenv)
)
state = None
try:
state = compile_template(
fn_, self.state.rend, self.state.opts['renderer'], saltenv,
sls, rendered_sls=mods
)
except SaltRenderError as exc:
msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
saltenv, sls, exc
)
log.critical(msg)
errors.append(msg)
except Exception as exc:
msg = 'Rendering SLS {0} failed, render error: {1}'.format(
sls, exc
)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info=log.isEnabledFor(logging.DEBUG)
)
errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
mods.add('{0}:{1}'.format(saltenv, sls))
if state:
if not isinstance(state, dict):
errors.append(
'SLS {0} does not render to a dictionary'.format(sls)
)
else:
include = []
if 'include' in state:
if not isinstance(state['include'], list):
err = ('Include Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
else:
include = state.pop('include')
self._handle_extend(state, sls, saltenv, errors)
self._handle_exclude(state, sls, saltenv, errors)
self._handle_state_decls(state, sls, saltenv, errors)
for inc_sls in include:
# inc_sls may take the form of:
# 'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
# {<env_key>: 'sls.to.include'}
# {'_xenv': 'sls.to.resolve'}
xenv_key = '_xenv'
if isinstance(inc_sls, dict):
env_key, inc_sls = inc_sls.popitem()
else:
env_key = saltenv
if env_key not in self.avail:
                    msg = ('Nonexistent saltenv {0!r} found in include '
'of {1!r} within SLS \'{2}:{3}\''
.format(env_key, inc_sls, saltenv, sls))
log.error(msg)
errors.append(msg)
continue
if inc_sls.startswith('.'):
p_comps = sls.split('.')
if state_data.get('source', '').endswith('/init.sls'):
inc_sls = sls + inc_sls
else:
inc_sls = '.'.join(p_comps[:-1]) + inc_sls
if env_key != xenv_key:
# Resolve inc_sls in the specified environment
if env_key in matches or fnmatch.filter(self.avail[env_key], inc_sls):
resolved_envs = [env_key]
else:
resolved_envs = []
else:
# Resolve inc_sls in the subset of environment matches
resolved_envs = [
aenv for aenv in matches
if fnmatch.filter(self.avail[aenv], inc_sls)
]
# An include must be resolved to a single environment, or
# the include must exist in the current environment
if len(resolved_envs) == 1 or saltenv in resolved_envs:
# Match inc_sls against the available states in the
# resolved env, matching wildcards in the process. If
# there were no matches, then leave inc_sls as the
# target so that the next recursion of render_state
# will recognize the error.
sls_targets = fnmatch.filter(
self.avail[saltenv],
inc_sls
) or [inc_sls]
for sls_target in sls_targets:
r_env = resolved_envs[0] if len(resolved_envs) == 1 else saltenv
mod_tgt = '{0}:{1}'.format(r_env, sls_target)
if mod_tgt not in mods:
nstate, err = self.render_state(
sls_target,
r_env,
mods,
matches
)
if nstate:
self.merge_included_states(state, nstate, errors)
state.update(nstate)
if err:
errors.extend(err)
else:
msg = ''
if not resolved_envs:
msg = ('Unknown include: Specified SLS {0}: {1} is not available on the salt '
'master in saltenv(s): {2} '
).format(env_key,
inc_sls,
', '.join(matches) if env_key == xenv_key else env_key)
elif len(resolved_envs) > 1:
msg = ('Ambiguous include: Specified SLS {0}: {1} is available on the salt master '
'in multiple available saltenvs: {2}'
).format(env_key,
inc_sls,
', '.join(resolved_envs))
log.critical(msg)
errors.append(msg)
self._handle_iorder(state)
else:
state = {}
return state, errors
def _handle_iorder(self, state):
'''
Take a state and apply the iorder system
'''
if self.opts['state_auto_order']:
for name in state:
for s_dec in state[name]:
if not isinstance(s_dec, string_types):
# PyDSL OrderedDict?
continue
if not isinstance(state[name], dict):
                        # Includes or excludes as lists?
continue
if not isinstance(state[name][s_dec], list):
# Bad syntax, let the verify seq pick it up later on
continue
found = False
if s_dec.startswith('_'):
continue
for arg in state[name][s_dec]:
if isinstance(arg, dict):
if len(arg) > 0:
if arg.keys()[0] == 'order':
found = True
if not found:
if not isinstance(state[name][s_dec], list):
# quite certainly a syntax error, managed elsewhere
continue
state[name][s_dec].append(
{'order': self.iorder}
)
self.iorder += 1
return state
def _handle_state_decls(self, state, sls, saltenv, errors):
'''
Add sls and saltenv components to the state
'''
for name in state:
if not isinstance(state[name], dict):
if name == '__extend__':
continue
if name == '__exclude__':
continue
if isinstance(state[name], string_types):
                    # If this is a short state, it needs to be padded
if '.' in state[name]:
comps = state[name].split('.')
state[name] = {'__sls__': sls,
'__env__': saltenv,
comps[0]: [comps[1]]}
continue
errors.append(
'ID {0} in SLS {1} is not a dictionary'.format(name, sls)
)
continue
skeys = set()
for key in state[name]:
if key.startswith('_'):
continue
if not isinstance(state[name][key], list):
continue
if '.' in key:
comps = key.split('.')
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - source: salt://redis/redis.conf
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
'ID {0!r} in SLS {1!r} contains multiple state '
'declarations of the same type'.format(name, sls)
)
continue
state[name][comps[0]] = state[name].pop(key)
state[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
if '__sls__' not in state[name]:
state[name]['__sls__'] = sls
if '__env__' not in state[name]:
state[name]['__env__'] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
'''
Take the extend dec out of state and apply to the highstate global
dec
'''
if 'extend' in state:
ext = state.pop('extend')
if not isinstance(ext, dict):
errors.append(('Extension value in SLS {0!r} is not a '
'dictionary').format(sls))
return
for name in ext:
if not isinstance(ext[name], dict):
errors.append(('Extension name {0!r} in SLS {1!r} is '
'not a dictionary'
.format(name, sls)))
continue
if '__sls__' not in ext[name]:
ext[name]['__sls__'] = sls
if '__env__' not in ext[name]:
ext[name]['__env__'] = saltenv
for key in ext[name]:
if key.startswith('_'):
continue
if not isinstance(ext[name][key], list):
continue
if '.' in key:
comps = key.split('.')
ext[name][comps[0]] = ext[name].pop(key)
ext[name][comps[0]].append(comps[1])
state.setdefault('__extend__', []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
'''
Take the exclude dec out of the state and apply it to the highstate
global dec
'''
if 'exclude' in state:
exc = state.pop('exclude')
if not isinstance(exc, list):
err = ('Exclude Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
state.setdefault('__exclude__', []).extend(exc)
def render_highstate(self, matches):
'''
Gather the state files and render them into a single unified salt
high data structure.
'''
highstate = self.building_highstate
all_errors = []
mods = set()
for saltenv, states in matches.items():
for sls_match in states:
statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
                # If we did not find any sls in the fileserver listing, it may
                # be because the sls was generated or added later; try to
                # execute it directly, and if that fails it will return the
                # original error anyway.
if not statefiles:
statefiles = [sls_match]
for sls in statefiles:
r_env = '{0}:{1}'.format(saltenv, sls)
if r_env in mods:
continue
state, errors = self.render_state(
sls, saltenv, mods, matches)
if state:
self.merge_included_states(highstate, state, errors)
for i, error in enumerate(errors[:]):
if 'is not available on the salt master' in error:
# match SLS foobar in environment
this_sls = 'SLS {0} in saltenv'.format(
sls_match)
if this_sls in error:
errors[i] = (
'No matching sls found for {0!r} '
'in env {1!r}'.format(sls_match, saltenv))
all_errors.extend(errors)
self.clean_duplicate_extends(highstate)
return highstate, all_errors
def clean_duplicate_extends(self, highstate):
if '__extend__' in highstate:
highext = []
for items in (ext.items() for ext in highstate['__extend__']):
for item in items:
if item not in highext:
highext.append(item)
highstate['__extend__'] = [{t[0]: t[1]} for t in highext]
def merge_included_states(self, highstate, state, errors):
# The extend members can not be treated as globally unique:
if '__extend__' in state:
highstate.setdefault('__extend__',
[]).extend(state.pop('__extend__'))
if '__exclude__' in state:
highstate.setdefault('__exclude__',
[]).extend(state.pop('__exclude__'))
for id_ in state:
if id_ in highstate:
if highstate[id_] != state[id_]:
errors.append((
'Detected conflicting IDs, SLS'
' IDs need to be globally unique.\n The'
' conflicting ID is {0!r} and is found in SLS'
' \'{1}:{2}\' and SLS \'{3}:{4}\'').format(
id_,
highstate[id_]['__env__'],
highstate[id_]['__sls__'],
state[id_]['__env__'],
state[id_]['__sls__'])
)
try:
highstate.update(state)
except ValueError:
errors.append(
'Error when rendering state with contents: {0}'.format(state)
)
def _check_pillar(self, force=False):
'''
Check the pillar for errors, refuse to run the state if there are
errors in the pillar and return the pillar errors
'''
if force:
return True
if '_errors' in self.state.opts['pillar']:
return False
return True
def call_highstate(self, exclude=None, cache=None, cache_name='highstate',
force=False):
'''
Run the sequence to execute the salt highstate for this minion
'''
#Check that top file exists
tag_name = 'no_|-states_|-states_|-None'
ret = {tag_name: {
'result': False,
'comment': 'No states found for this minion',
'name': 'No States',
'changes': {},
'__run_num__': 0,
}}
cfn = os.path.join(
self.opts['cachedir'],
'{0}.cache.p'.format(cache_name)
)
if cache:
if os.path.isfile(cfn):
with salt.utils.fopen(cfn, 'rb') as fp_:
high = self.serial.load(fp_)
return self.state.call_high(high)
#File exists so continue
err = []
try:
top = self.get_top()
except SaltRenderError as err:
ret[tag_name]['comment'] = err.error
return ret
except Exception:
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = ('No Top file or external nodes data matches found')
ret[tag_name]['comment'] = msg
return ret
self.load_dynamic(matches)
if not self._check_pillar(force):
err += ['Pillar failed to render with the following messages:']
err += self.state.opts['pillar']['_errors']
else:
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, str):
exclude = exclude.split(',')
if '__exclude__' in high:
high['__exclude__'].extend(exclude)
else:
high['__exclude__'] = exclude
err += errors
if err:
return err
if not high:
return ret
cumask = os.umask(077)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
self.state.functions['cmd.run']('attrib -R "{0}"'.format(cfn), output_loglevel='quiet')
with salt.utils.fopen(cfn, 'w+b') as fp_:
try:
self.serial.dump(high, fp_)
except TypeError:
# Can't serialize pydsl
pass
except (IOError, OSError):
msg = 'Unable to write to "state.highstate" cache file {0}'
log.error(msg.format(cfn))
os.umask(cumask)
return self.state.call_high(high)
def compile_highstate(self):
'''
Return just the highstate or the errors
'''
err = []
top = self.get_top()
err += self.verify_tops(top)
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
err += errors
if err:
return err
return high
def compile_low_chunks(self):
'''
Compile the highstate but don't run it, return the low chunks to
see exactly what the highstate will execute
'''
top = self.get_top()
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
# If there is extension data reconcile it
high, ext_errors = self.state.reconcile_extend(high)
errors += ext_errors
# Verify that the high data is structurally sound
errors += self.state.verify_high(high)
high, req_in_errors = self.state.requisite_in(high)
errors += req_in_errors
high = self.state.apply_exclude(high)
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.state.compile_high_data(high)
return chunks
class HighState(BaseHighState):
'''
Generate and execute the salt "High State". The High State is the
compound state derived from a group of template files stored on the
salt master or in the local cache.
'''
# a stack of active HighState objects during a state.highstate run
stack = []
def __init__(self, opts, pillar=None, jid=None):
self.client = salt.fileclient.get_file_client(opts)
BaseHighState.__init__(self, opts)
self.state = State(self.opts, pillar, jid)
self.matcher = salt.minion.Matcher(self.opts)
# tracks all pydsl state declarations globally across sls files
self._pydsl_all_decls = {}
# a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
self._pydsl_render_stack = []
def push_active(self):
self.stack.append(self)
@classmethod
def pop_active(cls):
cls.stack.pop()
@classmethod
def get_active(cls):
try:
return cls.stack[-1]
except IndexError:
return None
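# ---------------------------------------------------------------------------
# Illustrative sketch (not part of upstream Salt): a minimal highstate run
# with the HighState class above. ``minion_opts`` is assumed to be a fully
# loaded minion configuration dictionary; error handling is omitted.
def _example_run_highstate(minion_opts):
    st_ = HighState(minion_opts)   # wires up the fileclient, State and matcher
    st_.push_active()              # register on the class-level stack
    try:
        # Compile the top file, render the matched SLS files and execute the
        # resulting low chunks, returning per-state results or error lists.
        return st_.call_highstate()
    finally:
        HighState.pop_active()
# ---------------------------------------------------------------------------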
class MasterState(State):
'''
Create a State object for master side compiling
'''
def __init__(self, opts, minion):
State.__init__(self, opts)
def load_modules(self, data=None):
'''
Load the modules into the state
'''
log.info('Loading fresh modules for state activity')
# Load a modified client interface that looks like the interface used
# from the minion, but uses remote execution
#
self.functions = salt.client.FunctionWrapper(
self.opts,
self.opts['id']
)
# Load the states, but they should not be used in this class apart
# from inspection
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
class MasterHighState(HighState):
'''
Execute highstate compilation from the master
'''
def __init__(self, master_opts, minion_opts, grains, id_,
saltenv=None,
env=None):
if isinstance(env, string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
# Force the fileclient to be local
opts = copy.deepcopy(minion_opts)
opts['file_client'] = 'local'
opts['file_roots'] = master_opts['master_roots']
opts['renderer'] = master_opts['renderer']
opts['state_top'] = master_opts['state_top']
opts['id'] = id_
opts['grains'] = grains
HighState.__init__(self, opts)
class RemoteHighState(object):
'''
Manage gathering the data from the master
'''
def __init__(self, opts, grains):
self.opts = opts
self.grains = grains
self.serial = salt.payload.Serial(self.opts)
# self.auth = salt.crypt.SAuth(opts)
self.sreq = salt.transport.Channel.factory(self.opts['master_uri'])
def compile_master(self):
'''
Return the state data from the master
'''
load = {'grains': self.grains,
'opts': self.opts,
'cmd': '_master_state'}
try:
return self.sreq.send(load, tries=3, timeout=72000)
# return self.auth.crypticle.loads(self.sreq.send(
# 'aes',
# self.auth.crypticle.dumps(load),
# 3,
# 72000))
except SaltReqTimeoutError:
return {}
| 42.154292 | 132 | 0.417921 |
f76a07fdb0dedcf997f75c661986255b0247ac1a | 11,197 | py | Python | tests/parsers/test_notebook_parsers.py | s-weigand/flake8-nb | 39c6cf6158cc231c420ff783a550b09ee5f7e4c7 | ["Apache-2.0"] | 23 | 2019-12-05T06:02:43.000Z | 2022-03-11T18:17:19.000Z | tests/parsers/test_notebook_parsers.py | s-weigand/flake8-nb | 39c6cf6158cc231c420ff783a550b09ee5f7e4c7 | ["Apache-2.0"] | 191 | 2019-10-04T06:22:14.000Z | 2022-03-29T04:02:28.000Z | tests/parsers/test_notebook_parsers.py | s-weigand/flake8-nb | 39c6cf6158cc231c420ff783a550b09ee5f7e4c7 | ["Apache-2.0"] | 6 | 2020-06-13T13:35:15.000Z | 2021-11-28T19:50:12.000Z |
import os
import warnings
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
import pytest
from flake8_nb.parsers import CellId
from flake8_nb.parsers.notebook_parsers import InputLineMapping
from flake8_nb.parsers.notebook_parsers import InvalidNotebookWarning
from flake8_nb.parsers.notebook_parsers import NotebookParser
from flake8_nb.parsers.notebook_parsers import create_intermediate_py_file
from flake8_nb.parsers.notebook_parsers import create_temp_path
from flake8_nb.parsers.notebook_parsers import get_notebook_code_cells
from flake8_nb.parsers.notebook_parsers import get_rel_paths
from flake8_nb.parsers.notebook_parsers import ignore_cell
from flake8_nb.parsers.notebook_parsers import is_parent_dir
from flake8_nb.parsers.notebook_parsers import map_intermediate_to_input
from flake8_nb.parsers.notebook_parsers import read_notebook_to_cells
from tests import TEST_NOTEBOOK_BASE_PATH
INTERMEDIATE_PY_FILE_BASE_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "data", "intermediate_py_files")
)
def get_expected_intermediate_file_results(result_name: str, base_path: str) -> Tuple[str, str]:
expected_result_path = os.path.join(base_path, "tests", "data", "notebooks", result_name)
expected_result_file_path = os.path.join(INTERMEDIATE_PY_FILE_BASE_PATH, result_name)
if result_name.startswith("not_a_notebook"):
expected_result_str = ""
else:
with open(expected_result_file_path) as result_file:
expected_result_str = result_file.read()
return expected_result_path, expected_result_str
@pytest.mark.parametrize(
"notebook_name,expected_input_line_mapping",
[
("not_a_notebook", {"input_ids": [], "code_lines": []}),
(
"notebook_with_flake8_tags",
{
"input_ids": [
CellId("1", 1, 4),
CellId("2", 2, 6),
CellId("3", 3, 8),
CellId("4", 4, 11),
CellId("5", 5, 13),
CellId("6", 6, 15),
CellId("7", 7, 18),
CellId("8", 8, 20),
CellId("9", 9, 22),
CellId("10", 10, 24),
],
"code_lines": [4, 11, 18, 25, 33, 41, 49, 56, 62, 68],
},
),
(
"notebook_with_out_ipython_magic",
{"input_ids": [CellId("1", 1, 3)], "code_lines": [1]},
),
(
"cell_with_source_string",
{"input_ids": [("1", 1, 1)], "code_lines": [1]},
),
(
"notebook_with_out_flake8_tags",
{
"input_ids": [
CellId("1", 1, 3),
CellId("2", 2, 5),
CellId("3", 3, 7),
CellId("4", 4, 9),
CellId("5", 6, 13),
CellId("6", 7, 15),
CellId("7", 8, 17),
CellId("8", 9, 19),
],
"code_lines": [4, 10, 16, 23, 31, 37, 43, 49],
},
),
],
)
def test_create_intermediate_py_file(
tmpdir, notebook_name: str, expected_input_line_mapping: Dict[str, List[Union[str, int]]]
):
notebook_path = os.path.join(TEST_NOTEBOOK_BASE_PATH, f"{notebook_name}.ipynb")
tmp_base_path = str(tmpdir)
expected_result_path, expected_result_str = get_expected_intermediate_file_results(
f"{notebook_name}.ipynb_parsed", tmp_base_path
)
if notebook_name.startswith("not_a_notebook"):
with pytest.warns(InvalidNotebookWarning):
intermediate_file_path, input_line_mapping = create_intermediate_py_file(
notebook_path, tmp_base_path
)
assert intermediate_file_path == ""
assert input_line_mapping == expected_input_line_mapping
else:
intermediate_file_path, input_line_mapping = create_intermediate_py_file(
notebook_path, tmp_base_path
)
assert intermediate_file_path == expected_result_path
assert input_line_mapping == expected_input_line_mapping
with open(intermediate_file_path) as result_file:
assert result_file.read() == expected_result_str
@pytest.mark.parametrize(
"notebook_path,rel_result_path",
[
(os.path.join(os.curdir, "file_name.ipynb"), ["file_name.ipynb_parsed"]),
(os.path.join(os.curdir, "../file_name.ipynb"), ["file_name.ipynb_parsed"]),
(
os.path.join(os.curdir, "sub_dir", "file_name.ipynb"),
["sub_dir", "file_name.ipynb_parsed"],
),
(
os.path.join(os.curdir, "sub_dir", "sub_sub_dir", "file_name.ipynb"),
["sub_dir", "sub_sub_dir", "file_name.ipynb_parsed"],
),
],
)
def test_create_temp_path(tmpdir, notebook_path: str, rel_result_path: List[str]):
expected_result_path = os.path.join(str(tmpdir), *rel_result_path)
result_path = create_temp_path(notebook_path, str(tmpdir))
assert result_path == os.path.abspath(expected_result_path)
assert os.path.isdir(os.path.dirname(result_path))
@pytest.mark.parametrize(
"notebook_name,number_of_cells,uses_get_ipython_result",
[
("not_a_notebook.ipynb", 0, False),
("cell_with_source_string.ipynb", 1, False),
("notebook_with_flake8_tags.ipynb", 10, True),
("notebook_with_out_flake8_tags.ipynb", 8, True),
("notebook_with_out_ipython_magic.ipynb", 1, False),
],
)
def test_get_notebook_code_cells(
notebook_name: str, number_of_cells: int, uses_get_ipython_result: bool
):
notebook_path = os.path.join(TEST_NOTEBOOK_BASE_PATH, notebook_name)
if notebook_name.startswith("not_a_notebook"):
with pytest.warns(InvalidNotebookWarning):
uses_get_ipython, notebook_cells = get_notebook_code_cells(notebook_path)
assert uses_get_ipython == uses_get_ipython_result
assert len(notebook_cells) == number_of_cells
else:
uses_get_ipython, notebook_cells = get_notebook_code_cells(notebook_path)
assert uses_get_ipython == uses_get_ipython_result
assert len(notebook_cells) == number_of_cells
@pytest.mark.parametrize(
"file_paths,base_path,expected_result",
[
(
[os.curdir, os.path.join(os.curdir, "file.foo")],
os.curdir,
[".", "file.foo"],
),
(
[os.path.join(os.curdir, "..", "file.foo")],
os.curdir,
[f"..{os.sep}file.foo"],
),
],
)
def test_get_rel_paths(file_paths: List[str], base_path: str, expected_result: List[str]):
assert get_rel_paths(file_paths, base_path) == expected_result
@pytest.mark.parametrize(
"notebook_cell,expected_result",
[
({"source": ["print('foo')"], "cell_type": "code"}, False),
({"source": ["## print('foo')"], "cell_type": "markdown"}, True),
({"source": [], "cell_type": "code"}, True),
],
)
def test_ignore_cell(notebook_cell: Dict, expected_result: bool):
assert ignore_cell(notebook_cell) == expected_result
@pytest.mark.parametrize(
"parent_dir,path,expected_result",
[
(os.curdir, os.curdir, True),
(os.curdir, os.path.join(os.curdir, "file.foo"), True),
(os.curdir, os.path.join(os.curdir, "subdir", "file.foo"), True),
(os.curdir, os.path.join(os.curdir, "..", "file.foo"), False),
],
)
def test_is_parent_dir(parent_dir: str, path: str, expected_result):
assert is_parent_dir(parent_dir, path) == expected_result
@pytest.mark.parametrize(
"notebook_name,number_of_cells",
[
("not_a_notebook.ipynb", 0),
("notebook_with_flake8_tags.ipynb", 24),
("notebook_with_out_flake8_tags.ipynb", 19),
("notebook_with_out_ipython_magic.ipynb", 3),
],
)
def test_read_notebook_to_cells(notebook_name: str, number_of_cells: int):
notebook_path = os.path.join(TEST_NOTEBOOK_BASE_PATH, notebook_name)
if notebook_name.startswith("not_a_notebook"):
with pytest.warns(InvalidNotebookWarning):
assert len(read_notebook_to_cells(notebook_path)) == number_of_cells
else:
assert len(read_notebook_to_cells(notebook_path)) == number_of_cells
def test_InvalidNotebookWarning():
with pytest.warns(
InvalidNotebookWarning,
match=(
"Error parsing notebook at path 'dummy_path'. " "Make sure this is a valid notebook."
),
):
warnings.warn(InvalidNotebookWarning("dummy_path"))
@pytest.mark.parametrize(
"line_number,expected_result",
[(15, (("2", 2, 2), 2)), (30, (("4", 4, 5), 3)), (52, (("7", 9, 15), 1))],
)
def test_map_intermediate_to_input_line(line_number: int, expected_result: Tuple[str, int]):
input_line_mapping: InputLineMapping = {
"input_ids": [
CellId("1", 1, 1),
CellId("2", 2, 2),
CellId("3", 3, 3),
CellId("4", 4, 5),
CellId("5", 6, 8),
CellId("6", 7, 10),
CellId("7", 9, 15),
],
"code_lines": [4, 11, 18, 25, 33, 41, 49],
}
assert map_intermediate_to_input(input_line_mapping, line_number) == expected_result
#################################
# NotebookParser Tests #
#################################
def test_NotebookParser_create_intermediate_py_file_paths(
notebook_parser: NotebookParser,
):
for original_notebook in notebook_parser.original_notebook_paths:
assert os.path.isfile(original_notebook)
for intermediate_py_file in notebook_parser.intermediate_py_file_paths:
assert os.path.isfile(intermediate_py_file)
assert notebook_parser.temp_path != ""
original_count = len(notebook_parser.original_notebook_paths)
intermediate_count = len(notebook_parser.intermediate_py_file_paths)
input_line_mapping_count = len(notebook_parser.input_line_mappings)
assert original_count == 3
assert intermediate_count == 3
assert input_line_mapping_count == 3
def test_NotebookParser_cross_instance_value_propagation(
notebook_parser: NotebookParser,
):
notebook_parser.get_mappings()
new_parser_instance = NotebookParser()
original_count = len(new_parser_instance.original_notebook_paths)
intermediate_count = len(new_parser_instance.intermediate_py_file_paths)
input_line_mapping_count = len(new_parser_instance.input_line_mappings)
assert original_count == 3
assert intermediate_count == 3
assert input_line_mapping_count == 3
def test_NotebookParser_clean_up(notebook_parser: NotebookParser):
temp_path = notebook_parser.temp_path
notebook_parser.clean_up()
assert not os.path.exists(temp_path)
assert notebook_parser.temp_path == ""
original_count = len(notebook_parser.original_notebook_paths)
intermediate_count = len(notebook_parser.intermediate_py_file_paths)
input_line_mapping_count = len(notebook_parser.input_line_mappings)
assert original_count == 0
assert intermediate_count == 0
assert input_line_mapping_count == 0
| 37.199336 | 97 | 0.652228 |
f76a1909a8fc1b3f7b47133da7f4566bc7fa5200
| 3,782 |
py
|
Python
|
zynq/tkinter_kws.py
|
Roxbili/kws-demo
|
7e0674f1407572fc8f148293b23fa20a5164bc5e
|
[
"Apache-2.0"
] | null | null | null |
zynq/tkinter_kws.py
|
Roxbili/kws-demo
|
7e0674f1407572fc8f148293b23fa20a5164bc5e
|
[
"Apache-2.0"
] | null | null | null |
zynq/tkinter_kws.py
|
Roxbili/kws-demo
|
7e0674f1407572fc8f148293b23fa20a5164bc5e
|
[
"Apache-2.0"
] | null | null | null |
#-*- encoding: utf-8 -*-
import time
import argparse
import numpy as np
import tkinter as tk
from tkinter.ttk import Label
from kws_ps_pl import BRAM, PSPLTalk, InputDataToBram
from multiprocessing import Process
class timeRecorder(object):
def __init__(self):
self.total_time = 0.
self.counter = 0
def start(self):
self.start_time = time.time()
def end(self):
self.total_time += time.time() - self.start_time
self.counter += 1
def get_total_time(self):
return self.total_time
def get_avg_time(self):
return self.total_time / self.counter
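# A minimal usage sketch of timeRecorder, assuming a short dummy workload;
# nothing in this script calls it.
def _time_recorder_example():  # illustrative only
    timer = timeRecorder()
    for _ in range(3):
        timer.start()
        time.sleep(0.01)  # stand-in for one inference pass
        timer.end()
    print('total: %.3fs, avg: %.3fs' % (timer.get_total_time(), timer.get_avg_time()))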
class App(PSPLTalk):
def __init__(self, args):
super(App, self).__init__()
self.input_object = InputDataToBram(args.mode)
        self.input_object.reset_flag() # initialize the flag bits
        self.input_object.info2bram() # send the basic parameters
        self.input_data_path = iter(self.input_object.data_path) # iterator over the input data paths
self.timer = timeRecorder()
self.root = tk.Tk()
self.word = Label(self.root)
self.txt_placeholder = tk.StringVar()
self._set_text('###')
color = '#1C1C1C'
self._set_root(color)
self._set_label(color)
        self.first_scan = True # let the first mainloop pass render the widgets first
def mainloop(self):
self.root.mainloop()
def _set_root(self, color):
self.root.geometry('200x60')
self.root.title('Keywords spotting')
self.root.config(background=color)
def _set_label(self, color):
self.word.config(
width = 7,
font=("Times", 40, 'bold'),
textvariable=self.txt_placeholder,
background=color,
foreground='#FCFAF2'
)
# self.txt_placeholder.set('unknown')
# lbl = Label(root, font = ('calibri', 40, 'bold'),
# background = 'purple',
# foreground = 'white')
self.word.pack(anchor='center', ipady=5)
def _set_text(self, txt):
self.txt_placeholder.set(txt)
def show_result(self):
        # on the first mainloop pass, just render the widgets
if self.first_scan:
self.word.after(1000, self.show_result)
self.first_scan = False
return
        # first, fetch the input data
        path = next(self.input_data_path) # iterate over the dataset
        # path = self.input_object.data_path[0] # for testing: only use 0_no.npy
input_data = np.load(path)
        # next, poll the flag bit; once it changes, send the data, otherwise block
while not self.input_object.sendData(input_data): pass
while True:
result_flag = self.bram.read_oneByOne(1, start=0x0, map_id=1)
if result_flag[0] == 1:
self.timer.start()
# reset result flag
self.bram.write(b'\x00\x00\x00\x00', start=0x0, map_id=1)
# get result
result = self.bram.read_oneByOne(12, start=0x4, map_id=1)
# show result
word = self.words_list[np.argmax(result)]
self._set_text(word)
print('path: %s, show word %s' % (path, word))
self.timer.end()
print('Total time: {}'.format(self.timer.get_total_time()))
print('Average time: {}'.format(self.timer.get_avg_time()))
                self.word.after(1, self.show_result) # schedule the next run
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-m',
'--mode',
type=str,
default='sdData',
)
args = parser.parse_args()
##################### init #####################
app = App(args)
##################### run 2 process #####################
print('Start listening...')
app.show_result()
app.mainloop()
| 30.015873 | 82 | 0.556584 |
f76a62a3c29de99b7ca31590c4643dc8281f10ff
| 8,892 |
py
|
Python
|
billiard/util.py
|
ask/billiard
|
bb9c1c43528b2a969c99aa5c57129daf2e1ce646
|
[
"BSD-3-Clause"
] | 1 |
2022-02-18T05:39:48.000Z
|
2022-02-18T05:39:48.000Z
|
billiard/util.py
|
ask/billiard
|
bb9c1c43528b2a969c99aa5c57129daf2e1ce646
|
[
"BSD-3-Clause"
] | null | null | null |
billiard/util.py
|
ask/billiard
|
bb9c1c43528b2a969c99aa5c57129daf2e1ce646
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Module providing various facilities to other parts of the package
#
# billiard/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
from __future__ import absolute_import
import errno
import functools
import itertools
import weakref
import atexit
import shutil
import tempfile
import threading # we want threading to install its
# cleanup function before multiprocessing does
from .process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
ERROR = 40
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
#: Support for reinitialization of objects when bootstrapping a child process
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
#: Finalization using weakrefs
_finalizer_registry = {}
_finalizer_counter = itertools.count()
#: set to true if the process is shutting down.
_exiting = False
def sub_debug(msg, *args, **kwargs):
if _logger:
_logger.log(SUBDEBUG, msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
if _logger:
_logger.log(DEBUG, msg, *args, **kwargs)
return True
return False
def info(msg, *args, **kwargs):
if _logger:
_logger.log(INFO, msg, *args, **kwargs)
return True
return False
def sub_warning(msg, *args, **kwargs):
if _logger:
_logger.log(SUBWARNING, msg, *args, **kwargs)
return True
return False
def error(msg, *args, **kwargs):
if _logger:
_logger.log(ERROR, msg, *args, **kwargs)
return True
return False
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
def get_temp_dir():
'''
Function returning a temp directory which will be removed on exit
'''
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception, e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj
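# A minimal sketch of how register_after_fork() is typically used, assuming an
# object holding per-process state; the class below is illustrative and never
# instantiated by this module.
class _ExampleForkAwareCache(object):
    def __init__(self):
        self._data = {}
        # Drop cached entries in the child process after a fork.
        register_after_fork(self, lambda obj: obj._data.clear())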
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, _finalizer_counter.next())
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
return x + '>'
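# A minimal sketch of Finalize in use, assuming a temporary directory that should
# be removed when its owner is garbage collected or at interpreter exit; the
# function below is illustrative and is not called by the module.
def _finalize_example():
    class _Owner(object):
        pass
    owner = _Owner()
    tmpdir = tempfile.mkdtemp(prefix='example-')
    # rmtree runs when `owner` dies or when the exit handler drains the registry.
    Finalize(owner, shutil.rmtree, args=[tmpdir], exitpriority=10)
    return owner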
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if minpriority is None:
f = lambda p: p[0][0] is not None
else:
f = lambda p: p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in _finalizer_registry.items() if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
if not error("Error calling finalizer %r", finalizer,
exc_info=True):
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
@atexit.register
def _exit_function():
'''
Clean up on exit
'''
global _exiting
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
class ForkAwareThreadLock(object):
def __init__(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
register_after_fork(self, ForkAwareThreadLock.__init__)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj: obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
def _eintr_retry(func):
'''
Automatic retry after EINTR.
'''
@functools.wraps(func)
def wrapped(*args, **kwargs):
while 1:
try:
return func(*args, **kwargs)
except OSError, exc:
if exc.errno != errno.EINTR:
raise
return wrapped
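# A small sketch of the _eintr_retry decorator, wrapping os.read as a hypothetical
# example; nothing in this module uses it this way.
def _eintr_retry_example(fd, bufsize=4096):
    import os
    read = _eintr_retry(os.read)
    return read(fd, bufsize)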
| 26.464286 | 79 | 0.617746 |
f76a67b29770d4e00e2de5d225c6d7afc0f949cc
| 9,140 |
py
|
Python
|
src/nti/zodb/activitylog.py
|
NextThought/nti.zodb
|
2eb56b6a617fe83c738bfc4651b31d080856e3fc
|
[
"Apache-2.0"
] | null | null | null |
src/nti/zodb/activitylog.py
|
NextThought/nti.zodb
|
2eb56b6a617fe83c738bfc4651b31d080856e3fc
|
[
"Apache-2.0"
] | 10 |
2017-06-08T12:24:31.000Z
|
2021-04-01T16:52:38.000Z
|
src/nti/zodb/activitylog.py
|
NextThought/nti.zodb
|
2eb56b6a617fe83c738bfc4651b31d080856e3fc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Logging of database connection activity. Activate this with ZCML::
<include package="nti.zodb" file="configure_activitylog.zcml" />
Originally based on code from the unreleased zc.zodbactivitylog.
"""
from __future__ import print_function, absolute_import, division
__docformat__ = "restructuredtext en"
import os
from collections import namedtuple
from functools import partial
from perfmetrics import statsd_client
from ZConfig.datatypes import integer
from ZConfig.datatypes import RangeCheckedConversion
logger = __import__('logging').getLogger(__name__)
non_negative_integer = RangeCheckedConversion(integer, min=0)
def _setting_from_environ(converter, environ_name, default):
result = default
env_val = os.environ.get(environ_name, default)
if env_val is not default:
try:
result = converter(env_val)
except (ValueError, TypeError):
logger.exception("Failed to parse environment value %r for key %r",
env_val, environ_name)
result = default
logger.debug('Using value %s from environ %r=%r (default=%r)',
result, environ_name, env_val, default)
return result
def _get_non_negative_integer_from_environ(environ_name, default):
return _setting_from_environ(non_negative_integer, environ_name, default)
class ComponentActivityMonitor(object):
"""
Activity monitor that:
- Can call another activity monitor; this is useful for adding
on to an existing activity monitor.
- Can call a sequence of callables with some data. This is useful for
composing many activity monitors much more cheaply than if they
were on their own.
"""
__slots__ = (
'base',
'components',
'_get_loads_stores',
)
def __init__(self, base=None, components=()):
"""
:param base: An arbitrary activity monitor.
:param components: An iterable of :class:`ActivityMonitorComponent`
instances (callables taking a :class:`ActivityMonitorData`).
"""
self.base = base
self.components = components
if base is not None:
self._get_loads_stores = partial(self._call_base_get_loads_stores, base)
else:
self._get_loads_stores = self._no_base_get_loads_stores
@staticmethod
def _call_base_get_loads_stores(base, conn):
# Capture the load and store counts before the base has a
# chance to clear them.
loads, stores = conn.getTransferCounts(False)
base.closedConnection(conn)
# Make sure connection counts are cleared if the base did not.
conn.getTransferCounts(True)
return loads, stores
@staticmethod
def _no_base_get_loads_stores(conn):
# We're the end of the chain, we can clear
return conn.getTransferCounts(True)
def closedConnection(self, conn):
# This is called for every transaction, so performance matters somewhat.
# OTOH, what we do here is probably far less costly than writing a log message;
# the Statsd client from perfmetrics buffers so it's pretty cheap.
# Still, we expect most people to use both statsd and logging, so doing some work
# a single time should pay off.
db = conn.db()
db_name = db.database_name
# These are both usually protected by a lock as they can mutate together.
# We're not really concerned about that. We use `len`, implemented in C, which
# doesn't drop the GIL, so we're also not concerned about mutation happening while
# we iterate.
pool_all_count = len(db.pool.all)
pool_available_count = len(db.pool.available)
loads, stores = self._get_loads_stores(conn)
data = ActivityMonitorData(loads, stores, db_name, pool_all_count, pool_available_count)
for component in self.components:
component(data)
def __getattr__(self, name):
return getattr(self.base, name)
AbstractActivityMonitor = ComponentActivityMonitor # BWC
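# A minimal sketch of composing monitors on an existing ZODB ``db`` object,
# mirroring what register_subscriber() further below does; the function is
# illustrative only and never called here.
def _composition_example(db):
    monitor = ComponentActivityMonitor(
        db.getActivityMonitor(),
        components=[LogActivityComponent(), StatsdActivityComponent()],
    )
    db.setActivityMonitor(monitor)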
#: The data passed to :class:`ActivityMonitorComponent`
ActivityMonitorData = namedtuple('ActivityMonitorData',
('loads', 'stores', 'db_name',
'pool_all_count', 'pool_idle_count'))
class ActivityMonitorComponent(object):
"""
One component of a chain of activity monitors.
"""
__slots__ = ()
def __call__(self, data):
"""
Called when a connection has been closed.
:param ActivityMonitorData data: An instance of `ActivityMonitorData`.
"""
class LogActivityComponent(ActivityMonitorComponent):
"""
An activity monitor component that logs connection transfer information
and pool information.
.. versionchanged:: 1.3.0
Add `min_loads` and `min_stores`. These additional thresholds
are tested in addition to `min_loads_and_stores` and if any
threshold is reached the logging happens. The default value of
each threshold is 10.
The thresholds may be configured in the environment (before loading
this class) as integer strings using the values
``NTI_ZODB_LOG_MIN_LOADS``, ``NTI_ZODB_LOG_MIN_STORES``,
and ``NTI_ZODB_LOG_MIN_ACTIVITY``, respectively.
"""
#: Perform logging if the total of loads + stores is
#: at least this many.
min_loads_and_stores = _get_non_negative_integer_from_environ(
"NTI_ZODB_LOG_MIN_ACTIVITY",
10
)
#: Perform logging if the number of loads is
#: at least this many.
min_loads = _get_non_negative_integer_from_environ(
"NTI_ZODB_LOG_MIN_LOADS",
10
)
#: Perform logging if the number of stores is
#: at least this many.
min_stores = _get_non_negative_integer_from_environ(
"NTI_ZODB_LOG_MIN_STORES",
10
)
def __call__(self, data):
# type: (ActivityMonitorData) -> None
loads = data.loads
stores = data.stores
if (
loads >= self.min_loads
or stores > self.min_stores
or loads + stores >= self.min_loads_and_stores
):
logger.info(
"closedConnection={'loads': %5d, 'stores': %5d, 'database': %s, "
"'num_connections': %5d, 'num_avail_connections': %5d}",
loads, stores, data.db_name,
data.pool_all_count, data.pool_idle_count)
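# A sketch of raising the logging thresholds in code rather than through the
# environment variables described in the docstring above; this subclass is
# illustrative only and unused by the module.
class _QuietLogActivityComponent(LogActivityComponent):
    min_loads = 50
    min_stores = 50
    min_loads_and_stores = 100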
class LogActivityMonitor(ComponentActivityMonitor):
"""
A pre-configured :class:`ComponentActivityMonitor` that uses
:func:`LogActivityComponent`
"""
def __init__(self, base=None):
ComponentActivityMonitor.__init__(self, base, (LogActivityComponent(),))
class StatsdActivityComponent(ActivityMonitorComponent):
"""
    An activity monitor component that stores counters (gauges) in statsd,
if the statsd client is available.
The stats are:
- ZODB.DB.<name>.loads
How many loads the open connection performed.
- ZODB.DB.<name>.stores
How many stores the open connection performed.
- ZODB.DB.<name>.total_connections
All currently open connections, including those in use
and those in the pool.
    - ZODB.DB.<name>.idle_connections
The connections sitting idle in the pool.
"""
statsd_client = staticmethod(statsd_client)
def __call__(self, data):
# type: (ActivityMonitorData) -> None
statsd = self.statsd_client()
if statsd is None:
return
# Should these be counters or gauges? Or even sets?
# counters are aggregated across all instances, gauges (by default) are broken out
# by host
buf = []
statsd.gauge('ZODB.DB.' + data.db_name + '.loads',
data.loads, buf=buf)
statsd.gauge('ZODB.DB.' + data.db_name + '.stores',
data.stores, buf=buf)
statsd.gauge('ZODB.DB.' + data.db_name + '.total_connections',
data.pool_all_count, buf=buf)
statsd.gauge('ZODB.DB.' + data.db_name + '.idle_connections',
data.pool_idle_count, buf=buf)
statsd.sendbuf(buf)
class StatsdActivityMonitor(ComponentActivityMonitor):
"""
A pre-configured :class:`ComponentActivityMonitor` that uses
:func:`LogActivityComponent`
"""
def __init__(self, base=None):
ComponentActivityMonitor.__init__(self, base, (StatsdActivityComponent(),))
def register_subscriber(event):
"""
Subscriber to the :class:`zope.processlifetime.IDatabaseOpenedWithRoot`
that registers an activity monitor.
"""
# IDatabaseOpened fires for each database, so if we sub to that we'd do this many times.
# WithRoot fires only once.
for database in event.database.databases.values():
monitor = ComponentActivityMonitor(
database.getActivityMonitor(),
[LogActivityComponent(), StatsdActivityComponent()]
)
database.setActivityMonitor(monitor)
| 34.621212 | 96 | 0.66291 |
f76a6b22f829cd36368947588d4a7db54da8b5ef
| 2,691 |
py
|
Python
|
Emulator/library.py
|
samedamci/7seg-Emulator
|
6115d817395d88d6490065893748e55883faf401
|
[
"0BSD"
] | null | null | null |
Emulator/library.py
|
samedamci/7seg-Emulator
|
6115d817395d88d6490065893748e55883faf401
|
[
"0BSD"
] | null | null | null |
Emulator/library.py
|
samedamci/7seg-Emulator
|
6115d817395d88d6490065893748e55883faf401
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python3
"""
Implementation of ZeroSeg library methods with matching behaviour,
rendered on the emulator display instead of the physical hardware.
"""
from Emulator.characters import CHARS, UNDEFINED
import time
METHOD = []
class Screen:
"""
Representation of `screen` object from ZeroSeg library.
"""
def __init__(self):
pass
def set_byte(self, value: int, position: int, redraw: bool = True):
METHOD.append(f"draw_symbol(str(hex({value})), {position})")
# TODO: redraw
    def __get_byte_by_char(self, char: str) -> str:
return hex(CHARS.get(str(char), UNDEFINED))
def write_char(
self, char: str, position: int = 1, dot: bool = False, redraw: bool = True
):
value = self.__get_byte_by_char(char)
METHOD.append(f"draw_symbol({str(value)}, {position})")
# TODO: redraw
def write_number(
self,
value: float,
base: int = 10,
decimal_places: int = 0,
zero_pad: bool = False,
left_justify: bool = False,
):
self.clear()
if zero_pad:
pass
if decimal_places > 0:
pass
value = list(str(value))
value.reverse()
_bytes = []
for i in value:
byte = str(self.__get_byte_by_char(str(i)))
_bytes.append(byte)
for i in range(len(_bytes)):
if left_justify:
METHOD.append(f"draw_symbol(str({_bytes[i]}), 8 - {i})")
else:
METHOD.append(f"draw_symbol(str({_bytes[i]}), {i} + 1)")
# TODO: base
def write_text(self, text: str):
self.clear()
if len(text) <= 8:
_bytes = []
for i in text:
byte = str(self.__get_byte_by_char(i))
_bytes.append(byte)
for i in range(len(text)):
METHOD.append(f"draw_symbol(str({_bytes[i]}), 8 - {i})")
else:
return
def clear(self):
METHOD.append("init_lines()")
screen = Screen()
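# A minimal usage sketch of the emulated screen, assuming the emulator main loop
# later consumes the draw commands queued in METHOD; illustrative only.
def _screen_example():  # never called by the library
    screen.clear()
    screen.write_text('HELLO')
    screen.write_number(3.14, left_justify=True)
    return METHOD  # queued draw commands for the emulator display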
class Button:
"""
Representation of `Button` class from ZeroSeg library.
"""
def __init__(self, button: str):
self.button = button
if button == "left" or button == "right":
pass
else:
raise ValueError("Invalid button name, allowed: 'left', 'right'.")
def pressed(self) -> bool:
time.sleep(0.01)
for _ in range(1):
from Emulator.emulator import ctx as emuctx
if self.button == "left":
return emuctx.lb_pressed
elif self.button == "right":
return emuctx.rb_pressed
| 25.628571 | 82 | 0.540691 |
f76a9e25e65c28dd22fe201da585414be6c849b9
| 1,828 |
py
|
Python
|
tests/agent/test_polynomial_model.py
|
iryzhkov/stock-trading-backend
|
7161026b7b4deb78a934b66550c85a27c6b32933
|
[
"MIT"
] | 1 |
2021-01-27T18:24:02.000Z
|
2021-01-27T18:24:02.000Z
|
tests/agent/test_polynomial_model.py
|
iryzhkov/stock-trading-backend
|
7161026b7b4deb78a934b66550c85a27c6b32933
|
[
"MIT"
] | null | null | null |
tests/agent/test_polynomial_model.py
|
iryzhkov/stock-trading-backend
|
7161026b7b4deb78a934b66550c85a27c6b32933
|
[
"MIT"
] | null | null | null |
"""Unit tests for PolynomialModel class
"""
import os
import unittest
import pandas as pd
from stock_trading_backend.agent import PolynomialModel
class TestPolynomialModel(unittest.TestCase):
"""Unit tests for PolynomialModel class.
"""
def test_initializes(self):
"""Checks if model initializes properly.
"""
model = PolynomialModel(degree=5)
self.assertEqual(5, model.degree)
with self.assertRaises(ValueError):
_ = PolynomialModel(degree=0)
def test_save_and_load(self):
"""Checks if saving and loading functin works properly.
"""
file_path = "data/test/test.pkl"
model = PolynomialModel()
observation = pd.Series([1, 2, 3], ["balance", "net_worth", "owned"])
predictions_1 = model.predict(observation, [[0, 1]] * 5)
model.save(file_path)
model.load(file_path)
predictions_2 = model.predict(observation, [[0, 1]] * 5)
self.assertTrue(all(predictions_1 == predictions_2))
os.remove(file_path)
def test_predict(self):
"""Checks if predict function works properly.
"""
model = PolynomialModel()
observation = pd.Series([1, 2, 3], ["balance", "net_worth", "owned"])
predictions = model.predict(observation, [[0, 1]] * 5)
self.assertEqual(5, len(predictions))
def test_train(self):
"""Checks if train function works properly.
"""
model = PolynomialModel(degree=2)
observations = pd.DataFrame([[1, 2, 3]] * 10, columns=["balance", "net_worth", "owned"])
actions = [[0]] * 5 + [[1]] * 5
expected_values = [[0]] * 5 + [[1]] * 5
losses = [model.train(observations, actions, expected_values) for i in range(10)]
self.assertTrue(losses[0] > losses[-1])
| 35.153846 | 96 | 0.615427 |
f76aa1d9621e88225dce18778f5a2cd7f4fce0cc
| 10,077 |
py
|
Python
|
obsolete/reports/pipeline_capseq/trackers/macs_shared_intervals.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 11 |
2018-09-07T11:33:23.000Z
|
2022-01-07T12:16:11.000Z
|
obsolete/reports/pipeline_capseq/trackers/macs_shared_intervals.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 102 |
2018-03-22T15:35:26.000Z
|
2022-03-23T17:46:16.000Z
|
obsolete/reports/pipeline_capseq/trackers/macs_shared_intervals.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 7 |
2018-06-11T15:01:41.000Z
|
2020-03-31T09:29:33.000Z
|
import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import scipy.stats
import numpy.ma
import Stats
import Histogram
from cgatReport.Tracker import *
from cpgReport import *
##########################################################################
class SharedIntervals(cpgTracker):
"""Summary stats of intervals called by the peak finder. """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getFirstRow(
"SELECT COUNT(*) as number, round(AVG(stop-start),0) as length FROM %(track)s_merged_shared_intervals" % locals())
return odict(list(zip(("Shared intervals", "mean_interval_length"), data)))
##########################################################################
class sharedIntervalLengths(cpgTracker):
"""Distribution of interval length. """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getValues(
"SELECT (stop-start) FROM %(track)s_merged_shared_intervals" % locals())
return {"length": data}
##########################################################################
class SharedIntervalPeakValues(cpgTracker):
"""Distribution of maximum interval coverage (the number of reads at peak). """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getValues( '''SELECT i.peakval FROM %(track)s_merged_shared_intervals u, %(track)s_macs_merged_intervals i
WHERE u.contig=i.contig
AND u.start=i.start''' % locals() )
return {"peakval": data}
##########################################################################
class SharedIntervalAverageValues(cpgTracker):
"""Distribution of average coverage (the average number of reads within the interval) """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getValues( '''SELECT avgval FROM %(track)s_merged_shared_intervals u, %(track)s_macs_merged_intervals i
WHERE u.contig=i.contig
AND u.start=i.start''' % locals() )
return {"avgval": data}
##########################################################################
class SharedIntervalFoldChange(cpgTracker):
"""Distribution of fold change """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getValues( '''SELECT i.fold FROM %(track)s_merged_shared_intervals u, %(track)s_macs_merged_intervals i
WHERE u.contig=i.contig
AND u.start=i.start''' % locals() )
return odict([("Fold Change", data)])
##########################################################################
class SharedIntervalTSS(cpgTracker):
"""Distribution of distance to closest TSS """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getValues( '''SELECT closest_dist FROM %(track)s_merged_shared_intervals u,
%(track)s_macs_merged_intervals i, %(track)s_merged_tss t
WHERE u.contig=i.contig
AND u.start=i.start
AND t.gene_id=i.interval_id''' % locals() )
return {"distance": data}
##########################################################################
class SharedIntervalCpGDensity(cpgTracker):
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getAll( '''SELECT pCpG FROM %(track)s_merged_shared_intervals u,
%(track)s_macs_merged_intervals i,%(track)s_merged_composition c
WHERE u.contig=i.contig
AND u.start=i.start
AND c.gene_id=i.interval_id''' % locals() )
return data
##########################################################################
class SharedIntervalCpGObsExp1(cpgTracker):
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getAll( '''SELECT CpG_ObsExp1 FROM %(track)s_merged_shared_intervals u,
%(track)s_macs_merged_intervals i,%(track)s_merged_composition c
WHERE u.contig=i.contig
AND u.start=i.start
AND c.gene_id=i.interval_id''' % locals() )
return data
##########################################################################
class SharedIntervalCpGObsExp2(cpgTracker):
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getAll( '''SELECT CpG_ObsExp FROM %(track)s_merged_shared_intervals u,
%(track)s_macs_merged_intervals i,%(track)s_merged_composition c
WHERE u.contig=i.contig
AND u.start=i.start
AND c.gene_id=i.interval_id''' % locals() )
return data
##########################################################################
class SharedIntervalCpGNumber(cpgTracker):
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getAll( '''SELECT nCpG FROM %(track)s_merged_shared_intervals u,
%(track)s_macs_merged_intervals i,%(track)s_merged_composition c
WHERE u.contig=i.contig
AND u.start=i.start
AND c.gene_id=i.interval_id''' % locals() )
return data
##########################################################################
class SharedIntervalGCContent(cpgTracker):
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.getAll( '''SELECT pGC FROM %(track)s_merged_shared_intervals u,
%(track)s_macs_merged_intervals i,%(track)s_merged_composition c
WHERE u.contig=i.contig
AND u.start=i.start
AND c.gene_id=i.interval_id''' % locals() )
return data
##########################################################################
##########################################################################
##########################################################################
class SharedIntervalLengthVsAverageValue(cpgTracker):
"""Length vs average value. """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.get( '''SELECT length, avgval FROM %(track)s_merged_shared_intervals u, %(track)s_macs_merged_intervals i
WHERE u.contig=i.contig
AND u.start=i.start''' % locals() )
return odict(list(zip(("length", "avgval"), list(zip(*data)))))
##########################################################################
class SharedIntervalLengthVsPeakValue(cpgTracker):
"""Length vs peak value """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.get( '''SELECT length, peakval FROM %(track)s_merged_shared_intervals u, %(track)s_macs_merged_intervals i
WHERE u.contig=i.contig
AND u.start=i.start''' % locals() )
return odict(list(zip(("length", "peakval"), list(zip(*data)))))
##########################################################################
class SharedIntervalLengthVsFoldChange(cpgTracker):
"""Length vs fold change"""
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.get( '''SELECT length, fold FROM %(track)s_merged_shared_intervals u, %(track)s_macs_merged_intervals i
WHERE u.contig=i.contig
AND u.start=i.start''' % locals() )
return odict(list(zip(("length", "foldchange"), list(zip(*data)))))
##########################################################################
class SharedIntervalAvgValVsPeakVal(cpgTracker):
"""average value vs peak value """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.get( '''SELECT avgval, peakval FROM %(track)s_merged_shared_intervals u, %(track)s_macs_merged_intervals i
WHERE u.contig=i.contig
AND u.start=i.start''' % locals() )
return odict(list(zip(("avgval", "peakval"), list(zip(*data)))))
##########################################################################
class SharedIntervalAvgValVsFoldChange(cpgTracker):
"""average value vs fold change """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.get( '''SELECT avgval, fold FROM %(track)s_merged_shared_intervals u, %(track)s_macs_merged_intervals i
WHERE u.contig=i.contig
AND u.start=i.start''' % locals() )
return odict(list(zip(("avgval", "foldchange"), list(zip(*data)))))
##########################################################################
class SharedIntervalPeakValVsFoldChange(cpgTracker):
"""Peak value vs fold change """
mPattern = "_merged_shared_intervals$"
def __call__(self, track, slice=None):
data = self.get( '''SELECT peakval, fold FROM %(track)s_merged_shared_intervals u, %(track)s_macs_merged_intervals i
WHERE u.contig=i.contig
AND u.start=i.start''' % locals() )
return odict(list(zip(("peakval", "foldchange"), list(zip(*data)))))
| 37.741573 | 126 | 0.51682 |
f76addef04364b01d3fa2c09b3aa617c717d940d
| 1,827 |
py
|
Python
|
unittests/test_ui_helpers.py
|
stepanandr/taf
|
75cb85861f8e9703bab7dc6195f3926b8394e3d0
|
[
"Apache-2.0"
] | 10 |
2016-12-16T00:05:58.000Z
|
2018-10-30T17:48:25.000Z
|
unittests/test_ui_helpers.py
|
stepanandr/taf
|
75cb85861f8e9703bab7dc6195f3926b8394e3d0
|
[
"Apache-2.0"
] | 40 |
2017-01-04T23:07:05.000Z
|
2018-04-16T19:52:02.000Z
|
unittests/test_ui_helpers.py
|
stepanandr/taf
|
75cb85861f8e9703bab7dc6195f3926b8394e3d0
|
[
"Apache-2.0"
] | 23 |
2016-12-30T05:03:53.000Z
|
2020-04-01T08:40:24.000Z
|
# coding=utf-8
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``test_ui_helpers.py``
`Unittests for ui_helpers.py`
"""
from unittest.mock import MagicMock
import pytest
from .test_ui_onpss_shell import ports_side_effect, multicall_ports_side_effect
from testlib import ui_onpss_shell
@pytest.fixture
def ui_onpss():
return ui_onpss_shell.UiOnpssShell(MagicMock(**{"hw": type("SiliconFM10K", (object,), {})()}))
class TestLagHelpers(object):
@pytest.fixture(autouse=True)
def create_ui(self, ui_onpss):
self.ui_onpss = ui_onpss
ui_onpss.switch.ssh.exec_command = MagicMock(side_effect=ports_side_effect)
ui_onpss.cli_multicall = MagicMock(side_effect=multicall_ports_side_effect)
def test_is_lag_added(self):
assert self.ui_onpss.is_lag_added(lag_id=1234)
assert self.ui_onpss.is_lag_added(lag_id='team1')
assert not self.ui_onpss.is_lag_added(lag_id='1234')
def test_is_port_added_to_lag(self):
assert self.ui_onpss.is_port_added_to_lag(port=5, lag_id='team1')
assert self.ui_onpss.is_port_added_to_lag(port=8, lag_id=1234)
assert not self.ui_onpss.is_port_added_to_lag(port=5, lag_id='team2')
assert not self.ui_onpss.is_port_added_to_lag(port=8, lag_id='1234')
| 33.218182 | 98 | 0.746579 |
f76ae73e89dce3c5941fed39652c2ad93ec2e096
| 6,054 |
py
|
Python
|
simplebgc/command_names.py
|
maiermic/robot-cameraman
|
afe441cf1677322062da77aa782a08f10440fcc7
|
[
"MIT"
] | 7 |
2019-11-17T13:18:07.000Z
|
2022-03-13T04:14:15.000Z
|
simplebgc/command_names.py
|
maiermic/robot-cameraman
|
afe441cf1677322062da77aa782a08f10440fcc7
|
[
"MIT"
] | 14 |
2021-02-03T16:21:21.000Z
|
2022-03-11T23:57:48.000Z
|
simplebgc/command_names.py
|
maiermic/robot-cameraman
|
afe441cf1677322062da77aa782a08f10440fcc7
|
[
"MIT"
] | 2 |
2021-04-21T21:22:49.000Z
|
2021-05-28T17:39:13.000Z
|
from typing import Optional
from simplebgc.command_ids import *
INCOMING_COMMAND_NAMES = {
CMD_BOARD_INFO: 'CMD_BOARD_INFO',
CMD_BOARD_INFO_3: 'CMD_BOARD_INFO_3',
CMD_READ_PARAMS_3: 'CMD_READ_PARAMS_3',
CMD_READ_PARAMS_EXT: 'CMD_READ_PARAMS_EXT',
CMD_READ_PARAMS_EXT2: 'CMD_READ_PARAMS_EXT2',
CMD_READ_PARAMS_EXT3: 'CMD_READ_PARAMS_EXT3',
CMD_REALTIME_DATA_3: 'CMD_REALTIME_DATA_3',
CMD_REALTIME_DATA_4: 'CMD_REALTIME_DATA_4',
CMD_CONFIRM: 'CMD_CONFIRM',
CMD_ERROR: 'CMD_ERROR',
CMD_GET_ANGLES: 'CMD_GET_ANGLES',
CMD_GET_ANGLES_EXT: 'CMD_GET_ANGLES_EXT',
CMD_READ_PROFILE_NAMES: 'CMD_READ_PROFILE_NAMES',
CMD_I2C_READ_REG_BUF: 'CMD_I2C_READ_REG_BUF',
CMD_AUTO_PID: 'CMD_AUTO_PID',
CMD_DEBUG_VARS_INFO_3: 'CMD_DEBUG_VARS_INFO_3',
CMD_DEBUG_VARS_3: 'CMD_DEBUG_VARS_3',
CMD_READ_EXTERNAL_DATA: 'CMD_READ_EXTERNAL_DATA',
CMD_SET_ADJ_VARS_VAL: 'CMD_SET_ADJ_VARS_VAL',
CMD_READ_ADJ_VARS_CFG: 'CMD_READ_ADJ_VARS_CFG',
CMD_RESET: 'CMD_RESET',
CMD_EEPROM_READ: 'CMD_EEPROM_READ',
CMD_CALIB_INFO: 'CMD_CALIB_INFO',
CMD_READ_FILE: 'CMD_READ_FILE',
CMD_SCRIPT_DEBUG: 'CMD_SCRIPT_DEBUG',
CMD_AHRS_HELPER: 'CMD_AHRS_HELPER',
CMD_REALTIME_DATA_CUSTOM: 'CMD_REALTIME_DATA_CUSTOM',
CMD_ADJ_VARS_STATE: 'CMD_ADJ_VARS_STATE',
CMD_READ_RC_INPUTS: 'CMD_READ_RC_INPUTS',
CMD_EVENT: 'CMD_EVENT',
CMD_EXT_IMU_DEBUG_INFO: 'CMD_EXT_IMU_DEBUG_INFO',
}
OUTGOING_COMMAND_NAMES = {
CMD_BOARD_INFO: 'CMD_BOARD_INFO',
CMD_BOARD_INFO_3: 'CMD_BOARD_INFO_3',
CMD_REALTIME_DATA: 'CMD_REALTIME_DATA',
CMD_REALTIME_DATA_3: 'CMD_REALTIME_DATA_3',
CMD_REALTIME_DATA_4: 'CMD_REALTIME_DATA_4',
CMD_CALIB_ACC: 'CMD_CALIB_ACC',
CMD_CALIB_GYRO: 'CMD_CALIB_GYRO',
CMD_CALIB_MAG: 'CMD_CALIB_MAG',
CMD_CALIB_EXT_GAIN: 'CMD_CALIB_EXT_GAIN',
CMD_USE_DEFAULTS: 'CMD_USE_DEFAULTS',
CMD_CALIB_POLES: 'CMD_CALIB_POLES',
CMD_READ_RC_INPUTS: 'CMD_READ_RC_INPUTS',
CMD_READ_PARAMS: 'CMD_READ_PARAMS',
CMD_READ_PARAMS_3: 'CMD_READ_PARAMS_3',
CMD_READ_PARAMS_EXT: 'CMD_READ_PARAMS_EXT',
CMD_READ_PARAMS_EXT2: 'CMD_READ_PARAMS_EXT2',
CMD_READ_PARAMS_EXT3: 'CMD_READ_PARAMS_EXT3',
CMD_WRITE_PARAMS: 'CMD_WRITE_PARAMS',
CMD_WRITE_PARAMS_3: 'CMD_WRITE_PARAMS_3',
CMD_WRITE_PARAMS_EXT: 'CMD_WRITE_PARAMS_EXT',
CMD_WRITE_PARAMS_EXT2: 'CMD_WRITE_PARAMS_EXT2',
CMD_WRITE_PARAMS_EXT3: 'CMD_WRITE_PARAMS_EXT3',
CMD_RESET: 'CMD_RESET',
CMD_BOOT_MODE_3: 'CMD_BOOT_MODE_3',
CMD_CALIB_OFFSET: 'CMD_CALIB_OFFSET',
CMD_CALIB_BAT: 'CMD_CALIB_BAT',
CMD_CONTROL: 'CMD_CONTROL',
CMD_CONTROL_CONFIG: 'CMD_CONTROL_CONFIG',
CMD_TRIGGER_PIN: 'CMD_TRIGGER_PIN',
CMD_MOTORS_ON: 'CMD_MOTORS_ON',
CMD_MOTORS_OFF: 'CMD_MOTORS_OFF',
CMD_EXECUTE_MENU: 'CMD_EXECUTE_MENU',
CMD_HELPER_DATA: 'CMD_HELPER_DATA',
CMD_GET_ANGLES: 'CMD_GET_ANGLES',
CMD_GET_ANGLES_EXT: 'CMD_GET_ANGLES_EXT',
CMD_SELECT_IMU_3: 'CMD_SELECT_IMU_3',
CMD_READ_PROFILE_NAMES: 'CMD_READ_PROFILE_NAMES',
CMD_WRITE_PROFILE_NAMES: 'CMD_WRITE_PROFILE_NAMES',
CMD_SET_ADJ_VARS_VAL: 'CMD_SET_ADJ_VARS_VAL',
CMD_GET_ADJ_VARS_VAL: 'CMD_GET_ADJ_VARS_VAL',
CMD_SAVE_PARAMS_3: 'CMD_SAVE_PARAMS_3',
CMD_AUTO_PID: 'CMD_AUTO_PID',
CMD_AUTO_PID2: 'CMD_AUTO_PID2',
CMD_SERVO_OUT: 'CMD_SERVO_OUT',
CMD_I2C_WRITE_REG_BUF: 'CMD_I2C_WRITE_REG_BUF',
CMD_I2C_READ_REG_BUF: 'CMD_I2C_READ_REG_BUF',
CMD_DEBUG_VARS_INFO_3: 'CMD_DEBUG_VARS_INFO_3',
CMD_DEBUG_VARS_3: 'CMD_DEBUG_VARS_3',
CMD_WRITE_EXTERNAL_DATA: 'CMD_WRITE_EXTERNAL_DATA',
CMD_READ_EXTERNAL_DATA: 'CMD_READ_EXTERNAL_DATA',
CMD_API_VIRT_CH_CONTROL: 'CMD_API_VIRT_CH_CONTROL',
CMD_API_VIRT_CH_HIGH_RES: 'CMD_API_VIRT_CH_HIGH_RES',
CMD_READ_ADJ_VARS_CFG: 'CMD_READ_ADJ_VARS_CFG',
CMD_WRITE_ADJ_VARS_CFG: 'CMD_WRITE_ADJ_VARS_CFG',
CMD_EEPROM_WRITE: 'CMD_EEPROM_WRITE',
CMD_EEPROM_READ: 'CMD_EEPROM_READ',
CMD_CALIB_INFO: 'CMD_CALIB_INFO',
CMD_READ_FILE: 'CMD_READ_FILE',
CMD_WRITE_FILE: 'CMD_WRITE_FILE',
CMD_FS_CLEAR_ALL: 'CMD_FS_CLEAR_ALL',
CMD_RUN_SCRIPT: 'CMD_RUN_SCRIPT',
CMD_AHRS_HELPER: 'CMD_AHRS_HELPER',
CMD_GYRO_CORRECTION: 'CMD_GYRO_CORRECTION',
CMD_DATA_STREAM_INTERVAL: 'CMD_DATA_STREAM_INTERVAL',
CMD_REALTIME_DATA_CUSTOM: 'CMD_REALTIME_DATA_CUSTOM',
CMD_BEEP_SOUND: 'CMD_BEEP_SOUND',
CMD_ENCODERS_CALIB_OFFSET_4: 'CMD_ENCODERS_CALIB_OFFSET_4',
CMD_ENCODERS_CALIB_FLD_OFFSET_4: 'CMD_ENCODERS_CALIB_FLD_OFFSET_4',
CMD_ADJ_VARS_STATE: 'CMD_ADJ_VARS_STATE',
CMD_CALIB_ORIENT_CORR: 'CMD_CALIB_ORIENT_CORR',
CMD_CALIB_ACC_EXT_REF: 'CMD_CALIB_ACC_EXT_REF',
CMD_PROFILE_SET: 'CMD_PROFILE_SET',
}
UNDOCUMENTED_COMMAND_NAMES = {
CMD_QUEUE_PARAMS_INFO_3: 'CMD_QUEUE_PARAMS_INFO_3',
CMD_SYSTEM_STATE: 'CMD_SYSTEM_STATE',
CMD_CALIB_MOTOR_MAG_LINK: 'CMD_CALIB_MOTOR_MAG_LINK',
CMD_COGGING_CALIB_INFO: 'CMD_COGGING_CALIB_INFO',
CMD_CALIB_COGGING: 'CMD_CALIB_COGGING',
CMD_CAN_DEVICE_SCAN: 'CMD_CAN_DEVICE_SCAN',
CMD_CAN_DRV_HARD_PARAMS: 'CMD_CAN_DRV_HARD_PARAMS',
CMD_CAN_DRV_STATE: 'CMD_CAN_DRV_STATE',
CMD_CAN_DRV_CALIBRATE: 'CMD_CAN_DRV_CALIBRATE',
CMD_REALTIME_DATA_CAN_DRV: 'CMD_REALTIME_DATA_CAN_DRV',
CMD_SET_DEVICE_ADDR: 'CMD_SET_DEVICE_ADDR',
CMD_EXT_IMU_CMD: 'CMD_EXT_IMU_CMD',
CMD_READ_STATE_VARS: 'CMD_READ_STATE_VARS',
CMD_WRITE_STATE_VARS: 'CMD_WRITE_STATE_VARS',
CMD_SERIAL_PROXY: 'CMD_SERIAL_PROXY',
CMD_IMU_ADVANCED_CALIB: 'CMD_IMU_ADVANCED_CALIB',
CMD_SET_DEBUG_PORT: 'CMD_SET_DEBUG_PORT',
CMD_MAVLINK_INFO: 'CMD_MAVLINK_INFO',
CMD_MAVLINK_DEBUG: 'CMD_MAVLINK_DEBUG',
}
def get_incoming_command_name(command_id: int) -> Optional[str]:
return INCOMING_COMMAND_NAMES.get(command_id)
def get_outgoing_command_name(command_id: int) -> Optional[str]:
return OUTGOING_COMMAND_NAMES.get(command_id)
def get_undocumented_command_name(command_id: int) -> Optional[str]:
return UNDOCUMENTED_COMMAND_NAMES.get(command_id)
| 41.183673 | 71 | 0.780806 |
f76ae7a1a52374b4e4c42f47fcd617945cfefdec
| 4,385 |
py
|
Python
|
src/mbed_tools/cli/build.py
|
ladislas/mbed-tools
|
2ab99a15a38704574cd1e379ad4e09dd994db49c
|
[
"Apache-2.0"
] | null | null | null |
src/mbed_tools/cli/build.py
|
ladislas/mbed-tools
|
2ab99a15a38704574cd1e379ad4e09dd994db49c
|
[
"Apache-2.0"
] | null | null | null |
src/mbed_tools/cli/build.py
|
ladislas/mbed-tools
|
2ab99a15a38704574cd1e379ad4e09dd994db49c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Command to build/compile an Mbed project using CMake."""
import os
import pathlib
import shutil
import click
from mbed_tools.build import build_project, generate_build_system, generate_config, flash_binary
from mbed_tools.devices import find_connected_device
from mbed_tools.project import MbedProgram
from mbed_tools.sterm import terminal
@click.command(name="compile", help="Build an Mbed project.")
@click.option(
"-t",
"--toolchain",
type=click.Choice(["ARM", "GCC_ARM"], case_sensitive=False),
help="The toolchain you are using to build your app.",
)
@click.option("-m", "--mbed-target", help="A build target for an Mbed-enabled device, e.g. K64F.")
@click.option("-b", "--build-type", default="develop", help="The build type (release, develop or debug).")
@click.option("-c", "--clean", is_flag=True, default=False, help="Perform a clean build.")
@click.option(
"-p",
"--program-path",
default=os.getcwd(),
help="Path to local Mbed program. By default it is the current working directory.",
)
@click.option(
"-f", "--flash", is_flag=True, default=False, help="Flash the binary onto a device",
)
@click.option(
"--hex-file", is_flag=True, default=False, help="Use hex file, this option should be used with '-f/--flash' option",
)
@click.option(
"-s", "--sterm", is_flag=True, default=False, help="Launch a serial terminal to the device.",
)
@click.option(
"--baudrate",
default=9600,
show_default=True,
help="Change the serial baud rate (ignored unless --sterm is also given).",
)
def build(
program_path: str,
build_type: str,
toolchain: str = "",
mbed_target: str = "",
clean: bool = False,
flash: bool = False,
hex_file: bool = False,
sterm: bool = False,
baudrate: int = 9600,
) -> None:
"""Configure and build an Mbed project using CMake and Ninja.
If the project has already been configured and contains '.mbedbuild/mbed_config.cmake', this command will skip the
Mbed configuration step and invoke CMake.
    If the CMake configuration step has already been run (i.e. a CMake build tree exists), then just try to
build the project immediately using Ninja.
Args:
program_path: Path to the Mbed project.
build_type: The Mbed build profile (debug, develop or release).
toolchain: The toolchain to use for the build.
mbed_target: The name of the Mbed target to build for.
clean: Perform a clean build.
flash: Flash the binary onto a device.
        hex_file: Use a hex file; this option should be used with the '-f/--flash' option.
sterm: Open a serial terminal to the connected target.
baudrate: Change the serial baud rate (ignored unless --sterm is also given).
"""
program = MbedProgram.from_existing(pathlib.Path(program_path))
mbed_config_file = program.files.cmake_config_file
build_tree = program.files.cmake_build_dir
if clean and build_tree.exists():
shutil.rmtree(build_tree)
if any([not mbed_config_file.exists(), not build_tree.exists(), mbed_target, toolchain]):
click.echo("Configuring project and generating build system...")
_validate_target_and_toolchain_args(mbed_target, toolchain)
generate_config(mbed_target.upper(), toolchain, program)
generate_build_system(program.root, build_tree, build_type)
click.echo("Building Mbed project...")
build_project(build_tree)
if flash or sterm:
dev = find_connected_device(mbed_target)
if flash:
flash_binary(dev.mount_points[0].resolve(), program.root, build_tree, mbed_target, hex_file)
elif hex_file:
click.echo("'--hex-file' option should be used with '-f/--flash' option")
if sterm:
if dev.serial_port is None:
raise click.ClickException(
f"The connected device {dev.mbed_board.board_name} does not have an associated serial port."
" Reconnect the device and try again."
)
terminal.run(dev.serial_port, baudrate)
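# A sketch of typical invocations of this command, assuming the console entry
# point is installed as ``mbed-tools``; the target and toolchain values are examples.
#
#   mbed-tools compile -m K64F -t GCC_ARM
#   mbed-tools compile -m K64F -t GCC_ARM -b release -c
#   mbed-tools compile -m K64F -t GCC_ARM --flash --sterm --baudrate 115200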
def _validate_target_and_toolchain_args(target: str, toolchain: str) -> None:
if not all([toolchain, target]):
raise click.UsageError("--toolchain and --mbed-target arguments are required when generating Mbed config!")
| 38.130435 | 120 | 0.693273 |
f76af01402c84679e4cd7f2bf52f632e7cb9898c
| 2,870 |
py
|
Python
|
contrib/avro/tests/python/pants_test/contrib/avro/tasks/test_avro_gen.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
contrib/avro/tests/python/pants_test/contrib/avro/tasks/test_avro_gen.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
contrib/avro/tests/python/pants_test/contrib/avro/tasks/test_avro_gen.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from textwrap import dedent
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants_test.jvm.nailgun_task_test_base import NailgunTaskTestBase
from pants.contrib.avro.targets.java_avro_library import JavaAvroLibrary
from pants.contrib.avro.tasks.avro_gen import AvroJavaGenTask
class MockAvroJavaGenTest(AvroJavaGenTask):
_test_cmd_log = [] # List of lists for commands run by the task under test.
# Overide this method and record the command that would have been run.
def _avro(self, args):
self._test_cmd_log.append(args)
def _test_reset(self):
self._test_cmd_log = []
class AvroJavaGenTest(NailgunTaskTestBase):
@classmethod
def task_type(cls):
return MockAvroJavaGenTest
@classmethod
def alias_groups(cls):
return super(AvroJavaGenTest, cls).alias_groups().merge(
BuildFileAliases(targets={'java_avro_library': JavaAvroLibrary}))
def _test_avro(self, target_spec):
target = self.target(target_spec)
context = self.context(target_roots=[target])
task = self.prepare_execute(context)
task._test_reset()
task.execute()
return task
def test_avro_java_gen(self):
# Disable lookup of avro-tools since not used for this unit test.
self.set_options(runtime_deps=[])
self.add_to_build_file('avro-build', dedent('''
java_avro_library(name='avro-schema',
sources=['src/avro/schema.avsc'],
)
java_avro_library(name='avro-protocol',
sources=['src/avro/protocol.avpl'],
)
java_avro_library(name='avro-idl',
sources=['src/avro/record.avdl'],
)
'''))
self.create_file(relpath='avro-build/src/avro/schema.avsc', contents=dedent('''
{
"namespace": "",
"type": "record",
"name": "Person",
"fields": [
{"name": "name", "type": "string"},
{"name": "age", "type": "int"}
]
}
'''))
self.create_file(relpath='avro-build/src/avro/record.avdl', contents=dedent('''
protocol Test {
void test();
}
'''))
task = self._test_avro('avro-build:avro-schema')
self.assertEquals(len(task._test_cmd_log), 1)
self.assertEquals(task._test_cmd_log[0][:-1], ['compile', 'schema', 'avro-build/src/avro/schema.avsc'])
task = self._test_avro('avro-build:avro-idl')
self.assertEquals(len(task._test_cmd_log), 2)
self.assertEquals(task._test_cmd_log[0][:-1], ['idl', 'avro-build/src/avro/record.avdl'])
generated_protocol_json_file = task._test_cmd_log[0][-1]
self.assertEquals(task._test_cmd_log[1][:-1], ['compile', 'protocol', generated_protocol_json_file])
| 32.613636 | 107 | 0.690592 |
f76b03664b94f1df49f1b8557993ecf73c280f01
| 254 |
py
|
Python
|
pyror/utils.py
|
sckott/pyror
|
e50f71cf668110844ecd74e5451cd88c2298fd3c
|
[
"MIT"
] | 2 |
2021-12-15T16:15:07.000Z
|
2021-12-24T18:09:05.000Z
|
pyror/utils.py
|
sckott/pyror
|
e50f71cf668110844ecd74e5451cd88c2298fd3c
|
[
"MIT"
] | null | null | null |
pyror/utils.py
|
sckott/pyror
|
e50f71cf668110844ecd74e5451cd88c2298fd3c
|
[
"MIT"
] | null | null | null |
def parse_ids(d):
vars = ["id", "name", "links"]
z = {v: d[v] for v in vars}
z["grid_id"] = d["external_ids"]["GRID"]["preferred"]
z["link"] = None
if len(z["links"]):
z["link"] = z["links"][0]
z.pop("links")
return z
| 25.4 | 57 | 0.488189 |
f76b46803457256da0f70e1dafb45e853c8d7f42
| 3,785 |
py
|
Python
|
More/E02_Flat/E12_TextSize.py
|
freder/PageBotExamples
|
eb4ced53a673b9376e8357afa9ea0795b022b13c
|
[
"Ruby",
"MIT"
] | 5 |
2020-06-20T22:01:23.000Z
|
2021-08-06T04:39:50.000Z
|
More/E02_Flat/E12_TextSize.py
|
freder/PageBotExamples
|
eb4ced53a673b9376e8357afa9ea0795b022b13c
|
[
"Ruby",
"MIT"
] | 5 |
2020-05-17T09:32:27.000Z
|
2021-03-15T19:45:52.000Z
|
More/E02_Flat/E12_TextSize.py
|
freder/PageBotExamples
|
eb4ced53a673b9376e8357afa9ea0795b022b13c
|
[
"Ruby",
"MIT"
] | 2 |
2021-02-25T19:07:45.000Z
|
2022-01-09T21:14:06.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Thom Janssen <https://github.com/thomgb>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# E12_TextSize.py
import os
from flat import rgb, font, shape, strike, document, paragraph, text
from pagebot.fonttoolbox.objects.font import findFont
from pagebot.toolbox.loremipsum import loremIpsum
EXPORT_PATH = '_export/12_TextSize.pdf'
if not os.path.exists('_export'):
os.mkdir('_export')
W, H = 1000, 1000
R = 20 # radius of origin marker
# Get the PageFont instance
pbFont = findFont('PageBot-Bold')
#pbFont = findFont('Georgia')
#pbFont = findFont('Verdana')
#pbFont = findFont('PageBot-Book')
#pbFont = findFont('Times New Roman')
#pbFont = findFont('Stickway-Bold')
#pbFont = findFont('ResponderP-Black')
flatFont = font.open(pbFont.path)
fontSize = 100
fontSize3 = fontSize*3
red = rgb(255, 0, 50)
black = rgb(0, 0, 0)
figure = shape().stroke(black).width(1)
# Flat has origin in top-left
fDoc = document(W, H, 'pt')
fPage = fDoc.addpage()
txt = loremIpsum()
P = 80 # Page padding
x = P
# Size of a string
y = H/2-2*P # Padding from above.
st = strike(flatFont).color(red).size(fontSize3, leading=0).tracking(0)
paragraphs = [paragraph([st.span('Hkpx')])]
placedText = fPage.place(text(paragraphs))
# Placing as frame seems to give better control on vertical position.
#placedText.position(x, y) # Placing on baseline
ascender = fontSize3*pbFont.info.typoAscender/pbFont.info.unitsPerEm
# Baseline of the block on M from top-left
placedText.frame(x, y-ascender, W-2*P, fontSize3)
# Draw rectangle of calculated width/height
fPage.place(figure.rectangle(x, y-ascender, placedText.width, placedText.height))
# Cross hair marker on text origin
fPage.place(figure.circle(x, y, R))
fPage.place(figure.line(x-R, y, x+R, y))
fPage.place(figure.line(x, y-R, x, y+R))
# Size of a text box.
# Text has to be split, not to contain newlines, into paragraphs.
y = H/2+P # Flat has origin on top left
paragraphs = []
for txtParagraph in txt.split('\n'):
st = strike(flatFont).color(red).size(fontSize, leading=fontSize)
paragraphs.append(paragraph([st.span(txtParagraph)]))
placedText = fPage.place(text(paragraphs))
# Placing as frame seems to give better control on vertical position.
#placedText.position(x, y) # Placing on baseline
ascender = fontSize*pbFont.info.typoAscender/pbFont.info.unitsPerEm
# Baseline of the block on M from top-left
placedText.frame(x, y-ascender, W-2*P, 4*fontSize)
# Fontsize is now distance to the next line
fPage.place(figure.rectangle(x, y, fontSize, fontSize))
fPage.place(figure.rectangle(x+fontSize, y+fontSize, fontSize, fontSize))
fPage.place(figure.rectangle(x+2*fontSize, y+2*fontSize, fontSize, fontSize))
yy = y
for lIndex, (height, run) in enumerate(placedText.layout.runs()):
if lIndex > 0:
yy += height
fPage.place(figure.line(x-P/2, yy, x+P/2, yy))
# Shifted by ascender is the CSS box around the line.
fPage.place(figure.rectangle(x+3*fontSize, y-ascender, fontSize, fontSize))
fPage.place(figure.rectangle(x+4*fontSize, y+fontSize-ascender, fontSize, fontSize))
fPage.place(figure.rectangle(x+5*fontSize, y+2*fontSize-ascender, fontSize, fontSize))
fPage.place(figure.rectangle(x+6*fontSize, y+3*fontSize-ascender, fontSize, fontSize))
# Draw rectangle of calculated width/height, different from 4xfontSize
fPage.place(figure.rectangle(x, y-ascender, placedText.width, placedText.height))
# Cross hair marker on text origin
fPage.place(figure.circle(x, y, R))
fPage.place(figure.line(x-R, y, x+R, y))
fPage.place(figure.line(x, y-R, x, y+R))
fDoc.pdf(EXPORT_PATH)
print('Done', EXPORT_PATH)
| 35.046296 | 86 | 0.721532 |
f76b7ea404c80b2efbd4605d73fff2789e56b96d
| 6,440 |
py
|
Python
|
tests/release_test.py
|
shouldsee/cathpy
|
5f7fa1322434b2d254f0158c5840f029b12dbafe
|
[
"MIT"
] | 7 |
2019-04-15T19:18:44.000Z
|
2022-02-07T20:43:45.000Z
|
tests/release_test.py
|
shouldsee/cathpy
|
5f7fa1322434b2d254f0158c5840f029b12dbafe
|
[
"MIT"
] | 14 |
2019-04-24T11:47:38.000Z
|
2021-06-09T08:43:14.000Z
|
tests/release_test.py
|
shouldsee/cathpy
|
5f7fa1322434b2d254f0158c5840f029b12dbafe
|
[
"MIT"
] | 4 |
2019-04-15T16:32:12.000Z
|
2021-03-05T02:54:51.000Z
|
import difflib
import filecmp
import logging
from random import shuffle
import os
import tempfile
from cathpy.core.release import (
CathDomainList, CathNamesList, CathDomall,
CathDomainListEntry, CathDomallEntry, )
from . import testutils
LOG = logging.getLogger(__name__)
def cmp_file_contents(f1, f2, rstrip=False, max_diff=50):
with open(f1) as fh1:
with open(f2) as fh2:
lines1 = [l for l in fh1 if not l.startswith('#')]
lines2 = [l for l in fh2 if not l.startswith('#')]
if rstrip:
lines1 = [l.rstrip() for l in lines1]
lines2 = [l.rstrip() for l in lines2]
diff = difflib.unified_diff(
lines1, lines2, fromfile=f1, tofile=f2)
diff_lines = list(diff)
if diff_lines:
LOG.info("DIFF: %s %s", f1, f2)
for idx, d in enumerate(diff_lines):
LOG.info("%s", d.strip())
if idx > max_diff:
break
return len(diff_lines)
class TestDomainList(testutils.TestBase):
def setUp(self):
self.domainlist_file = os.path.join(os.path.dirname(
__file__), 'data', 'release', 'CathDomainList')
self.domainlist = CathDomainList.from_file(self.domainlist_file)
def test_domainlist(self):
tmplistfile = tempfile.NamedTemporaryFile(mode='wt')
domainlist = self.domainlist
self.assertEqual(len(domainlist), 984)
domainlist.to_file(tmplistfile.name)
cmp_file_contents(self.domainlist_file, tmplistfile.name)
self.assertEqual(cmp_file_contents(
self.domainlist_file, tmplistfile.name), 0)
domentry = domainlist[3]
self.assertEqual(domentry.domain_id, '3friA01')
self.assertEqual(domentry.cath_id, '1.10.8.10.2.1.1.1.2')
self.assertEqual(domentry.sfam_id, '1.10.8.10')
self.assertEqual([d.domain_id for d in domainlist[2:4]], [
'3frhA01', '3friA01'])
self.assertEqual(domainlist['3friA01'].domain_id, '3friA01')
self.assertEqual(domainlist['3friA01'].cath_id, '1.10.8.10.2.1.1.1.2')
def test_sort(self):
entries = [e for e in self.domainlist.entries]
listcopy = CathDomainList(entries=entries)
self.assertEqual(self.domainlist.entries, listcopy.entries)
shuffle(listcopy.entries)
self.assertNotEqual(self.domainlist.entries, listcopy.entries)
sorted_entries = sorted(listcopy.entries)
listsorted = CathDomainList(entries=sorted_entries)
self.assertEqual(self.domainlist.entries, listsorted.entries)
# sort in place doesn't work yet
listcopy.sort()
self.assertEqual(self.domainlist.entries, listcopy.entries)
def test_filter_cath_id(self):
filteredlist = self.domainlist.filter_cath_id('1.10.8.20')
expected_domains = {
'3hx3A01': '1.10.8.20.1.1.1.1.1',
'4cj6A01': '1.10.8.20.1.1.1.2.1',
'3hy5A01': '1.10.8.20.5.1.1.1.1',
'4cizA01': '1.10.8.20.5.1.1.1.2',
}
entries = filteredlist.entries
self.assertEqual(
{c.domain_id: c.cath_id for c in entries[:2] + entries[-2:]}, expected_domains)
def test_filter_reps(self):
domainlist = self.domainlist
hreps = domainlist.filter_reps(4)
sreps = domainlist.filter_reps(5)
self.assertIsInstance(hreps, CathDomainList)
expected_hreps = ['1.10.8.10', '1.10.8.20',
'1.10.8.40', '1.10.8.50', '1.10.8.60']
self.assertEqual([c.cath_id_to_depth(4)
for c in hreps.entries], expected_hreps)
self.assertIsInstance(sreps[0], CathDomainListEntry)
self.assertEqual(sreps[0].cath_id, '1.10.8.10.1.1.1.1.1')
self.assertEqual(len(sreps[:3]), 3)
self.assertEqual(len(sreps[-3:]), 3)
expected_sreps = {
'1oaiA00': '1.10.8.10.1',
'3frhA01': '1.10.8.10.2',
'4g3oA00': '1.10.8.10.3',
'1c1kA01': '1.10.8.60.10',
'4lrtC02': '1.10.8.60.11',
'2fnaA02': '1.10.8.60.12', }
self.assertEqual({c.domain_id: c.cath_id_to_depth(5)
for c in sreps[:3] + sreps[-3:]}, expected_sreps)
class TestNamesList(testutils.TestBase):
def setUp(self):
self.namelist_file = os.path.join(os.path.dirname(
__file__), 'data', 'release', 'CathNames')
def test_nameslist(self):
tmplistfile = tempfile.NamedTemporaryFile(mode='wt')
namelist = CathNamesList.from_file(self.namelist_file)
self.assertEqual(len(namelist), 984)
namelist.to_file(tmplistfile.name)
self.assertEqual(cmp_file_contents(
self.namelist_file, tmplistfile.name, rstrip=True), 0)
self.assertEqual(namelist[0].cath_id, '1')
self.assertEqual([n.cath_id for n in namelist[5:7]], ['1.20', '1.25'])
self.assertEqual([n.name for n in namelist[5:7]], [
'Up-down Bundle', 'Alpha Horseshoe'])
class TestDomallList(testutils.TestBase):
def setUp(self):
self.domall_file = os.path.join(os.path.dirname(
__file__), 'data', 'release', 'CathDomall')
def test_line(self):
entry_strings = (
'10gsA D02 F01 2 A 2 - A 78 - A 187 - A 208 - 1 A 79 - A 186 - A 209 - A 209 - (1)',
'1adiB D03 F00 2 B 1 - B 100 - B 201 - B 265 - 1 B 101 - B 200 - 1 B 266 - B 431 -',
)
entry = CathDomallEntry.from_string(entry_strings[0])
self.assertEqual(entry.chain_id, '10gsA')
self.assertEqual(len(entry.domains), 2)
self.assertEqual(len(entry.fragments), 1)
for entry_string in entry_strings:
entry = CathDomallEntry.from_string(entry_string)
self.assertEqual(entry.to_string(), entry_string)
def test_domall(self):
tmplistfile = tempfile.NamedTemporaryFile(mode='wt')
domall = CathDomall.from_file(self.domall_file)
self.assertEqual(len(domall), 982)
self.assertEqual([d.chain_id for d in domall[3:5]], ['103lA', '103mA'])
domall.to_file(tmplistfile.name)
self.assertEqual(cmp_file_contents(
self.domall_file, tmplistfile.name), 0)
domall.to_file(tmplistfile.name)
newlist = CathDomall.from_file(tmplistfile.name)
self.assertEqual(len(newlist), 982)
| 35.777778 | 114 | 0.612112 |
f76b874b3d8d6f4615dcff0240b5324571cbdb34
| 2,421 |
py
|
Python
|
nova/api/openstack/compute/image_size.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 5 |
2016-04-28T16:20:38.000Z
|
2021-04-25T11:19:03.000Z
|
nova/api/openstack/compute/image_size.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/image_size.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 5 |
2020-04-08T20:24:45.000Z
|
2020-10-05T19:02:13.000Z
|
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
ALIAS = "image-size"
authorize = extensions.os_compute_soft_authorizer(ALIAS)
class ImageSizeController(wsgi.Controller):
def _extend_image(self, image, image_cache):
# NOTE(mriedem): The OS-EXT-* prefix should not be used for new
# attributes after v2.1. They are only in v2.1 for backward compat
# with v2.0.
key = "OS-EXT-IMG-SIZE:size"
image[key] = image_cache['size']
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ["nova.context"]
if authorize(context):
image_resp = resp_obj.obj['image']
# image guaranteed to be in the cache due to the core API adding
# it in its 'show' method
image_cached = req.get_db_item('images', image_resp['id'])
self._extend_image(image_resp, image_cached)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
images_resp = list(resp_obj.obj['images'])
# images guaranteed to be in the cache due to the core API adding
# it in its 'detail' method
for image in images_resp:
image_cached = req.get_db_item('images', image['id'])
self._extend_image(image, image_cached)
class ImageSize(extensions.V21APIExtensionBase):
"""Adds image size to image listings."""
name = "ImageSize"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = ImageSizeController()
extension = extensions.ControllerExtension(self, 'images', controller)
return [extension]
def get_resources(self):
return []
| 35.086957 | 78 | 0.664188 |
f76b876bb98483bf9ebfa2dd86b540870ca32ae4
| 2,970 |
py
|
Python
|
ngsutils/fastq/tile.py
|
bgruening/ngsutils
|
417e90dc1918fb553dd84990f2c54bd8cea8f44d
|
[
"BSD-3-Clause"
] | 57 |
2015-03-09T01:26:45.000Z
|
2022-02-22T07:26:01.000Z
|
ngsutils/fastq/tile.py
|
bgruening/ngsutils
|
417e90dc1918fb553dd84990f2c54bd8cea8f44d
|
[
"BSD-3-Clause"
] | 33 |
2015-02-03T23:24:46.000Z
|
2022-03-16T20:08:10.000Z
|
ngsutils/fastq/tile.py
|
bgruening/ngsutils
|
417e90dc1918fb553dd84990f2c54bd8cea8f44d
|
[
"BSD-3-Clause"
] | 33 |
2015-01-18T16:47:47.000Z
|
2022-02-22T07:28:09.000Z
|
#!/usr/bin/env python
## category General
## desc Splits long FASTQ reads into smaller (tiled) chunks
'''
For each read in a FASTQ file, split it into smaller (overlapping) chunks.
Fragments are defined by their length and offset. For example, if the length
is 35 and the offset is 10, sub-reads will be 1->35, 11->45, 21->55, etc... If
the offset and the length are the same, then the fragments will not overlap.
Output is a set of (gzip compressed) FASTQ files
'''
import os
import sys
import gzip
from ngsutils.fastq import FASTQ
def _open_file(outbase, i, gz, quiet=False):
if gz:
fn = '%s.%s.fastq.gz' % (outbase, i + 1)
tmp = os.path.join(os.path.dirname(fn), '.tmp.%s' % os.path.basename(fn))
if not quiet:
sys.stderr.write('Output file: %s\n' % fn)
return (gzip.open(tmp, 'w'), tmp, fn)
else:
fn = '%s.%s.fastq' % (outbase, i + 1)
tmp = os.path.join(os.path.dirname(fn), '.tmp.%s' % os.path.basename(fn))
if not quiet:
sys.stderr.write('Output file: %s\n' % fn)
return (open(tmp, 'w'), tmp, fn)
def fastq_tile(fname, outbase, length, offset, gz=False, quiet=False):
fastq = FASTQ(fname)
outs = []
fnames = []
for read in fastq.fetch(quiet=quiet):
out_idx = 0
pos = 0
while pos + length < len(read.seq):
if len(outs) <= out_idx:
fobj, tmp, fn = _open_file(outbase, out_idx, gz, quiet)
outs.append(fobj)
fnames.append((tmp, fn))
read.subseq(pos, pos + length, comment="#tile:%s,%s" % (pos, pos + length)).write(outs[out_idx])
pos += offset
out_idx += 1
for out in outs:
out.close()
fastq.close()
for tmp, fname in fnames:
os.rename(tmp, fname)
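# Illustrative sketch of the tiling arithmetic (hypothetical values): with
# length=35 and offset=10, a 60 bp read yields sub-reads covering positions
# 0-35, 10-45 and 20-55 (0-based, as encoded in the "#tile:start,end" comment),
# each written to its own per-tile output file (out_template.1.fastq[.gz], ...).
#
#   fastq_tile('reads.fastq.gz', 'out/tiled', 35, 10, gz=True)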
def usage(msg=None):
if msg:
print msg
print __doc__
print """\
Usage: fastqutils tile {opts} filename.fastq{.gz} out_template
Options:
-len val Length of each fragment (default: 35)
-offset val Offset for each fragment (default: 10)
-gz gzip compress the output FASTQ files
"""
sys.exit(1)
if __name__ == '__main__':
fname = None
outtemplate = None
gz = False
length = 35
offset = 10
last = None
for arg in sys.argv[1:]:
if arg == '-h':
usage()
if last == '-len':
length = int(arg)
last = None
elif last == '-offset':
offset = int(arg)
last = None
elif arg == '-gz':
gz = True
elif arg in ['-len', '-offset']:
last = arg
elif not fname:
if not os.path.exists(arg):
usage("Missing file: %s" % arg)
fname = arg
elif not outtemplate:
outtemplate = arg
if not fname or not outtemplate:
usage()
fastq_tile(fname, outtemplate, length, offset, gz)
| 25.826087 | 108 | 0.556229 |
f76b8872041317052b828349b8676f8291d948a9
| 717 |
py
|
Python
|
Examples/ultrassonic_thread_HC_SR04.py
|
BosonsHiggs/arduPython
|
77114f70f5338109be1a440f8b35f833dc6bba1a
|
[
"MIT"
] | null | null | null |
Examples/ultrassonic_thread_HC_SR04.py
|
BosonsHiggs/arduPython
|
77114f70f5338109be1a440f8b35f833dc6bba1a
|
[
"MIT"
] | null | null | null |
Examples/ultrassonic_thread_HC_SR04.py
|
BosonsHiggs/arduPython
|
77114f70f5338109be1a440f8b35f833dc6bba1a
|
[
"MIT"
] | 1 |
2022-01-05T00:01:46.000Z
|
2022-01-05T00:01:46.000Z
|
from threading import Thread
from pyfirmata import Arduino, pyfirmata, util
from pyfirmata.util import ping_time_to_distance
import time
### Start of pin configuration
board = Arduino() # or Arduino(port) define board
print("Communication successfully started!")
it = util.Iterator(board)
it.start()
sonarEcho = board.get_pin('d:7:o')
time.sleep(1)
### End set pins
class Echo(Thread):
def __init__ (self, echoPino):
Thread.__init__(self)
self.echoPino = echoPino
def run(self):
while True:
            ping_time = self.echoPino.ping()  # renamed to avoid shadowing the time module
            board.pass_time(0.06)  # delay of 60 ms -> see datasheet
            print(f"Time: {ping_time}ms, distance: {ping_time_to_distance(ping_time)}cm")
inicioEcho = Echo(sonarEcho)
inicioEcho.start()
| 22.40625 | 70 | 0.736402 |
f76bd294fae0619751a2773d8ea522cec7ed752e
| 2,748 |
py
|
Python
|
archive/nexus-api-v2/Database/AGILE/Models/__init__.py
|
cloud-hybrid/delta
|
402b00ed5aaa32ccef628361e9635879b7ace44f
|
[
"BSD-3-Clause"
] | null | null | null |
archive/nexus-api-v2/Database/AGILE/Models/__init__.py
|
cloud-hybrid/delta
|
402b00ed5aaa32ccef628361e9635879b7ace44f
|
[
"BSD-3-Clause"
] | null | null | null |
archive/nexus-api-v2/Database/AGILE/Models/__init__.py
|
cloud-hybrid/delta
|
402b00ed5aaa32ccef628361e9635879b7ace44f
|
[
"BSD-3-Clause"
] | 1 |
2022-01-03T05:33:15.000Z
|
2022-01-03T05:33:15.000Z
|
#!/usr/bin/env python3
# -*- Coding: UTF-8 -*- #
# -*- System: Linux -*- #
# -*- Usage: *.py -*- #
# Owner: Jacob B. Sanders
# Source: code.cloud-technology.io
# License: BSD 2-Clause License
"""
...
"""
# =============================================================================
# Standard Library
# =============================================================================
import os
import sys
import uuid
import pkgutil
import secrets
import datetime
# =============================================================================
# External
# =============================================================================
import pydantic
import sqlalchemy
import sqlalchemy.orm
import sqlalchemy.dialects
import sqlalchemy.dialects.postgresql
# =============================================================================
# Local Imports
# =============================================================================
import Database
# =============================================================================
# Type Declarations & Importable(s)
# =============================================================================
Stamp = datetime.datetime.utcnow
Secret = pydantic.SecretStr
# ... Email = pydantic.EmailStr
Time = sqlalchemy.sql.func
# -----------------------------------------------------------------------------
# Object Reference Mapping(s)
# -----------------------------------------------------------------------------
Relationship = sqlalchemy.orm.relationship
Enumeration = sqlalchemy.Enum
Constraint = sqlalchemy.PrimaryKeyConstraint
MetaData = sqlalchemy.MetaData
Foreign = sqlalchemy.ForeignKey
Integer = sqlalchemy.dialects.postgresql.INTEGER
Boolean = sqlalchemy.dialects.postgresql.BOOLEAN
Column = sqlalchemy.Column
String = sqlalchemy.dialects.postgresql.VARCHAR
Array = sqlalchemy.dialects.postgresql.ARRAY
Check = sqlalchemy.CheckConstraint
Table = sqlalchemy.Table
Date = sqlalchemy.dialects.postgresql.TIMESTAMP
UUID = sqlalchemy.dialects.postgresql.UUID
# =============================================================================
# Packaged Exports
# =============================================================================
UID = lambda: "{0}".format(uuid.uuid4()).upper()
Day = lambda: datetime.datetime.today().isoweekday()
ISO8601 = lambda: datetime.datetime.today().strftime("%U")
Year = lambda: datetime.datetime.today().year
__all__ = [
"Database",
"Boolean",
"Column",
"Constraint",
"Foreign",
"Integer",
"String",
"Relationship",
"Array",
"MetaData",
"Secret",
"Date",
"Time",
"Enumeration",
"Check",
# ... "Email",
"secrets",
"UUID",
"UID",
"ISO8601",
"Year",
"Day"
]
| 26.171429 | 79 | 0.456332 |
f76bdb60235f8b7b014972c2244610c34c36244d
| 1,309 |
py
|
Python
|
documentstore_migracao/utils/extract_isis.py
|
joffilyfe/document-store-migracao
|
b5125b7aedec56f0e8787900bdfd124aaf65e3e3
|
[
"BSD-2-Clause"
] | null | null | null |
documentstore_migracao/utils/extract_isis.py
|
joffilyfe/document-store-migracao
|
b5125b7aedec56f0e8787900bdfd124aaf65e3e3
|
[
"BSD-2-Clause"
] | 14 |
2019-03-13T12:19:12.000Z
|
2019-03-19T17:37:08.000Z
|
documentstore_migracao/utils/extract_isis.py
|
joffilyfe/document-store-migracao
|
b5125b7aedec56f0e8787900bdfd124aaf65e3e3
|
[
"BSD-2-Clause"
] | 3 |
2019-03-12T18:55:55.000Z
|
2019-03-20T18:38:02.000Z
|
import os
import logging
import shlex
import subprocess
from documentstore_migracao import config, exceptions
logger = logging.getLogger(__name__)
ISIS2JSON_PATH = "%s/documentstore_migracao/utils/isis2json/isis2json.py" % (
config.BASE_PATH
)
def create_output_dir(path):
output_dir = "/".join(path.split("/")[:-1])
if not os.path.exists(output_dir):
logger.debug("Creating folder: %s", output_dir)
os.makedirs(output_dir)
def run(path: str, output_file: str):
"""Roda um subprocesso com o isis2json de target para extrair dados
de uma base ISIS em formato MST. O resultado da extração
é armazenado em formato JSON em arquivo determinado pelo
parâmetro output_file.
"""
command = "java -cp %s org.python.util.jython %s -t 3 -p 'v' -o %s %s" % (
config.get("CLASSPATH"),
ISIS2JSON_PATH,
output_file,
path,
)
try:
logger.debug("Extracting database file: %s" % path)
subprocess.run(
shlex.split(command),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
)
logger.debug("Writing extracted result as JSON file in: %s" % output_file)
except Exception as exc:
raise exceptions.ExtractError(str(exc)) from None
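# Minimal usage sketch (hypothetical paths; assumes config provides CLASSPATH):
#
#   create_output_dir("output/title/title.json")
#   run("bases/title/title.mst", "output/title/title.json")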
| 27.270833 | 82 | 0.652406 |
f76be302e883e9c77a2704e4f6e350157aa438a9
| 471 |
py
|
Python
|
app/core/migrations/0003_user_date_of_birth.py
|
neomodular/recipe-app-api
|
43ffa6258f08e3685f3921d1314fdd90a946bb40
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_user_date_of_birth.py
|
neomodular/recipe-app-api
|
43ffa6258f08e3685f3921d1314fdd90a946bb40
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_user_date_of_birth.py
|
neomodular/recipe-app-api
|
43ffa6258f08e3685f3921d1314fdd90a946bb40
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-05-03 03:55
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0002_user_last_name'),
]
operations = [
migrations.AddField(
model_name='user',
name='date_of_birth',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
| 22.428571 | 74 | 0.624204 |
f76c4398c22868754722b1040e9359ef9fbd49e9
| 1,871 |
py
|
Python
|
hs_core/tests/api/rest/test_custom_scimeta.py
|
hydroshare/hydroshare
|
bf9888bbe61507aff070b1dfcec2fdec1921468d
|
[
"BSD-3-Clause"
] | 178 |
2015-01-08T23:03:36.000Z
|
2022-03-03T13:56:45.000Z
|
hs_core/tests/api/rest/test_custom_scimeta.py
|
hydroshare/hydroshare
|
bf9888bbe61507aff070b1dfcec2fdec1921468d
|
[
"BSD-3-Clause"
] | 4,125 |
2015-01-01T14:26:15.000Z
|
2022-03-31T16:38:55.000Z
|
hs_core/tests/api/rest/test_custom_scimeta.py
|
hydroshare/hydroshare
|
bf9888bbe61507aff070b1dfcec2fdec1921468d
|
[
"BSD-3-Clause"
] | 53 |
2015-03-15T17:56:51.000Z
|
2022-03-17T00:32:16.000Z
|
from rest_framework import status
from hs_core.hydroshare import resource
from .base import HSRESTTestCase
class TestCustomScimetaEndpoint(HSRESTTestCase):
def setUp(self):
super(TestCustomScimetaEndpoint, self).setUp()
self.rtype = 'GenericResource'
self.title = 'My Test resource'
res = resource.create_resource(self.rtype,
self.user,
self.title)
self.pid = res.short_id
self.resources_to_delete.append(self.pid)
def test_custom_metadata_multiple(self):
custom_metadata = "/hsapi/resource/%s/scimeta/custom/" % self.pid
response = self.client.post(custom_metadata, {
"foo": "bar",
"foo2": "bar2"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.get(custom_metadata)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(str(response.content.decode()), '{"foo": "bar", "foo2": "bar2"}')
def test_custom_metadata_single(self):
custom_metadata = "/hsapi/resource/%s/scimeta/custom/" % self.pid
response = self.client.post(custom_metadata, {
"foo": "bar"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.get(custom_metadata)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(str(response.content.decode()), '{"foo": "bar"}')
def test_custom_metadata_empty(self):
custom_metadata = "/hsapi/resource/%s/scimeta/custom/" % self.pid
response = self.client.get(custom_metadata)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(str(response.content.decode()), '{}')
| 35.980769 | 90 | 0.644041 |
f76c471ca99f9342435ae01def537f7c480334fb
| 188 |
py
|
Python
|
tests/apps/foobwp/config/settings.py
|
blazelibs/blazeweb
|
b120a6a2e38c8b53da2b73443ff242e2d1438053
|
[
"BSD-3-Clause"
] | null | null | null |
tests/apps/foobwp/config/settings.py
|
blazelibs/blazeweb
|
b120a6a2e38c8b53da2b73443ff242e2d1438053
|
[
"BSD-3-Clause"
] | 6 |
2016-11-01T18:42:34.000Z
|
2020-11-16T16:52:14.000Z
|
tests/apps/foobwp/config/settings.py
|
blazelibs/blazeweb
|
b120a6a2e38c8b53da2b73443ff242e2d1438053
|
[
"BSD-3-Clause"
] | 1 |
2020-01-22T18:20:46.000Z
|
2020-01-22T18:20:46.000Z
|
from blazeweb.config import ComponentSettings
class Settings(ComponentSettings):
def init(self):
self.add_route('/foo', 'foo:UserUpdate')
self.for_me.fooattr = True
| 20.888889 | 48 | 0.702128 |
f76c6346cd739fa7d37cea8c8351b042a6ff0db9
| 1,080 |
py
|
Python
|
setup.py
|
heslegrave/connectedcars-python
|
29dca7bb33d549dbb4803688032ae3a13b932eba
|
[
"MIT"
] | 1 |
2021-04-02T17:25:20.000Z
|
2021-04-02T17:25:20.000Z
|
setup.py
|
heslegrave/connectedcars-python
|
29dca7bb33d549dbb4803688032ae3a13b932eba
|
[
"MIT"
] | null | null | null |
setup.py
|
heslegrave/connectedcars-python
|
29dca7bb33d549dbb4803688032ae3a13b932eba
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
with open('README.md', 'r') as file:
long_description = file.read()
with open('requirements.txt') as file:
install_requires = [line.rstrip('\r\n') for line in file]
setup(
name = 'connectedcars',
packages = ['connectedcars'],
version = '0.1.3',
license = 'MIT',
  description = 'Wrapper for accessing the Connected Cars API - an AVL/data collection service installed in most new Danish vehicles from Audi, Volkswagen, Skoda and SEAT.',
long_description = long_description,
long_description_content_type = 'text/markdown',
author = 'Niklas Christoffer Petersen',
author_email = '[email protected]',
url = 'https://github.com/niklascp/connectedcars-python',
download_url = 'https://github.com/niklascp/connectedcars-python/archive/v0.1.0.tar.gz',
keywords = ['AVL', 'Audi', 'Volkswagen', 'Skoda', 'SEAT'],
install_requires = install_requires,
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| 36 | 170 | 0.700926 |
f76c84d38b7a4d9df48ec05bbb14f378e145ce58
| 7,785 |
py
|
Python
|
tools/redfish-schema/generate_from_schema.py
|
jenningsloy318/gofish
|
3dd54eaf633b27b447e1e13e7409a8e16037f788
|
[
"Apache-2.0"
] | null | null | null |
tools/redfish-schema/generate_from_schema.py
|
jenningsloy318/gofish
|
3dd54eaf633b27b447e1e13e7409a8e16037f788
|
[
"Apache-2.0"
] | null | null | null |
tools/redfish-schema/generate_from_schema.py
|
jenningsloy318/gofish
|
3dd54eaf633b27b447e1e13e7409a8e16037f788
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import io
import logging
import pprint
import textwrap
import jinja2
import requests
LOG = logging.getLogger(__name__)
SCHEMA_BASE = 'http://redfish.dmtf.org/schemas/'
#SCHEMA_BASE = 'http://redfish.dmtf.org/schemas/swordfish/v1/'
COMMON_NAME_CHANGES = {
'Oem': 'OEM',
'Id': 'ID',
}
COMMON_DESC = {
'Description': 'Description provides a description of this resource.',
'Id': 'ID uniquely identifies the resource.',
'Name': 'Name is the name of the resource or array element.',
'@odata.context': 'ODataContext is the odata context.',
'@odata.etag': 'ODataEtag is the odata etag.',
'@odata.id': 'ODataID is the odata identifier.',
'@odata.type': 'ODataType is the odata type.',
'Identifier': 'Identifier shall be unique within the managed ecosystem.',
}
numberwords = {
'1': 'One',
'2': 'Two',
'3': 'Three',
'4': 'Four',
'5': 'Five',
'6': 'Six',
'7': 'Seven',
'8': 'Eight',
'9': 'Nine',
}
def _ident(name):
outname = name
outname = outname.replace('-','_') # converts dashes to underbars
outname = outname.replace('switch','Switch') # Watch out for keyword switch
outname = outname.replace(' ','') # Collapse spaces
    outname = outname.replace(':','_')    # Convert colons to underbars
outname = outname.replace('/','_div_')
outname = outname.replace('+','_plus_')
#### not working yet
if len(outname) == 1:
if outname[0:1].isdigit():
outname = numberwords[outname[0]]
return outname
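# Illustrative examples (hypothetical schema names): _ident('Power/Thermal')
# returns 'Power_div_Thermal', and the single digit '1' becomes 'One'.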
def _format_comment(name, description, cutpoint='used', add=' is'):
if name in COMMON_DESC:
return '// %s' % COMMON_DESC[name]
if cutpoint not in description:
cutpoint = ''
lines = textwrap.wrap(
'%s%s %s' % (name, add, description[description.index(cutpoint):]))
return '\n'.join([('// %s' % l) for l in lines])
def _get_desc(obj):
desc = obj.get('longDescription')
if not desc:
desc = obj.get('description', '')
return desc
def _get_type(name, obj):
result = 'string'
tipe = obj.get('type')
anyof = obj.get('anyOf') or obj.get('items', {}).get('anyOf')
if 'count' in name.lower():
result = 'int'
elif name == 'Status':
result = 'common.Status'
elif name == 'Identifier':
result = 'common.Identifier'
elif name == 'Description':
result = 'string'
elif tipe == 'object':
result = name
elif isinstance(tipe, list):
for kind in tipe:
if kind == 'null':
continue
if kind == 'integer' or kind == 'number':
result = 'int'
elif kind == 'boolean':
result = 'bool'
else:
result = kind
elif isinstance(anyof, list):
for kind in anyof:
if '$ref' in kind:
result = kind['$ref'].split('/')[-1]
elif '$ref' in obj.get('items', {}):
result = obj['items']['$ref'].split('/')[-1]
elif name[:1] == name[:1].lower() and 'odata' not in name.lower():
result = 'common.Link'
if tipe == 'array':
result = '[]' + result
if 'odata' in name or name in COMMON_NAME_CHANGES:
result = '%s `json:"%s"`' % (result, name)
return result
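# Illustrative mapping (hypothetical schema fragments): a property typed
# ["integer", "null"] maps to 'int', and an array whose items are a $ref
# becomes a Go slice of the referenced type, e.g. '[]Redundancy'.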
def _add_object(params, name, obj):
"""Adds object information to our template parameters."""
class_info = {
'name': name,
'identname' : _ident(name),
'description': _format_comment(name, _get_desc(obj)),
'attrs': []}
for prop in obj.get('properties', []):
if prop in ['Name', 'Id']:
continue
prawp = obj['properties'][prop]
if prawp.get('deprecated'):
continue
attr = {'name': COMMON_NAME_CHANGES.get(prop, prop)}
if '@odata' in prop:
props = prop.split('.')
replacement = 'OData'
if 'count' in props[-1]:
replacement = ''
attr['name'] = '%s%s' % (
props[0].replace('@odata', replacement), props[-1].title())
attr['type'] = _get_type(prop, prawp)
attr['description'] = _format_comment(
prop, _get_desc(prawp))
class_info['attrs'].append(attr)
params['classes'].append(class_info)
def _add_enum(params, name, enum):
"""Adds enum information to our template parameteres."""
enum_info = {
'name': name,
'identname' : _ident(name),
'description': _format_comment(name, _get_desc(enum)),
'members': []}
for en in enum.get('enum', []):
member = {'identname': _ident(en), 'name': en}
if enum.get('enumLongDescriptions', {}).get(en):
desc = enum.get('enumLongDescriptions', {}).get(en)
else:
desc = enum.get('enumDescriptions', {}).get(en, '')
member['description'] = _format_comment(_ident('%s%s' % (en, name)), desc, cutpoint='shall', add='')
enum_info['members'].append(member)
params['enums'].append(enum_info)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'object',
help='The Redfish schema object to process.')
parser.add_argument(
'-o',
'--output-file',
help='File to write results to. Default is to stdout.')
parser.add_argument(
'-v', '--verbose', action='store_true',
help='Emit verbose output to help debug.')
parser.add_argument(
'-s', '--source',
help='Specify source template file.')
args = parser.parse_args()
url = '%s%s.json' % (SCHEMA_BASE, args.object)
LOG.debug(url)
sourcefile = '%s' % (args.source)
data = requests.get(url)
try:
base_data = data.json()
except Exception:
LOG.exception('Error with data:\n%s' % data)
return
for classdef in base_data.get('definitions', []):
if classdef == args.object:
refs = base_data['definitions'][classdef].get('anyOf', [])
for ref in refs:
reflink = ref.get('$ref', '')
if 'idRef' in reflink:
continue
refurl = reflink.split('#')[0]
if refurl > url:
url = refurl
break
object_data = requests.get(url).json()
params = {'object_name': args.object, 'classes': [], 'enums': []}
for name in object_data['definitions']:
if name == 'Actions':
continue
definition = object_data['definitions'][name]
if definition.get('type') == 'object':
properties = definition.get('properties', '')
if not ('target' in properties and 'title' in properties):
_add_object(params, _ident(name), definition)
elif definition.get('enum'):
_add_enum(params, name, definition)
else:
LOG.debug('Skipping %s', definition)
with io.open('source.go', 'r', encoding='utf-8') as f:
template_body = f.read()
template = jinja2.Template(template_body)
print(template.render(**params))
if __name__ == '__main__':
main()
| 31.26506 | 108 | 0.569814 |
f76cbcb22efe4781bbabfef60fd776d57df55325
| 22,670 |
py
|
Python
|
src/sage/geometry/hyperplane_arrangement/hyperplane.py
|
saraedum/sage-renamed
|
d2da67b14da2ad766a5906425d60d43a3b3e1270
|
[
"BSL-1.0"
] | 3 |
2016-06-19T14:48:31.000Z
|
2022-01-28T08:46:01.000Z
|
src/sage/geometry/hyperplane_arrangement/hyperplane.py
|
rwst/sage
|
a9d274b9338e6ee24bf35ea8d25875507e51e455
|
[
"BSL-1.0"
] | null | null | null |
src/sage/geometry/hyperplane_arrangement/hyperplane.py
|
rwst/sage
|
a9d274b9338e6ee24bf35ea8d25875507e51e455
|
[
"BSL-1.0"
] | 7 |
2021-11-08T10:01:59.000Z
|
2022-03-03T11:25:52.000Z
|
r"""
Hyperplanes
.. NOTE::
If you want to learn about Sage's hyperplane arrangements then you
should start with
:mod:`sage.geometry.hyperplane_arrangement.arrangement`. This
module is used to represent the individual hyperplanes, but you
should never construct the classes from this module directly (but
only via the
:class:`~sage.geometry.hyperplane_arrangement.arrangement.HyperplaneArrangements`.
A linear expression, for example, `3x+3y-5z-7` stands for the
hyperplane with the equation `x+3y-5z=7`. To create it in Sage, you
first have to create a
:class:`~sage.geometry.hyperplane_arrangement.arrangement.HyperplaneArrangements`
object to define the variables `x`, `y`, `z`::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: h = 3*x + 2*y - 5*z - 7; h
Hyperplane 3*x + 2*y - 5*z - 7
sage: h.coefficients()
[-7, 3, 2, -5]
sage: h.normal()
(3, 2, -5)
sage: h.constant_term()
-7
sage: h.change_ring(GF(3))
Hyperplane 0*x + 2*y + z + 2
sage: h.point()
(21/38, 7/19, -35/38)
sage: h.linear_part()
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[ 1 0 3/5]
[ 0 1 2/5]
Another syntax to create hyperplanes is to specify coefficients and a
constant term::
sage: V = H.ambient_space(); V
3-dimensional linear space over Rational Field with coordinates x, y, z
sage: h in V
True
sage: V([3, 2, -5], -7)
Hyperplane 3*x + 2*y - 5*z - 7
Or constant term and coefficients together in one list/tuple/iterable::
sage: V([-7, 3, 2, -5])
Hyperplane 3*x + 2*y - 5*z - 7
sage: v = vector([-7, 3, 2, -5]); v
(-7, 3, 2, -5)
sage: V(v)
Hyperplane 3*x + 2*y - 5*z - 7
Note that the constant term comes first, which matches the notation
for Sage's :func:`~sage.geometry.polyhedron.constructor.Polyhedron` ::
sage: Polyhedron(ieqs=[(4,1,2,3)]).Hrepresentation()
(An inequality (1, 2, 3) x + 4 >= 0,)
The difference between hyperplanes as implemented in this module and
hyperplane arrangements is that:
* hyperplane arrangements contain multiple hyperplanes (of course),
* linear expressions are a module over the base ring, and these module
structure is inherited by the hyperplanes.
The latter means that you can add and multiply by a scalar::
sage: h = 3*x + 2*y - 5*z - 7; h
Hyperplane 3*x + 2*y - 5*z - 7
sage: -h
Hyperplane -3*x - 2*y + 5*z + 7
sage: h + x
Hyperplane 4*x + 2*y - 5*z - 7
sage: h + 7
Hyperplane 3*x + 2*y - 5*z + 0
sage: 3*h
Hyperplane 9*x + 6*y - 15*z - 21
sage: h * RDF(3)
Hyperplane 9.0*x + 6.0*y - 15.0*z - 21.0
Which you can't do with hyperplane arrangements::
sage: arrangement = H(h, x, y, x+y-1); arrangement
Arrangement <y | x | x + y - 1 | 3*x + 2*y - 5*z - 7>
sage: arrangement + x
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for +:
'Hyperplane arrangements in 3-dimensional linear space
over Rational Field with coordinates x, y, z' and
'Hyperplane arrangements in 3-dimensional linear space
over Rational Field with coordinates x, y, z'
"""
#*****************************************************************************
# Copyright (C) 2013 David Perkinson <[email protected]>
# Volker Braun <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.geometry.linear_expression import LinearExpression, LinearExpressionModule
class Hyperplane(LinearExpression):
"""
A hyperplane.
You should always use :class:`AmbientVectorSpace` to construct
instances of this class.
INPUT:
- ``parent`` -- the parent :class:`AmbientVectorSpace`
- ``coefficients`` -- a vector of coefficients of the linear variables
- ``constant`` -- the constant term for the linear expression
EXAMPLES::
sage: H.<x,y> = HyperplaneArrangements(QQ)
sage: x+y-1
Hyperplane x + y - 1
sage: ambient = H.ambient_space()
sage: ambient._element_constructor_(x+y-1)
Hyperplane x + y - 1
For technical reasons, we must allow the degenerate cases of
an empty space and of a full space::
sage: 0*x
Hyperplane 0*x + 0*y + 0
sage: 0*x + 1
Hyperplane 0*x + 0*y + 1
sage: x + 0 == x + ambient(0) # because coercion requires them
True
"""
def __init__(self, parent, coefficients, constant):
"""
Initialize ``self``.
TESTS::
sage: H.<x,y> = HyperplaneArrangements(QQ)
sage: x.change_ring(RR)
Hyperplane 1.00000000000000*x + 0.000000000000000*y + 0.000000000000000
sage: TestSuite(x+y-1).run()
"""
super(Hyperplane, self).__init__(parent, coefficients, constant)
def _repr_(self):
"""
Return a string representation.
OUTPUT:
A string.
EXAMPLES::
sage: H.<x> = HyperplaneArrangements(QQ)
sage: x._repr_()
'Hyperplane x + 0'
"""
return 'Hyperplane {0}'.format(self._repr_linear())
def _latex_(self):
r"""
Return a LaTeX representation.
OUTPUT:
A string.
EXAMPLES::
sage: H.<x> = HyperplaneArrangements(QQ)
sage: V = H.ambient_space()
sage: V([2, -3])._latex_()
'$-3x = -2$'
sage: H.<x, y, z> = HyperplaneArrangements(QQ)
sage: V = H.ambient_space()
sage: V([-5, 1, 3, 0])._latex_()
'$x + 3y = 5$'
sage: V([4, 1, 0, -1])._latex_()
'$x - z = -4$'
"""
linear = self._repr_linear(include_zero=False, include_constant=False, multiplication='')
s = '{0} = {1}'.format(linear, -self.b())
return '${0}$'.format(s)
def normal(self):
"""
Return the normal vector.
OUTPUT:
A vector over the base ring.
EXAMPLES::
sage: H.<x, y, z> = HyperplaneArrangements(QQ)
sage: x.normal()
(1, 0, 0)
sage: x.A(), x.b()
((1, 0, 0), 0)
sage: (x + 2*y + 3*z + 4).normal()
(1, 2, 3)
"""
return self.A()
def _normal_pivot(self):
"""
Return the index of the largest entry of the normal vector.
OUTPUT:
An integer. The index of the largest entry.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: V = H.ambient_space()
sage: (x + 3/2*y - 2*z)._normal_pivot()
2
sage: H.<x,y,z> = HyperplaneArrangements(GF(5))
sage: V = H.ambient_space()
sage: (x + 3*y - 4*z)._normal_pivot()
1
"""
try:
values = [abs(x) for x in self.A()]
except ArithmeticError:
from sage.rings.all import RDF
values = [abs(RDF(x)) for x in self.A()]
max_pos = 0
max_value = values[max_pos]
for i in range(1, len(values)):
if values[i] > max_value:
max_pos = i
max_value = values[i]
return max_pos
def __contains__(self, q):
r"""
Test whether the point ``q`` is in the hyperplane.
INPUT:
- ``q`` -- point (as a vector, list, or tuple)
OUTPUT:
A boolean.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: h = x + y + z - 1
sage: (1/3, 1/3, 1/3) in h
True
sage: (0,0,0) in h
False
"""
V = self.parent().ambient_vector_space()
q = V(q)
return self.A() * q + self._const == 0
@cached_method
def polyhedron(self):
"""
Return the hyperplane as a polyhedron.
OUTPUT:
A :func:`~sage.geometry.polyhedron.constructor.Polyhedron` instance.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: h = x + 2*y + 3*z - 4
sage: P = h.polyhedron(); P
A 2-dimensional polyhedron in QQ^3 defined as the convex hull of 1 vertex and 2 lines
sage: P.Hrepresentation()
(An equation (1, 2, 3) x - 4 == 0,)
sage: P.Vrepresentation()
(A line in the direction (0, 3, -2),
A line in the direction (3, 0, -1),
A vertex at (0, 0, 4/3))
"""
from sage.geometry.polyhedron.constructor import Polyhedron
R = self.parent().base_ring()
return Polyhedron(eqns=[self.coefficients()], base_ring=R)
@cached_method
def linear_part(self):
r"""
The linear part of the affine space.
OUTPUT:
Vector subspace of the ambient vector space, parallel to the
hyperplane.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: h = x + 2*y + 3*z - 1
sage: h.linear_part()
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[ 1 0 -1/3]
[ 0 1 -2/3]
"""
AA = self.parent().ambient_module()
from sage.matrix.constructor import matrix
return matrix(AA.base_ring(), [self.A()]).right_kernel()
def linear_part_projection(self, point):
"""
Orthogonal projection onto the linear part.
INPUT:
- ``point`` -- vector of the ambient space, or anything that
can be converted into one; not necessarily on the
hyperplane
OUTPUT:
Coordinate vector of the projection of ``point`` with respect
to the basis of :meth:`linear_part`. In particular, the length
of this vector is one less than the ambient space
dimension.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: h = x + 2*y + 3*z - 4
sage: h.linear_part()
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[ 1 0 -1/3]
[ 0 1 -2/3]
sage: p1 = h.linear_part_projection(0); p1
(0, 0)
sage: p2 = h.linear_part_projection([3,4,5]); p2
(8/7, 2/7)
sage: h.linear_part().basis()
[
(1, 0, -1/3),
(0, 1, -2/3)
]
sage: p3 = h.linear_part_projection([1,1,1]); p3
(4/7, 1/7)
"""
point = self.orthogonal_projection(point) - self.point()
return self.linear_part().coordinate_vector(point)
@cached_method
def point(self):
"""
Return the point closest to the origin.
OUTPUT:
A vector of the ambient vector space. The closest point to the
origin in the `L^2`-norm.
In finite characteristic a random point will be returned if
the norm of the hyperplane normal vector is zero.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: h = x + 2*y + 3*z - 4
sage: h.point()
(2/7, 4/7, 6/7)
sage: h.point() in h
True
sage: H.<x,y,z> = HyperplaneArrangements(GF(3))
sage: h = 2*x + y + z + 1
sage: h.point()
(1, 0, 0)
sage: h.point().base_ring()
Finite Field of size 3
sage: H.<x,y,z> = HyperplaneArrangements(GF(3))
sage: h = x + y + z + 1
sage: h.point()
(2, 0, 0)
"""
P = self.parent()
AA = P.ambient_module()
R = P.base_ring()
norm2 = sum(x**2 for x in self.A())
if norm2 == 0:
from sage.matrix.constructor import matrix, vector
solution = matrix(R, self.A()).solve_right(vector(R, [-self.b()]))
else:
solution = [-x * self.b() / norm2 for x in self.A()]
return AA(solution)
def dimension(self):
r"""
The dimension of the hyperplane.
OUTPUT:
An integer.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: h = x + y + z - 1
sage: h.dimension()
2
"""
return self.linear_part().dimension()
def intersection(self, other):
r"""
The intersection of ``self`` with ``other``.
INPUT:
- ``other`` -- a hyperplane, a polyhedron, or something that
defines a polyhedron
OUTPUT:
A polyhedron.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: h = x + y + z - 1
sage: h.intersection(x - y)
A 1-dimensional polyhedron in QQ^3 defined as the convex hull of 1 vertex and 1 line
sage: h.intersection(polytopes.cube())
A 2-dimensional polyhedron in QQ^3 defined as the convex hull of 3 vertices
"""
from sage.geometry.polyhedron.base import is_Polyhedron
from sage.geometry.polyhedron.constructor import Polyhedron
if not is_Polyhedron(other):
try:
other = other.polyhedron()
except AttributeError:
other = Polyhedron(other)
return self.polyhedron().intersection(other)
def orthogonal_projection(self, point):
"""
Return the orthogonal projection of a point.
INPUT:
- ``point`` -- vector of the ambient space, or anything that
can be converted into one; not necessarily on the
hyperplane
OUTPUT:
A vector in the ambient vector space that lies on the
hyperplane.
        In finite characteristic, a ``ValueError`` is raised if the
        norm of the hyperplane normal is zero.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: h = x + 2*y + 3*z - 4
sage: p1 = h.orthogonal_projection(0); p1
(2/7, 4/7, 6/7)
sage: p1 in h
True
sage: p2 = h.orthogonal_projection([3,4,5]); p2
(10/7, 6/7, 2/7)
sage: p1 in h
True
sage: p3 = h.orthogonal_projection([1,1,1]); p3
(6/7, 5/7, 4/7)
sage: p3 in h
True
"""
P = self.parent()
norm2 = sum(x**2 for x in self.A())
if norm2 == 0:
raise ValueError('norm of hyperplane normal is zero')
point = P.ambient_vector_space()(point)
n = self.normal()
return point - n * (self.b() + point*n) / norm2
def primitive(self, signed=True):
"""
Return hyperplane defined by primitive equation.
INPUT:
- ``signed`` -- boolean (optional, default: ``True``); whether
to preserve the overall sign
OUTPUT:
Hyperplane whose linear expression has common factors and
denominators cleared. That is, the same hyperplane (with the
same sign) but defined by a rescaled equation. Note that
different linear expressions must define different hyperplanes
as comparison is used in caching.
If ``signed``, the overall rescaling is by a positive constant
only.
EXAMPLES::
sage: H.<x,y> = HyperplaneArrangements(QQ)
sage: h = -1/3*x + 1/2*y - 1; h
Hyperplane -1/3*x + 1/2*y - 1
sage: h.primitive()
Hyperplane -2*x + 3*y - 6
sage: h == h.primitive()
False
sage: (4*x + 8).primitive()
Hyperplane x + 0*y + 2
sage: (4*x - y - 8).primitive(signed=True) # default
Hyperplane 4*x - y - 8
sage: (4*x - y - 8).primitive(signed=False)
Hyperplane -4*x + y + 8
"""
from sage.arith.all import lcm, gcd
coeffs = self.coefficients()
try:
d = lcm([x.denom() for x in coeffs])
n = gcd([x.numer() for x in coeffs])
except AttributeError:
return self
if not signed:
for x in coeffs:
if x > 0:
break
if x < 0:
d = -d
break
parent = self.parent()
d = parent.base_ring()(d)
n = parent.base_ring()(n)
if n == 0:
n = parent.base_ring().one()
return parent(self * d / n)
@cached_method
def _affine_subspace(self):
"""
Return the hyperplane as affine subspace.
OUTPUT:
The hyperplane as a
:class:`~sage.geometry.hyperplane_arrangement.affine_subspace.AffineSubspace`.
EXAMPLES::
sage: H.<x,y> = HyperplaneArrangements(QQ)
sage: h = -1/3*x + 1/2*y - 1; h
Hyperplane -1/3*x + 1/2*y - 1
sage: h._affine_subspace()
Affine space p + W where:
p = (-12/13, 18/13)
W = Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[ 1 2/3]
"""
from sage.geometry.hyperplane_arrangement.affine_subspace import AffineSubspace
return AffineSubspace(self.point(), self.linear_part())
def plot(self, **kwds):
"""
Plot the hyperplane.
OUTPUT:
A graphics object.
EXAMPLES::
sage: L.<x, y> = HyperplaneArrangements(QQ)
sage: (x+y-2).plot()
Graphics object consisting of 2 graphics primitives
"""
from sage.geometry.hyperplane_arrangement.plot import plot_hyperplane
return plot_hyperplane(self, **kwds)
def __or__(self, other):
"""
Construct hyperplane arrangement from bitwise or.
EXAMPLES::
sage: L.<x, y> = HyperplaneArrangements(QQ)
sage: x | y + 1
Arrangement <y + 1 | x>
sage: x | [(0,1), 1]
Arrangement <y + 1 | x>
TESTS::
sage: (x | y).parent() is L
True
"""
from sage.geometry.hyperplane_arrangement.arrangement import HyperplaneArrangements
parent = self.parent()
arrangement = HyperplaneArrangements(parent.base_ring(), names=parent._names)
return arrangement(self, other)
def to_symmetric_space(self):
"""
Return ``self`` considered as an element in the corresponding
symmetric space.
EXAMPLES::
sage: L.<x, y> = HyperplaneArrangements(QQ)
sage: h = -1/3*x + 1/2*y
sage: h.to_symmetric_space()
-1/3*x + 1/2*y
sage: hp = -1/3*x + 1/2*y - 1
sage: hp.to_symmetric_space()
Traceback (most recent call last):
...
ValueError: the hyperplane must pass through the origin
"""
coeff = self.coefficients()
if coeff[0] != 0:
raise ValueError("the hyperplane must pass through the origin")
S = self.parent().symmetric_space()
G = S.gens()
# We skip the first coefficient since it corresponds to the constant term
return S.sum(G[i]*c for i,c in enumerate(coeff[1:]))
class AmbientVectorSpace(LinearExpressionModule):
"""
The ambient space for hyperplanes.
This class is the parent for the :class:`Hyperplane` instances.
TESTS::
sage: from sage.geometry.hyperplane_arrangement.hyperplane import AmbientVectorSpace
sage: V = AmbientVectorSpace(QQ, ('x', 'y'))
sage: V.change_ring(QQ) is V
True
"""
Element = Hyperplane
def _repr_(self):
"""
Return a string representation.
OUTPUT:
A string.
EXAMPLES::
sage: from sage.geometry.hyperplane_arrangement.hyperplane import AmbientVectorSpace
sage: AmbientVectorSpace(QQ, ('x', 'y'))
2-dimensional linear space over Rational Field with coordinates x, y
"""
return '{0}-dimensional linear space over {3} with coordinate{1} {2}'.format(
self.dimension(),
's' if self.ngens() > 1 else '',
', '.join(self._names),
self.base_ring())
def dimension(self):
"""
Return the ambient space dimension.
OUTPUT:
An integer.
EXAMPLES::
sage: M.<x,y> = HyperplaneArrangements(QQ)
sage: x.parent().dimension()
2
sage: x.parent() is M.ambient_space()
True
sage: x.dimension()
1
"""
return self.ngens()
def change_ring(self, base_ring):
"""
Return a ambient vector space with a changed base ring.
INPUT:
- ``base_ring`` -- a ring; the new base ring
OUTPUT:
A new :class:`AmbientVectorSpace`.
EXAMPLES::
sage: M.<y> = HyperplaneArrangements(QQ)
sage: V = M.ambient_space()
sage: V.change_ring(RR)
1-dimensional linear space over Real Field with 53 bits of precision with coordinate y
TESTS::
sage: V.change_ring(QQ) is V
True
"""
return AmbientVectorSpace(base_ring, self._names)
def symmetric_space(self):
"""
Construct the symmetric space of ``self``.
Consider a hyperplane arrangement `A` in the vector space
`V = k^n`, for some field `k`. The symmetric space is the
symmetric algebra `S(V^*)` as the polynomial ring
`k[x_1, x_2, \ldots, x_n]` where `(x_1, x_2, \ldots, x_n)` is
a basis for `V`.
EXAMPLES::
sage: H.<x,y,z> = HyperplaneArrangements(QQ)
sage: A = H.ambient_space()
sage: A.symmetric_space()
Multivariate Polynomial Ring in x, y, z over Rational Field
"""
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
return PolynomialRing(self.base_ring(), self.variable_names())
| 29.711664 | 98 | 0.539524 |
f76ce20b3e76bca848bc6d42832f90afd289683a
| 1,450 |
py
|
Python
|
plugins/aea-cli-ipfs/aea_cli_ipfs/exceptions.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | 28 |
2021-10-31T18:54:14.000Z
|
2022-03-17T13:10:43.000Z
|
plugins/aea-cli-ipfs/aea_cli_ipfs/exceptions.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | 66 |
2021-10-31T11:55:48.000Z
|
2022-03-31T06:26:23.000Z
|
plugins/aea-cli-ipfs/aea_cli_ipfs/exceptions.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2021-2022 Valory AG
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module to contain exceptions for ipfs plugin."""
class BaseIPFSToolException(Exception):
"""Base ipfs tool exception."""
class RemoveError(BaseIPFSToolException):
"""Exception on remove."""
class PinError(BaseIPFSToolException):
"""Exception on pin."""
class PublishError(BaseIPFSToolException):
"""Exception on publish."""
class NodeError(BaseIPFSToolException):
"""Exception for node connection check."""
class DownloadError(BaseIPFSToolException):
"""Exception on download failed."""
class HashNotProvided(BaseIPFSToolException):
"""Exception when hash is not provided."""
| 29.591837 | 80 | 0.644828 |
f76d0c53f680dc5c347158dd74800d5247414e98
| 12,308 |
py
|
Python
|
cgi-bin/hy/models.py
|
kloimhardt/bb-web
|
043493450c73620bc277cb0f93e08b897d45fbf5
|
[
"MIT"
] | 67 |
2020-09-09T14:48:53.000Z
|
2022-03-17T02:00:24.000Z
|
cgi-bin/hy/models.py
|
kloimhardt/bb-web
|
043493450c73620bc277cb0f93e08b897d45fbf5
|
[
"MIT"
] | 1 |
2020-09-29T07:19:48.000Z
|
2020-09-29T13:39:53.000Z
|
cgi-bin/hy/models.py
|
kloimhardt/bb-web
|
043493450c73620bc277cb0f93e08b897d45fbf5
|
[
"MIT"
] | null | null | null |
# Copyright 2021 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from __future__ import unicode_literals
from contextlib import contextmanager
from math import isnan, isinf
from hy import _initialize_env_var
from hy.errors import HyWrapperError
from fractions import Fraction
from colorama import Fore
PRETTY = True
COLORED = _initialize_env_var('HY_COLORED_AST_OBJECTS', False)
@contextmanager
def pretty(pretty=True):
"""
Context manager to temporarily enable
or disable pretty-printing of Hy model reprs.
"""
global PRETTY
old, PRETTY = PRETTY, pretty
try:
yield
finally:
PRETTY = old
class _ColoredModel:
"""
Mixin that provides a helper function for models that have color.
"""
def _colored(self, text):
if COLORED:
return self.color + text + Fore.RESET
else:
return text
class HyObject(object):
"""
Generic Hy Object model. This is helpful to inject things into all the
Hy lexing Objects at once.
The position properties (`start_line`, `end_line`, `start_column`,
`end_column`) are each 1-based and inclusive. For example, a symbol
`abc` starting at the first column would have `start_column` 1 and
`end_column` 3.
"""
properties = ["module", "_start_line", "end_line", "_start_column",
"end_column"]
def replace(self, other, recursive=False):
if isinstance(other, HyObject):
for attr in self.properties:
if not hasattr(self, attr) and hasattr(other, attr):
setattr(self, attr, getattr(other, attr))
else:
raise TypeError("Can't replace a non Hy object '{}' with a Hy object '{}'".format(repr(other), repr(self)))
return self
@property
def start_line(self):
return getattr(self, "_start_line", 1)
@start_line.setter
def start_line(self, value):
self._start_line = value
@property
def start_column(self):
return getattr(self, "_start_column", 1)
@start_column.setter
def start_column(self, value):
self._start_column = value
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, super(HyObject, self).__repr__())
_wrappers = {}
def wrap_value(x):
"""Wrap `x` into the corresponding Hy type.
This allows replace_hy_obj to convert a non Hy object to a Hy object.
This also allows a macro to return an unquoted expression transparently.
"""
new = _wrappers.get(type(x), lambda y: y)(x)
if not isinstance(new, HyObject):
raise HyWrapperError("Don't know how to wrap {!r}: {!r}".format(type(x), x))
if isinstance(x, HyObject):
new = new.replace(x, recursive=False)
return new
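# For instance (illustrative): wrap_value(1) yields HyInteger(1) and
# wrap_value([1, 2]) yields HyList([HyInteger(1), HyInteger(2)]), while an
# unregistered type raises HyWrapperError.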
def replace_hy_obj(obj, other):
return wrap_value(obj).replace(other)
def repr_indent(obj):
return repr(obj).replace("\n", "\n ")
class HyString(HyObject, str):
"""
Generic Hy String object. Helpful to store string literals from Hy
scripts. It's either a ``str`` or a ``unicode``, depending on the
Python version.
"""
def __new__(cls, s=None, brackets=None):
value = super(HyString, cls).__new__(cls, s)
value.brackets = brackets
return value
_wrappers[str] = HyString
class HyBytes(HyObject, bytes):
"""
Generic Hy Bytes object. It's either a ``bytes`` or a ``str``, depending
on the Python version.
"""
pass
_wrappers[bytes] = HyBytes
class HySymbol(HyObject, str):
"""
Hy Symbol. Basically a string.
"""
def __new__(cls, s=None):
return super(HySymbol, cls).__new__(cls, s)
_wrappers[bool] = lambda x: HySymbol("True") if x else HySymbol("False")
_wrappers[type(None)] = lambda foo: HySymbol("None")
class HyKeyword(HyObject):
"""Generic Hy Keyword object."""
__slots__ = ['name']
def __init__(self, value):
self.name = value
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.name)
def __str__(self):
return ":%s" % self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if not isinstance(other, HyKeyword):
return NotImplemented
return self.name == other.name
def __ne__(self, other):
if not isinstance(other, HyKeyword):
return NotImplemented
return self.name != other.name
def __bool__(self):
return bool(self.name)
_sentinel = object()
def __call__(self, data, default=_sentinel):
from hy.lex import mangle
try:
return data[mangle(self.name)]
except KeyError:
if default is HyKeyword._sentinel:
raise
return default
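    # Illustrative behaviour (hypothetical data): HyKeyword("my-key")({"my_key": 1})
    # returns 1, because the keyword name is mangled before the lookup.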
# __getstate__ and __setstate__ are required for Pickle protocol
# 0, because we have __slots__.
def __getstate__(self):
return {k: getattr(self, k)
for k in self.properties + self.__slots__
if hasattr(self, k)}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def strip_digit_separators(number):
# Don't strip a _ or , if it's the first character, as _42 and
# ,42 aren't valid numbers
return (number[0] + number[1:].replace("_", "").replace(",", "")
if isinstance(number, str) and len(number) > 1
else number)
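# For example (illustrative): strip_digit_separators("1_000,000") returns
# "1000000", while a leading separator is kept, so "_42" stays "_42".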
class HyInteger(HyObject, int):
"""
Internal representation of a Hy Integer. May raise a ValueError as if
int(foo) was called, given HyInteger(foo).
"""
def __new__(cls, number, *args, **kwargs):
if isinstance(number, str):
number = strip_digit_separators(number)
bases = {"0x": 16, "0o": 8, "0b": 2}
for leader, base in bases.items():
if number.startswith(leader):
# We've got a string, known leader, set base.
number = int(number, base=base)
break
else:
# We've got a string, no known leader; base 10.
number = int(number, base=10)
else:
# We've got a non-string; convert straight.
number = int(number)
return super(HyInteger, cls).__new__(cls, number)
_wrappers[int] = HyInteger
def check_inf_nan_cap(arg, value):
if isinstance(arg, str):
if isinf(value) and "i" in arg.lower() and "Inf" not in arg:
raise ValueError('Inf must be capitalized as "Inf"')
if isnan(value) and "NaN" not in arg:
raise ValueError('NaN must be capitalized as "NaN"')
class HyFloat(HyObject, float):
"""
Internal representation of a Hy Float. May raise a ValueError as if
float(foo) was called, given HyFloat(foo).
"""
def __new__(cls, num, *args, **kwargs):
value = super(HyFloat, cls).__new__(cls, strip_digit_separators(num))
check_inf_nan_cap(num, value)
return value
_wrappers[float] = HyFloat
class HyComplex(HyObject, complex):
"""
Internal representation of a Hy Complex. May raise a ValueError as if
complex(foo) was called, given HyComplex(foo).
"""
def __new__(cls, real, imag=0, *args, **kwargs):
if isinstance(real, str):
value = super(HyComplex, cls).__new__(
cls, strip_digit_separators(real)
)
p1, _, p2 = real.lstrip("+-").replace("-", "+").partition("+")
check_inf_nan_cap(p1, value.imag if "j" in p1 else value.real)
if p2:
check_inf_nan_cap(p2, value.imag)
return value
return super(HyComplex, cls).__new__(cls, real, imag)
_wrappers[complex] = HyComplex
class HySequence(HyObject, tuple, _ColoredModel):
"""
An abstract type for sequence-like models to inherit from.
"""
def replace(self, other, recursive=True):
if recursive:
for x in self:
replace_hy_obj(x, other)
HyObject.replace(self, other)
return self
def __add__(self, other):
return self.__class__(super(HySequence, self).__add__(
tuple(other) if isinstance(other, list) else other))
def __getslice__(self, start, end):
return self.__class__(super(HySequence, self).__getslice__(start, end))
def __getitem__(self, item):
ret = super(HySequence, self).__getitem__(item)
if isinstance(item, slice):
return self.__class__(ret)
return ret
color = None
def __repr__(self):
return str(self) if PRETTY else super(HySequence, self).__repr__()
def __str__(self):
with pretty():
if self:
return self._colored("{}{}\n {}{}".format(
self._colored(self.__class__.__name__),
self._colored("(["),
self._colored(",\n ").join(map(repr_indent, self)),
self._colored("])"),
))
return self._colored("{}([\n {}])".format(
self.__class__.__name__,
','.join(repr_indent(e) for e in self),
))
else:
return self._colored(self.__class__.__name__ + "()")
class HyFComponent(HySequence):
"""
Analogue of ast.FormattedValue.
The first node in the contained sequence is the value being formatted,
the rest of the sequence contains the nodes in the format spec (if any).
"""
def __new__(cls, s=None, conversion=None):
value = super().__new__(cls, s)
value.conversion = conversion
return value
def replace(self, other, recursive=True):
super().replace(other, recursive)
if hasattr(other, "conversion"):
self.conversion = other.conversion
return self
class HyFString(HySequence):
"""
Generic Hy F-String object, for smarter f-string handling.
Mimics ast.JoinedStr, but using HyString and HyFComponent.
"""
def __new__(cls, s=None, brackets=None):
value = super().__new__(cls, s)
value.brackets = brackets
return value
class HyList(HySequence):
color = Fore.CYAN
def recwrap(f):
return lambda l: f(wrap_value(x) for x in l)
_wrappers[HyFComponent] = recwrap(HyFComponent)
_wrappers[HyFString] = recwrap(HyFString)
_wrappers[HyList] = recwrap(HyList)
_wrappers[list] = recwrap(HyList)
_wrappers[tuple] = recwrap(HyList)
class HyDict(HySequence, _ColoredModel):
"""
HyDict (just a representation of a dict)
"""
color = Fore.GREEN
def __str__(self):
with pretty():
if self:
pairs = []
for k, v in zip(self[::2],self[1::2]):
k, v = repr_indent(k), repr_indent(v)
pairs.append(
("{0}{c}\n {1}\n "
if '\n' in k+v
else "{0}{c} {1}").format(k, v, c=self._colored(',')))
if len(self) % 2 == 1:
pairs.append("{} {}\n".format(
repr_indent(self[-1]), self._colored("# odd")))
return "{}\n {}{}".format(
self._colored("HyDict(["),
"{c}\n ".format(c=self._colored(',')).join(pairs),
self._colored("])"))
else:
return self._colored("HyDict()")
def keys(self):
return list(self[0::2])
def values(self):
return list(self[1::2])
def items(self):
return list(zip(self.keys(), self.values()))
_wrappers[HyDict] = recwrap(HyDict)
_wrappers[dict] = lambda d: HyDict(wrap_value(x) for x in sum(d.items(), ()))
class HyExpression(HySequence):
"""
Hy S-Expression. Basically just a list.
"""
color = Fore.YELLOW
_wrappers[HyExpression] = recwrap(HyExpression)
_wrappers[Fraction] = lambda e: HyExpression(
[HySymbol("fraction"), wrap_value(e.numerator), wrap_value(e.denominator)])
class HySet(HySequence):
"""
Hy set (just a representation of a set)
"""
color = Fore.RED
_wrappers[HySet] = recwrap(HySet)
_wrappers[set] = recwrap(HySet)
| 28.757009 | 119 | 0.59766 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.