Dataset row schema (one row per source file):

| Column | Type | Range |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 to 972 |
| max_stars_repo_name | stringlengths | 6 to 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths ⌀ | 24 to 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths ⌀ | 24 to 24 |
| max_issues_repo_path | stringlengths | 3 to 972 |
| max_issues_repo_name | stringlengths | 6 to 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | stringlengths ⌀ | 24 to 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths ⌀ | 24 to 24 |
| max_forks_repo_path | stringlengths | 3 to 972 |
| max_forks_repo_name | stringlengths | 6 to 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths ⌀ | 24 to 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths ⌀ | 24 to 24 |
| content | stringlengths | 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

⌀ marks columns that may contain null values.
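Rows with this schema are easy to scan programmatically. Below is a minimal sketch, assuming the rows have been exported as a local JSON Lines file; the `rows.jsonl` path is an assumption for illustration, while the field names come from the schema above.

```python
import json
from collections import Counter

license_counts = Counter()
largest_row = None

# One JSON object per line, keyed by the column names listed above.
with open("rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        license_counts.update(row["max_stars_repo_licenses"])
        if largest_row is None or row["size"] > largest_row["size"]:
            largest_row = row

print(license_counts.most_common(5))
print(largest_row["max_stars_repo_path"], largest_row["size"])
```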
hexsha: 8fa21bf9469d7e21ccf274eee48acfaab6db7cf7 | size: 13,315 | ext: py | lang: Python
max_stars: onnxsim/onnx_simplifier.py | Wheest/onnx-simplifier @ 70e3b52aadbc73fe01029dc7ba0d4965d8fc1a8c | ["Apache-2.0"] | count: 2 | events: 2020-06-24T04:22:03.000Z to 2020-08-04T08:32:04.000Z
max_issues: onnxsim/onnx_simplifier.py | fedral/onnx-simplifier @ 70e3b52aadbc73fe01029dc7ba0d4965d8fc1a8c | ["Apache-2.0"] | count: null | events: null
max_forks: onnxsim/onnx_simplifier.py | fedral/onnx-simplifier @ 70e3b52aadbc73fe01029dc7ba0d4965d8fc1a8c | ["Apache-2.0"] | count: null | events: null
content:
from collections import OrderedDict
from typing import List, Dict, Union, Optional, Tuple
import copy
import onnx # type: ignore
import onnx.helper # type: ignore
import onnx.optimizer # type: ignore
import onnx.shape_inference # type: ignore
import onnx.numpy_helper
import onnxruntime as rt # type: ignore
import numpy as np # type: ignore
TensorShape = List[int]
TensorShapes = Dict[Optional[str], TensorShape]
def add_features_to_output(m: onnx.ModelProto, nodes: List[onnx.NodeProto]) -> None:
"""
    Add the given nodes' outputs to the graph outputs in the model proto, so that ONNX Runtime will also return them.
:param m: the model that will be run in ONNX Runtime
:param nodes: nodes whose outputs will be added into the graph outputs
"""
for node in nodes:
for output in node.output:
m.graph.output.extend([onnx.ValueInfoProto(name=output)])
def get_shape_from_value_info_proto(v: onnx.ValueInfoProto) -> List[int]:
return [dim.dim_value for dim in v.type.tensor_type.shape.dim]
def get_value_info_all(m: onnx.ModelProto, name: str) -> Optional[onnx.ValueInfoProto]:
for v in m.graph.value_info:
if v.name == name:
return v
for v in m.graph.input:
if v.name == name:
return v
for v in m.graph.output:
if v.name == name:
return v
return None
def get_shape(m: onnx.ModelProto, name: str) -> TensorShape:
"""
Note: This method relies on onnx shape inference, which is not reliable. So only use it on input or output tensors
"""
v = get_value_info_all(m, name)
if v is not None:
return get_shape_from_value_info_proto(v)
raise RuntimeError('Cannot get shape of "{}"'.format(name))
def get_elem_type(m: onnx.ModelProto, name: str) -> Optional[int]:
v = get_value_info_all(m, name)
if v is not None:
return v.type.tensor_type.elem_type
return None
def get_np_type_from_elem_type(elem_type: int) -> int:
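    # Maps ONNX TensorProto elem_type codes (0 = UNDEFINED, 1 = FLOAT, ...,
    # 16 = BFLOAT16) to NumPy types; bfloat16 is approximated by np.float16
    # because NumPy has no native bfloat16 dtype.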
sizes = (None, np.float32, np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, str, np.bool,
np.float16, np.double, np.uint32, np.uint64, np.complex64, np.complex128, np.float16)
assert len(sizes) == 17
size = sizes[elem_type]
assert size is not None
return size
def get_input_names(model: onnx.ModelProto) -> List[str]:
input_names = list(set([ipt.name for ipt in model.graph.input]) -
set([x.name for x in model.graph.initializer]))
return input_names
def add_initializers_into_inputs(model: onnx.ModelProto) -> onnx.ModelProto:
for x in model.graph.initializer:
input_names = [x.name for x in model.graph.input]
if x.name not in input_names:
shape = onnx.TensorShapeProto()
for dim in x.dims:
shape.dim.extend([onnx.TensorShapeProto.Dimension(dim_value=dim)])
model.graph.input.extend(
[onnx.ValueInfoProto(name=x.name,
type=onnx.TypeProto(tensor_type=onnx.TypeProto.Tensor(elem_type=x.data_type,
shape=shape)))])
return model
def generate_rand_input(model, input_shapes: Optional[TensorShapes] = None):
if input_shapes is None:
input_shapes = {}
input_names = get_input_names(model)
full_input_shapes = {ipt: get_shape(model, ipt) for ipt in input_names}
assert None not in input_shapes
full_input_shapes.update(input_shapes) # type: ignore
for key in full_input_shapes:
if np.prod(full_input_shapes[key]) <= 0:
raise RuntimeError(
'The shape of input "{}" has dynamic size, '
'please determine the input size manually by --input-shape xxx'.format(key))
inputs = {ipt: np.array(np.random.rand(*full_input_shapes[ipt]),
dtype=get_np_type_from_elem_type(get_elem_type(model, ipt))) for ipt in
input_names}
return inputs
def get_constant_nodes(m: onnx.ModelProto) -> List[onnx.NodeProto]:
const_nodes = []
const_tensors = [x.name for x in m.graph.initializer]
const_tensors.extend([node.output[0]
for node in m.graph.node if node.op_type == 'Constant'])
    # If one of the inputs of a node is produced (directly or indirectly) by NMS,
    # we consider that the output of this node does not have a constant shape,
    # so we do not simplify such a node even if it is a Shape op.
tensors_nms = []
for node in m.graph.node:
if any(x in tensors_nms for x in node.input):
tensors_nms.extend(node.output)
elif node.op_type == 'Shape':
const_nodes.append(node)
const_tensors.extend(node.output)
elif node.op_type == 'NonMaxSuppression':
tensors_nms.extend(node.output)
elif all([x in const_tensors for x in node.input]):
const_nodes.append(node)
const_tensors.extend(node.output)
return copy.deepcopy(const_nodes)
def forward(model, inputs=None, input_shapes: Optional[TensorShapes] = None) -> Dict[str, np.ndarray]:
if input_shapes is None:
input_shapes = {}
sess_options = rt.SessionOptions()
sess_options.graph_optimization_level = rt.GraphOptimizationLevel(0)
sess_options.log_severity_level = 3
sess = rt.InferenceSession(model.SerializeToString(), sess_options=sess_options, providers=['CPUExecutionProvider'])
if inputs is None:
inputs = generate_rand_input(model, input_shapes=input_shapes)
outputs = [x.name for x in sess.get_outputs()]
run_options = rt.RunOptions()
run_options.log_severity_level = 3
res = OrderedDict(zip(outputs, sess.run(outputs, inputs, run_options=run_options)))
return res
def forward_for_node_outputs(model: onnx.ModelProto, nodes: List[onnx.NodeProto],
input_shapes: Optional[TensorShapes] = None) -> Dict[str, np.ndarray]:
if input_shapes is None:
input_shapes = {}
model = copy.deepcopy(model)
add_features_to_output(model, nodes)
res = forward(model, input_shapes=input_shapes)
return res
def insert_elem(repeated_container, index: int, element):
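    # Protobuf repeated fields have no insert(), so grow the container by
    # duplicating its last element, shift everything after `index` one slot
    # to the right, and then overwrite the slot at `index` with `element`.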
repeated_container.extend([repeated_container[-1]])
for i in reversed(range(index + 1, len(repeated_container) - 1)):
repeated_container[i].CopyFrom(repeated_container[i - 1])
repeated_container[index].CopyFrom(element)
def eliminate_const_nodes(model: onnx.ModelProto, const_nodes: List[onnx.NodeProto],
res: Dict[str, np.ndarray]) -> onnx.ModelProto:
"""
:param model: the original onnx model
:param const_nodes: const nodes detected by `get_constant_nodes`
:param res: The dict containing all tensors, got by `forward_all`
:return: the simplified onnx model. Redundant ops are all removed.
"""
for i, node in enumerate(model.graph.node):
if node in const_nodes:
for output in node.output:
new_node = copy.deepcopy(node)
new_node.name = "node_" + output
new_node.op_type = 'Constant'
new_attr = onnx.helper.make_attribute(
'value',
onnx.numpy_helper.from_array(res[output], name=output)
)
del new_node.input[:]
del new_node.attribute[:]
del new_node.output[:]
new_node.output.extend([output])
new_node.attribute.extend([new_attr])
insert_elem(model.graph.node, i + 1, new_node)
del model.graph.node[i]
return model
def optimize(model: onnx.ModelProto, skip_fuse_bn: bool) -> onnx.ModelProto:
"""
:param model: The onnx model.
:return: The optimized onnx model.
    Before simplifying, use this method to generate value_info, which is used in `forward_all`.
    After simplifying, use this method to fold constants generated in the previous step into initializers,
    and to eliminate unused constants.
"""
    # Due to an onnx bug, https://github.com/onnx/onnx/issues/2417, we need to add missing initializers into the inputs
onnx.checker.check_model(model)
input_num = len(model.graph.input)
model = add_initializers_into_inputs(model)
onnx.helper.strip_doc_string(model)
onnx.checker.check_model(model)
optimizers_list = ['eliminate_deadend', 'eliminate_identity', 'eliminate_nop_dropout',
'eliminate_nop_monotone_argmax', 'eliminate_nop_pad',
'extract_constant_to_initializer', 'eliminate_unused_initializer',
'eliminate_nop_transpose', 'fuse_add_bias_into_conv',
# https://github.com/daquexian/onnx-simplifier/issues/31
# 'fuse_consecutive_concats',
'fuse_consecutive_log_softmax',
'fuse_consecutive_reduce_unsqueeze', 'fuse_consecutive_squeezes',
'fuse_consecutive_transposes', 'fuse_matmul_add_bias_into_gemm',
'fuse_pad_into_conv', 'fuse_transpose_into_gemm']
if not skip_fuse_bn:
optimizers_list.append('fuse_bn_into_conv')
model = onnx.optimizer.optimize(model, optimizers_list,
fixed_point=True)
del model.graph.input[input_num:]
onnx.checker.check_model(model)
return model
def check(model_opt: onnx.ModelProto, model_ori: onnx.ModelProto, n_times: int = 5,
input_shapes: Optional[TensorShapes] = None) -> bool:
"""
Warning: Some models (e.g., MobileNet) may fail this check by a small magnitude.
    Just ignore it if that happens.
:param input_shapes: Shapes of generated random inputs
:param model_opt: The simplified ONNX model
:param model_ori: The original ONNX model
:param n_times: Generate n random inputs
"""
if input_shapes is None:
input_shapes = {}
onnx.checker.check_model(model_opt)
for i in range(n_times):
print("Checking {}/{}...".format(i, n_times))
rand_input = generate_rand_input(model_opt, input_shapes=input_shapes)
res_opt = forward(model_opt, inputs=rand_input)
res_ori = forward(model_ori, inputs=rand_input)
for name in res_opt.keys():
if not np.allclose(res_opt[name], res_ori[name], rtol=1e-4, atol=1e-5):
print("Tensor {} changes after simplifying. The max diff is {}.".format(
name, np.max(np.abs(res_opt[name] - res_ori[name]))))
print("Note that the checking is not always correct.")
print("After simplifying:")
print(res_opt[name])
print("Before simplifying:")
print(res_ori[name])
print("----------------")
return False
return True
def clean_constant_nodes(const_nodes: List[onnx.NodeProto], res: Dict[str, np.ndarray]):
"""
    It seems unnecessary since commit 6f2a72, but it may still guard against some unknown bug.
:param const_nodes: const nodes detected by `get_constant_nodes`
:param res: The dict containing all tensors, got by `forward_all`
:return: The constant nodes which have an output in res
"""
return [node for node in const_nodes if node.output[0] in res]
def check_and_update_input_shapes(model: onnx.ModelProto, input_shapes: TensorShapes) -> TensorShapes:
input_names = get_input_names(model)
if None in input_shapes:
if len(input_names) == 1:
input_shapes[input_names[0]] = input_shapes[None]
del input_shapes[None]
else:
raise RuntimeError(
'The model has more than 1 inputs, please use the format "input_name:dim0,dim1,...,dimN" in --input-shape')
for x in input_shapes:
if x not in input_names:
raise RuntimeError(
'The model doesn\'t have input named "{}"'.format(x))
return input_shapes
def simplify(model: Union[str, onnx.ModelProto], check_n: int = 0, perform_optimization: bool = True,
skip_fuse_bn: bool = False, input_shapes: Optional[TensorShapes] = None) \
-> Tuple[onnx.ModelProto, bool]:
if input_shapes is None:
input_shapes = {}
if type(model) == str:
model = onnx.load(model)
onnx.checker.check_model(model)
model_ori = copy.deepcopy(model)
model = onnx.shape_inference.infer_shapes(model)
input_shapes = check_and_update_input_shapes(model, input_shapes)
if perform_optimization:
model = optimize(model, skip_fuse_bn)
const_nodes = get_constant_nodes(model)
res = forward_for_node_outputs(model, const_nodes, input_shapes=input_shapes)
const_nodes = clean_constant_nodes(const_nodes, res)
model = eliminate_const_nodes(model, const_nodes, res)
onnx.checker.check_model(model)
if perform_optimization:
model = optimize(model, skip_fuse_bn)
check_ok = check(model_ori, model, check_n, input_shapes=input_shapes)
return model, check_ok
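# Example usage (a minimal sketch): `simplify` accepts a model path or a loaded
# ModelProto and returns the simplified model together with the result of
# `check`, e.g.
#     model_simp, ok = simplify('model.onnx', check_n=3)
#     onnx.save(model_simp, 'model_simplified.onnx')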
avg_line_length: 41.350932 | max_line_length: 123 | alphanum_fraction: 0.646113

hexsha: 5054b8981bd6ac93ad63782b615a7b2198ba4051 | size: 5,258 | ext: py | lang: Python
max_stars: userbot/modules/qrcode.py | AlhamFadilah/UserBotTG @ 23a7d7d0500d3dc86c240636c3e5b93a376d0df6 | ["Naumen", "Condor-1.1", "MS-PL"] | count: null | events: null
max_issues: userbot/modules/qrcode.py | AlhamFadilah/UserBotTG @ 23a7d7d0500d3dc86c240636c3e5b93a376d0df6 | ["Naumen", "Condor-1.1", "MS-PL"] | count: 1 | events: 2020-07-31T10:28:19.000Z to 2020-07-31T10:28:19.000Z
max_forks: userbot/modules/qrcode.py | AlhamFadilah/UserBotTG @ 23a7d7d0500d3dc86c240636c3e5b93a376d0df6 | ["Naumen", "Condor-1.1", "MS-PL"] | count: null | events: null
content:
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# The entire source code is OSSRPL except 'makeqr and getqr' which is MPL
# License: MPL and OSSRPL
"""Userbot module containing commands related to QR Codes."""
import os
import asyncio
import qrcode
import barcode
from barcode.writer import ImageWriter
from bs4 import BeautifulSoup
from userbot import CMD_HELP, LOGS
from userbot.events import register
@register(pattern=r"^\.decode$", outgoing=True)
async def parseqr(qr_e):
"""For .decode command, get QR Code/BarCode content from the replied photo."""
downloaded_file_name = await qr_e.client.download_media(
await qr_e.get_reply_message())
# parse the Official ZXing webpage to decode the QRCode
command_to_exec = [
"curl", "-X", "POST", "-F", "f=@" + downloaded_file_name + "",
"https://zxing.org/w/decode"
]
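    # Run curl asynchronously and capture its output; zxing.org responds with
    # an HTML page whose first <pre> block contains the decoded text.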
process = await asyncio.create_subprocess_exec(
*command_to_exec,
        # stdout must be a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
os.remove(downloaded_file_name)
if not t_response:
LOGS.info(e_response)
LOGS.info(t_response)
return await qr_e.edit("Failed to decode.")
soup = BeautifulSoup(t_response, "html.parser")
qr_contents = soup.find_all("pre")[0].text
await qr_e.edit(qr_contents)
@register(pattern=r"\.barcode(?: |$)([\s\S]*)", outgoing=True)
async def bq(event):
"""For .barcode command, genrate a barcode containing the given content."""
await event.edit("`Processing..`")
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.barcode <long text to include>`"
reply_msg_id = event.message.id
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
reply_msg_id = previous_message.id
if previous_message.media:
downloaded_file_name = await event.client.download_media(
previous_message)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8") + "\r\n"
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
return event.edit("SYNTAX: `.barcode <long text to include>`")
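    # Code 128 can encode the full ASCII character set, so it handles
    # arbitrary text payloads.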
bar_code_type = "code128"
try:
bar_code_mode_f = barcode.get(bar_code_type,
message,
writer=ImageWriter())
filename = bar_code_mode_f.save(bar_code_type)
await event.client.send_file(event.chat_id,
filename,
reply_to=reply_msg_id)
os.remove(filename)
except Exception as e:
return await event.edit(str(e))
await event.delete()
@register(pattern=r"\.makeqr(?: |$)([\s\S]*)", outgoing=True)
async def make_qr(makeqr):
"""For .makeqr command, make a QR Code containing the given content."""
input_str = makeqr.pattern_match.group(1)
message = "SYNTAX: `.makeqr <long text to include>`"
reply_msg_id = None
if input_str:
message = input_str
elif makeqr.reply_to_msg_id:
previous_message = await makeqr.get_reply_message()
reply_msg_id = previous_message.id
if previous_message.media:
downloaded_file_name = await makeqr.client.download_media(
previous_message)
m_list = None
with open(downloaded_file_name, "rb") as file:
m_list = file.readlines()
message = ""
for media in m_list:
message += media.decode("UTF-8") + "\r\n"
os.remove(downloaded_file_name)
else:
message = previous_message.message
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=4,
)
qr.add_data(message)
qr.make(fit=True)
img = qr.make_image(fill_color="black", back_color="white")
img.save("img_file.webp", "PNG")
await makeqr.client.send_file(makeqr.chat_id,
"img_file.webp",
reply_to=reply_msg_id)
os.remove("img_file.webp")
await makeqr.delete()
CMD_HELP.update({
"qr":
">`.makeqr <content>`"
"\nUsage: Make a QR Code from the given content."
"\nExample: .makeqr www.google.com"
"\nNote: use `.decode <reply to barcode/qrcode>` to get decoded content.",
"barcode":
">`.barcode <content>`"
"\nUsage: Make a BarCode from the given content."
"\nExample: .barcode www.google.com"
"\nNote: use `.decode <reply to barcode/qrcode>` to get decoded content."
})
avg_line_length: 35.527027 | max_line_length: 82 | alphanum_fraction: 0.624952

hexsha: 1e6b314e68fb542dd7f77966135e6d01359a3871 | size: 1,185 | ext: py | lang: Python
max_stars: examples/federated_learning/yolov5_coco128_mistnet/aggregate.py | JoeyHwong-gk/sedna @ e110ac77b1141b83eb72df3c49ab87682d7061f5 | ["Apache-2.0"] | count: 1 | events: 2021-06-15T02:08:45.000Z to 2021-06-15T02:08:45.000Z
max_issues: examples/federated_learning/yolov5_coco128_mistnet/aggregate.py | JoeyHwong-gk/sedna @ e110ac77b1141b83eb72df3c49ab87682d7061f5 | ["Apache-2.0"] | count: null | events: null
max_forks: examples/federated_learning/yolov5_coco128_mistnet/aggregate.py | JoeyHwong-gk/sedna @ e110ac77b1141b83eb72df3c49ab87682d7061f5 | ["Apache-2.0"] | count: null | events: null
content:
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from interface import mistnet, s3_transmitter, simple_chooser
from interface import Dataset, Estimator
from sedna.service.server import AggregationServerV2
from sedna.common.config import BaseConfig
def run_server():
data = Dataset()
estimator = Estimator()
estimator.pretrained = BaseConfig.pretrained_model_url.replace("yolov5.pth", "")
server = AggregationServerV2(
data=data,
estimator=estimator,
aggregation=mistnet,
transmitter=s3_transmitter,
chooser=simple_chooser)
server.start()
if __name__ == '__main__':
run_server()
avg_line_length: 31.184211 | max_line_length: 84 | alphanum_fraction: 0.74346

hexsha: 5d89259cc27c5a14ab880086dfcd71eb41fbc74c | size: 88 | ext: py | lang: Python
max_stars: nsq/sockets/deflate.py | dlecocq/nsq-py @ d7698ffaea015ef291cc04b32f9cb45ed659125d | ["MIT"] | count: 41 | events: 2015-03-29T18:19:51.000Z to 2021-07-20T04:26:55.000Z
max_issues: nsq/sockets/deflate.py | dlecocq/nsq-py @ d7698ffaea015ef291cc04b32f9cb45ed659125d | ["MIT"] | count: 13 | events: 2015-04-21T22:21:16.000Z to 2021-06-02T00:29:13.000Z
max_forks: nsq/sockets/deflate.py | dlecocq/nsq-py @ d7698ffaea015ef291cc04b32f9cb45ed659125d | ["MIT"] | count: 15 | events: 2015-04-21T20:36:05.000Z to 2020-10-28T11:29:51.000Z
content:
'''Wraps a socket in Deflate compression'''
raise ImportError('Deflate not supported')
avg_line_length: 22 | max_line_length: 43 | alphanum_fraction: 0.761364

hexsha: 25faa1441bafd84653f16905f5196becc099991f | size: 401 | ext: py | lang: Python
max_stars: clean_kernels.py | DarkMatterAI/mrl @ e000c3570d4461c3054c882697cce55217ede552 | ["MIT"] | count: 4 | events: 2021-11-16T09:29:55.000Z to 2021-12-27T17:55:32.000Z
max_issues: clean_kernels.py | DarkMatterAI/mrl @ e000c3570d4461c3054c882697cce55217ede552 | ["MIT"] | count: null | events: null
max_forks: clean_kernels.py | DarkMatterAI/mrl @ e000c3570d4461c3054c882697cce55217ede552 | ["MIT"] | count: 3 | events: 2021-11-16T09:41:41.000Z to 2021-12-27T17:55:33.000Z
content:
import json
import os
paths = ['nbs/tutorials/',
'nbs/']
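# Rewrite every notebook's kernelspec so it points at the default Python 3 kernel.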
for path in paths:
files = [i for i in os.listdir(path) if i[-5:]=='ipynb']
for file in files:
j = json.load(open(f'{path}{file}'))
j['metadata']['kernelspec'] = {'display_name': 'Python 3', 'language': 'python', 'name': 'python3'}
with open(f'{path}{file}', 'w') as f:
json.dump(j, f)
avg_line_length: 25.0625 | max_line_length: 107 | alphanum_fraction: 0.538653

hexsha: beb4327bff90332bf13b33c0207e15bffdc0634e | size: 622 | ext: py | lang: Python
max_stars: Cards/templatetags/custom_tags.py | vabene1111/LearningCards @ 00539c8d5d3063eecc306dd68eb3eeeac89dba9f | ["MIT"] | count: 1 | events: 2020-03-18T15:10:42.000Z to 2020-03-18T15:10:42.000Z
max_issues: Cards/templatetags/custom_tags.py | vabene1111/LearningCards @ 00539c8d5d3063eecc306dd68eb3eeeac89dba9f | ["MIT"] | count: 1 | events: 2020-02-22T20:03:02.000Z to 2020-02-23T16:31:56.000Z
max_forks: Cards/templatetags/custom_tags.py | vabene1111/LearningCards @ 00539c8d5d3063eecc306dd68eb3eeeac89dba9f | ["MIT"] | count: null | events: null
content:
from django import template
import markdown as md
import bleach
from bleach_whitelist import markdown_tags, markdown_attrs
from Cards.helper.mdx_extension import MarkdownFormatExtension
register = template.Library()
@register.filter(name='get_class')
def get_class(value):
return value.__class__.__name__
@register.filter()
def markdown(value):
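    # Render the Markdown to HTML, then sanitize the result against an extended
    # tag whitelist so user-supplied content cannot inject arbitrary HTML.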
tags = markdown_tags + ['pre', 'table', 'td', 'tr', 'th', 'tbody', 'style', 'thead']
parsed_md = md.markdown(value, extensions=['markdown.extensions.fenced_code', 'tables', MarkdownFormatExtension()])
return bleach.clean(parsed_md, tags, markdown_attrs)
avg_line_length: 28.272727 | max_line_length: 119 | alphanum_fraction: 0.757235

hexsha: e8598a551df5a465963471a238131a7e37ba54e1 | size: 8,010 | ext: py | lang: Python
max_stars: axelrod/plot.py | mattshirtliffe/Axelrod @ 367a787e16541fda6e6076200805f5bb6a863973 | ["MIT"] | count: null | events: null
max_issues: axelrod/plot.py | mattshirtliffe/Axelrod @ 367a787e16541fda6e6076200805f5bb6a863973 | ["MIT"] | count: null | events: null
max_forks: axelrod/plot.py | mattshirtliffe/Axelrod @ 367a787e16541fda6e6076200805f5bb6a863973 | ["MIT"] | count: 1 | events: 2018-10-07T19:07:18.000Z to 2018-10-07T19:07:18.000Z
content:
from numpy import arange, median, nan_to_num
import warnings
matplotlib_installed = True
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
matplotlib_installed = False
except RuntimeError:
matplotlib_installed = False
warnings.warn(
'Matplotlib failed to import and so no plots will be produced. This ' +
'could be caused by using a virtual environment on OSX. See ' +
'http://matplotlib.org/faq/virtualenv_faq.html for details.')
def default_cmap():
"""Sets a default matplotlib colormap based on the version."""
s = matplotlib.__version__.split('.')
if int(s[0]) >= 1 and int(s[1]) >= 5:
return "viridis"
else:
return 'YlGnBu'
class Plot(object):
def __init__(self, result_set):
self.result_set = result_set
self.matplotlib_installed = matplotlib_installed
def _violinplot(self, data, names, title=None):
"""For making violinplots."""
if not self.matplotlib_installed:
return None
nplayers = self.result_set.nplayers
width = max(nplayers / 3, 12)
height = width / 2
figure = plt.figure(figsize=(width, height))
spacing = 4
positions = spacing * arange(1, nplayers + 1, 1)
plt.violinplot(data, positions=positions, widths=spacing / 2,
showmedians=True, showextrema=False)
plt.xticks(positions, names, rotation=90)
plt.xlim(0, spacing * (nplayers + 1))
plt.tick_params(axis='both', which='both', labelsize=8)
if title:
plt.title(title)
return figure
# Box and Violin plots for mean score, score differences, wins, and match
# lengths
@property
def _boxplot_dataset(self):
return [list(nan_to_num(self.result_set.normalised_scores[ir]))
for ir in self.result_set.ranking]
@property
def _boxplot_xticks_locations(self):
return list(range(1, len(self.result_set.ranked_names) + 2))
@property
def _boxplot_xticks_labels(self):
return [str(n) for n in self.result_set.ranked_names]
def boxplot(self, title=None):
"""For the specific mean score boxplot."""
data = self._boxplot_dataset
names = self._boxplot_xticks_labels
figure = self._violinplot(data, names, title=title)
return figure
@property
def _winplot_dataset(self):
# Sort wins by median
wins = self.result_set.wins
players = self.result_set.players
medians = map(median, wins)
medians = sorted(
[(m, i) for (i, m) in enumerate(medians)], reverse=True)
# Reorder and grab names
wins = [wins[x[-1]] for x in medians]
ranked_names = [str(players[x[-1]]) for x in medians]
return wins, ranked_names
def winplot(self, title=None):
"""Plots the distributions for the number of wins for each strategy."""
if not self.matplotlib_installed:
return None
data, names = self._winplot_dataset
figure = self._violinplot(data, names, title)
# Expand ylim a bit
maximum = max(max(w) for w in data)
plt.ylim(-0.5, 0.5 + maximum)
return figure
@property
def _sd_ordering(self):
return self.result_set.ranking
@property
def _sdv_plot_dataset(self):
ordering = self._sd_ordering
diffs = self.result_set.score_diffs
players = self.result_set.players
# Reorder and grab names
diffs = [diffs[i] for i in ordering]
ranked_names = [str(players[i]) for i in ordering]
return diffs, ranked_names
def sdvplot(self, title=None):
"""Score difference violinplots to visualize the distributions of how
players attain their payoffs."""
diffs, ranked_names = self._sdv_plot_dataset
figure = self._violinplot(diffs, ranked_names, title)
return figure
@property
def _lengthplot_dataset(self):
match_lengths = self.result_set.match_lengths
return [[length for rep in match_lengths
for length in rep[playeri]] for playeri in
self.result_set.ranking]
def lengthplot(self, title=None):
"""For the specific match length boxplot."""
data = self._lengthplot_dataset
names = self._boxplot_xticks_labels
figure = self._violinplot(data, names, title=title)
return figure
# Payoff heatmaps
@property
def _payoff_dataset(self):
pm = self.result_set.payoff_matrix
return [[pm[r1][r2]
for r2 in self.result_set.ranking]
for r1 in self.result_set.ranking]
@property
def _pdplot_dataset(self):
# Order like the sdv_plot
ordering = self._sd_ordering
pdm = self.result_set.payoff_diffs_means
# Reorder and grab names
matrix = [[pdm[r1][r2] for r2 in ordering]
for r1 in ordering]
players = self.result_set.players
ranked_names = [str(players[i]) for i in ordering]
return matrix, ranked_names
def _payoff_heatmap(self, data, names, title=None):
"""Generic heatmap plot"""
if not self.matplotlib_installed:
return None
nplayers = self.result_set.nplayers
width = max(nplayers / 4, 12)
height = width
figure, ax = plt.subplots()
figure.set_figwidth(width)
figure.set_figheight(height)
cmap = default_cmap()
mat = ax.matshow(data, cmap=cmap)
plt.xticks(range(self.result_set.nplayers))
plt.yticks(range(self.result_set.nplayers))
ax.set_xticklabels(names, rotation=90)
ax.set_yticklabels(names)
plt.tick_params(axis='both', which='both', labelsize=16)
if title:
plt.xlabel(title)
# Make the colorbar match up with the plot
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(mat, cax=cax)
return figure
def pdplot(self, title=None):
"""Payoff difference heatmap to visualize the distributions of how
players attain their payoffs."""
matrix, names = self._pdplot_dataset
return self._payoff_heatmap(matrix, names, title)
def payoff(self, title=None):
"""Payoff heatmap to visualize the distributions of how
players attain their payoffs."""
data = self._payoff_dataset
names = self.result_set.ranked_names
return self._payoff_heatmap(data, names, title)
# Ecological Plot
def stackplot(self, eco, title=None, logscale=True):
if not self.matplotlib_installed:
return None
populations = eco.population_sizes
figure, ax = plt.subplots()
turns = range(len(populations))
pops = [[populations[iturn][ir] for iturn in turns] for ir in self.result_set.ranking]
ax.stackplot(turns, *pops)
ax.yaxis.tick_left()
ax.yaxis.set_label_position("right")
ax.yaxis.labelpad = 25.0
plt.ylim([0.0, 1.0])
plt.ylabel('Relative population size')
plt.xlabel('Turn')
if title is not None:
plt.title(title)
trans = transforms.blended_transform_factory(ax.transAxes, ax.transData)
ticks = []
for i, n in enumerate(self.result_set.ranked_names):
x = -0.01
y = (i + 0.5) * 1.0 / self.result_set.nplayers
ax.annotate(n, xy=(x, y), xycoords=trans, clip_on=False,
va='center', ha='right', fontsize=5)
ticks.append(y)
ax.set_yticks(ticks)
ax.tick_params(direction='out')
ax.set_yticklabels([])
if logscale:
ax.set_xscale('log')
return figure
avg_line_length: 33.797468 | max_line_length: 94 | alphanum_fraction: 0.625094

hexsha: 559f9744d62b2642e3658d52d429a977ac7b2c05 | size: 994 | ext: py | lang: Python
max_stars: sdks/python/test/test_EmailVerificationRequest.py | Brantone/appcenter-sdks @ eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | ["MIT"] | count: null | events: null
max_issues: sdks/python/test/test_EmailVerificationRequest.py | Brantone/appcenter-sdks @ eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | ["MIT"] | count: 6 | events: 2019-10-23T06:38:53.000Z to 2022-01-22T07:57:58.000Z
max_forks: sdks/python/test/test_EmailVerificationRequest.py | Brantone/appcenter-sdks @ eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | ["MIT"] | count: 2 | events: 2019-10-23T06:31:05.000Z to 2021-08-21T17:32:47.000Z
content:
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from EmailVerificationRequest.clsEmailVerificationRequest import EmailVerificationRequest # noqa: E501
from appcenter_sdk.rest import ApiException
class TestEmailVerificationRequest(unittest.TestCase):
"""EmailVerificationRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEmailVerificationRequest(self):
"""Test EmailVerificationRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsEmailVerificationRequest.EmailVerificationRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
avg_line_length: 24.85 | max_line_length: 107 | alphanum_fraction: 0.738431

hexsha: b3e02eabff5ece3165aaa93e5b0fa4ab2e1aba2e | size: 3,171 | ext: py | lang: Python
max_stars: examples/pendulum3.py | birkenfeld/python-gr @ 1d6cd36616a73c8e569b8348869e6e30f3830ec4 | ["RSA-MD"] | count: null | events: null
max_issues: examples/pendulum3.py | birkenfeld/python-gr @ 1d6cd36616a73c8e569b8348869e6e30f3830ec4 | ["RSA-MD"] | count: null | events: null
max_forks: examples/pendulum3.py | birkenfeld/python-gr @ 1d6cd36616a73c8e569b8348869e6e30f3830ec4 | ["RSA-MD"] | count: null | events: null
content:
#!/usr/bin/env python
# -*- animation -*-
"""
3D animation of a pendulum
"""
from numpy import sin, cos, sqrt, pi, array
import time
import gr
import gr3
try:
from time import perf_counter
except ImportError:
from time import clock as perf_counter
g = 9.8 # gravitational constant
def rk4(x, h, y, f):
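    # Classic fourth-order Runge-Kutta step: advance the state y from x to
    # x + h using four slope evaluations of f.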
k1 = h * f(x, y)
k2 = h * f(x + 0.5 * h, y + 0.5 * k1)
k3 = h * f(x + 0.5 * h, y + 0.5 * k2)
k4 = h * f(x + h, y + k3)
return x + h, y + (k1 + 2 * (k2 + k3) + k4) / 6.0
def damped_pendulum_deriv(t, state):
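    # state = (theta, omega); returns (dtheta/dt, domega/dt) for the damped pendulum.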
theta, omega = state
return array([omega, -gamma * omega -g / L * sin(theta)])
def sign(x):
if x > 0:
return 1
elif x < 0:
return -1
else:
return 0
def pendulum(t, theta, omega, acceleration):
gr.clearws()
gr.setviewport(0, 1, 0, 1)
x, y = (sin(theta) * 3.0, -cos(theta) * 3.0)
gr3.clear()
# draw pivot point
gr3.drawspheremesh(1, (0, 0, 0), (0.4, 0.4, 0.4), 0.1)
# draw rod
gr3.drawcylindermesh(1, (0, 0, 0), (x, y, 0), (0.6, 0.6, 0.6), 0.05, 3.0)
# draw sphere
gr3.drawspheremesh(1, (x, y, 0), (1, 1, 1), 0.25)
# show angular velocity
V = 0.3 * omega - sign(omega) * 0.15
gr3.drawcylindermesh(1, (x, y, 0), (cos(theta), sin(theta), 0), (0, 0, 1),
0.05, V)
gr3.drawconemesh(1, (x + cos(theta) * V, y + sin(theta) * V, 0),
(-y, x, 0), (0, 0, 1), 0.1, sign(omega) * 0.25)
# show angular acceleration
A = 0.3 * acceleration
gr3.drawcylindermesh(1, (x, y, 0), (sin(theta), cos(theta), 0), (1, 0, 0),
0.05, A)
gr3.drawconemesh(1, (x + sin(theta) * A, y + cos(theta) * A, 0),
(x, -y, 0), (1, 0, 0), 0.1, 0.25)
# draw GR3 objects
gr3.drawimage(0, 1, 0.15, 0.85, 500, 350, gr3.GR3_Drawable.GR3_DRAWABLE_GKS)
gr.settextfontprec(2, gr.TEXT_PRECISION_STRING)
gr.setcharheight(0.024)
gr.settextcolorind(1)
gr.textext(0.05, 0.96, 'Damped Pendulum')
gr.mathtex(0.05, 0.9, '\\omega=\\dot{\\theta}')
gr.mathtex(0.05, 0.83, '\\dot{\\omega}=-\\gamma\\omega-\\frac{g}{l}sin(\\theta)')
gr.setcharheight(0.020)
gr.textext(0.05, 0.20, 't:%7.2f' % t)
gr.textext(0.05, 0.16, '\\theta:%7.2f' % (theta / pi * 180))
gr.settextcolorind(4)
gr.textext(0.05, 0.12, '\\omega:%7.2f' % omega)
gr.settextcolorind(2)
gr.textext(0.05, 0.08, 'y_{A}:%6.2f' % acceleration)
gr.updatews()
return
theta = 110.0 # initial angle
gamma = 0.1 # damping coefficient
L = 1 # pendulum length
t = 0
dt = 0.04
state = array([theta * pi / 180, 0])
gr3.init()
gr3.setcameraprojectionparameters(45, 1, 100)
gr3.cameralookat(0, -2, 6, 0, -2, 0, 0, 1, 0)
gr3.setbackgroundcolor(1, 1, 1, 1)
gr3.setlightdirection(1, 1, 10)
now = perf_counter()
while t < 30:
start = now
t, state = rk4(t, dt, state, damped_pendulum_deriv)
theta, omega = state
acceleration = sqrt(2 * g * L * (1 - cos(theta)))
pendulum(t, theta, omega, acceleration)
now = perf_counter()
if start + dt > now:
time.sleep(start + dt - now)
gr3.terminate()
avg_line_length: 28.567568 | max_line_length: 85 | alphanum_fraction: 0.548407

hexsha: 89d66b348594f057da46376ff87c325aec69f94b | size: 158 | ext: py | lang: Python
max_stars: tests/file_error.py | dionboles/cda-python @ 2e00d29931801bb977d20028cb4795195d550aa4 | ["Apache-2.0"] | count: null | events: null
max_issues: tests/file_error.py | dionboles/cda-python @ 2e00d29931801bb977d20028cb4795195d550aa4 | ["Apache-2.0"] | count: null | events: null
max_forks: tests/file_error.py | dionboles/cda-python @ 2e00d29931801bb977d20028cb4795195d550aa4 | ["Apache-2.0"] | count: null | events: null
content:
from cdapython import Q
q1 = Q("identifier.system = 'GDC'")
q6 = Q("File.identifier.system = 'GDC'")
q16 = q6
print(q16.run(host="http://localhost:8080"))
avg_line_length: 17.555556 | max_line_length: 44 | alphanum_fraction: 0.664557

hexsha: c57dad39fffb31a8ef0d668578254f6ade10e240 | size: 602 | ext: py | lang: Python
max_stars: tests/unit/test_unpack_payload.py | ralphribeiro/facilita-DOU @ e695ac0f58369d61fad2723bd5e52ecd80d0b33f | ["MIT"] | count: null | events: null
max_issues: tests/unit/test_unpack_payload.py | ralphribeiro/facilita-DOU @ e695ac0f58369d61fad2723bd5e52ecd80d0b33f | ["MIT"] | count: null | events: null
max_forks: tests/unit/test_unpack_payload.py | ralphribeiro/facilita-DOU @ e695ac0f58369d61fad2723bd5e52ecd80d0b33f | ["MIT"] | count: null | events: null
content:
from os.path import exists, join
from os import remove
from zipfile import ZipFile
from src.app.payload import unpack_payload
def test_unpack_payload_given_path(temp_dir):
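    # Write a file into temp_dir, zip it, remove the original, then verify that
    # unpack_payload restores it with identical contents.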
file_name = 'file.txt'
content = b'data test'
path = join(temp_dir, file_name)
with open(path, 'wb') as f:
f.write(content)
path_zip = join(temp_dir, 'file.zip')
with ZipFile(path_zip, 'x') as z:
z.write(path, file_name)
remove(path)
assert exists(path_zip)
unpack_payload(path_zip)
assert exists(path)
with open(path, 'rb') as f:
assert f.read() == content
avg_line_length: 25.083333 | max_line_length: 45 | alphanum_fraction: 0.672757

hexsha: cf2f985a2d3383539537ef215ce190dc7eb7107d | size: 37,079 | ext: py | lang: Python
max_stars: storage/tests/system.py | sorced-jim/google-cloud-python @ 239907085564ae5de8df6f7358b1379252516631 | ["Apache-2.0"] | count: 1 | events: 2021-07-15T14:09:37.000Z to 2021-07-15T14:09:37.000Z
max_issues: storage/tests/system.py | sorced-jim/google-cloud-python @ 239907085564ae5de8df6f7358b1379252516631 | ["Apache-2.0"] | count: 1 | events: 2021-06-25T15:16:57.000Z to 2021-06-25T15:16:57.000Z
max_forks: storage/tests/system.py | y1ngyang/google-cloud-python @ 1acc8c22664229b6681ff91654932998e611e1c2 | ["Apache-2.0"] | count: null | events: null
content:
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import time
import unittest
import requests
import six
from google.cloud import exceptions
from google.cloud import storage
from google.cloud.storage._helpers import _base64_md5hash
from test_utils.retry import RetryErrors
from test_utils.system import unique_resource_id
USER_PROJECT = os.environ.get('GOOGLE_CLOUD_TESTS_USER_PROJECT')
def _bad_copy(bad_request):
"""Predicate: pass only exceptions for a failed copyTo."""
err_msg = bad_request.message
return (err_msg.startswith('No file found in request. (POST') and
'copyTo' in err_msg)
retry_429 = RetryErrors(exceptions.TooManyRequests)
retry_bad_copy = RetryErrors(exceptions.BadRequest,
error_predicate=_bad_copy)
def _empty_bucket(bucket):
"""Empty a bucket of all existing blobs.
This accounts (partially) for the eventual consistency of the
list blobs API call.
"""
for blob in bucket.list_blobs():
try:
blob.delete()
except exceptions.NotFound: # eventual consistency
pass
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
TEST_BUCKET = None
def setUpModule():
Config.CLIENT = storage.Client()
bucket_name = 'new' + unique_resource_id()
# In the **very** rare case the bucket name is reserved, this
# fails with a ConnectionError.
Config.TEST_BUCKET = Config.CLIENT.bucket(bucket_name)
retry_429(Config.TEST_BUCKET.create)()
def tearDownModule():
retry = RetryErrors(exceptions.Conflict)
retry(Config.TEST_BUCKET.delete)(force=True)
class TestStorageBuckets(unittest.TestCase):
def setUp(self):
self.case_buckets_to_delete = []
def tearDown(self):
for bucket_name in self.case_buckets_to_delete:
bucket = Config.CLIENT.bucket(bucket_name)
retry_429(bucket.delete)()
def test_create_bucket(self):
new_bucket_name = 'a-new-bucket' + unique_resource_id('-')
self.assertRaises(exceptions.NotFound,
Config.CLIENT.get_bucket, new_bucket_name)
created = Config.CLIENT.create_bucket(new_bucket_name)
self.case_buckets_to_delete.append(new_bucket_name)
self.assertEqual(created.name, new_bucket_name)
def test_list_buckets(self):
buckets_to_create = [
'new' + unique_resource_id(),
'newer' + unique_resource_id(),
'newest' + unique_resource_id(),
]
created_buckets = []
for bucket_name in buckets_to_create:
bucket = Config.CLIENT.bucket(bucket_name)
retry_429(bucket.create)()
self.case_buckets_to_delete.append(bucket_name)
# Retrieve the buckets.
all_buckets = Config.CLIENT.list_buckets()
created_buckets = [bucket for bucket in all_buckets
if bucket.name in buckets_to_create]
self.assertEqual(len(created_buckets), len(buckets_to_create))
def test_bucket_update_labels(self):
bucket_name = 'update-labels' + unique_resource_id('-')
bucket = retry_429(Config.CLIENT.create_bucket)(bucket_name)
self.case_buckets_to_delete.append(bucket_name)
self.assertTrue(bucket.exists())
updated_labels = {'test-label': 'label-value'}
bucket.labels = updated_labels
bucket.update()
self.assertEqual(bucket.labels, updated_labels)
new_labels = {'another-label': 'another-value'}
bucket.labels = new_labels
bucket.patch()
self.assertEqual(bucket.labels, new_labels)
bucket.labels = {}
bucket.update()
self.assertEqual(bucket.labels, {})
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_crud_bucket_with_requester_pays(self):
new_bucket_name = 'w-requester-pays' + unique_resource_id('-')
created = Config.CLIENT.create_bucket(
new_bucket_name, requester_pays=True)
self.case_buckets_to_delete.append(new_bucket_name)
self.assertEqual(created.name, new_bucket_name)
self.assertTrue(created.requester_pays)
with_user_project = Config.CLIENT.bucket(
new_bucket_name, user_project=USER_PROJECT)
# Bucket will be deleted in-line below.
self.case_buckets_to_delete.remove(new_bucket_name)
try:
# Exercise 'buckets.get' w/ userProject.
self.assertTrue(with_user_project.exists())
with_user_project.reload()
self.assertTrue(with_user_project.requester_pays)
# Exercise 'buckets.patch' w/ userProject.
with_user_project.configure_website(
main_page_suffix='index.html', not_found_page='404.html')
with_user_project.patch()
self.assertEqual(
with_user_project._properties['website'], {
'mainPageSuffix': 'index.html',
'notFoundPage': '404.html',
})
# Exercise 'buckets.update' w/ userProject.
new_labels = {'another-label': 'another-value'}
with_user_project.labels = new_labels
with_user_project.update()
self.assertEqual(with_user_project.labels, new_labels)
finally:
# Exercise 'buckets.delete' w/ userProject.
with_user_project.delete()
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_bucket_acls_iam_with_user_project(self):
new_bucket_name = 'acl-w-user-project' + unique_resource_id('-')
Config.CLIENT.create_bucket(
new_bucket_name, requester_pays=True)
self.case_buckets_to_delete.append(new_bucket_name)
with_user_project = Config.CLIENT.bucket(
new_bucket_name, user_project=USER_PROJECT)
# Exercise bucket ACL w/ userProject
acl = with_user_project.acl
acl.reload()
acl.all().grant_read()
acl.save()
self.assertIn('READER', acl.all().get_roles())
del acl.entities['allUsers']
acl.save()
self.assertFalse(acl.has_entity('allUsers'))
# Exercise default object ACL w/ userProject
doa = with_user_project.default_object_acl
doa.reload()
doa.all().grant_read()
doa.save()
self.assertIn('READER', doa.all().get_roles())
# Exercise IAM w/ userProject
test_permissions = ['storage.buckets.get']
self.assertEqual(
with_user_project.test_iam_permissions(test_permissions),
test_permissions)
policy = with_user_project.get_iam_policy()
viewers = policy.setdefault('roles/storage.objectViewer', set())
viewers.add(policy.all_users())
with_user_project.set_iam_policy(policy)
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_copy_existing_file_with_user_project(self):
new_bucket_name = 'copy-w-requester-pays' + unique_resource_id('-')
created = Config.CLIENT.create_bucket(
new_bucket_name, requester_pays=True)
self.case_buckets_to_delete.append(new_bucket_name)
self.assertEqual(created.name, new_bucket_name)
self.assertTrue(created.requester_pays)
to_delete = []
blob = storage.Blob('simple', bucket=created)
blob.upload_from_string(b'DEADBEEF')
to_delete.append(blob)
try:
with_user_project = Config.CLIENT.bucket(
new_bucket_name, user_project=USER_PROJECT)
new_blob = retry_bad_copy(with_user_project.copy_blob)(
blob, with_user_project, 'simple-copy')
to_delete.append(new_blob)
base_contents = blob.download_as_string()
copied_contents = new_blob.download_as_string()
self.assertEqual(base_contents, copied_contents)
finally:
for blob in to_delete:
retry_429(blob.delete)()
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_bucket_get_blob_with_user_project(self):
new_bucket_name = 'w-requester-pays' + unique_resource_id('-')
data = b'DEADBEEF'
created = Config.CLIENT.create_bucket(
new_bucket_name, requester_pays=True)
self.case_buckets_to_delete.append(new_bucket_name)
self.assertEqual(created.name, new_bucket_name)
self.assertTrue(created.requester_pays)
with_user_project = Config.CLIENT.bucket(
new_bucket_name, user_project=USER_PROJECT)
self.assertIsNone(with_user_project.get_blob('nonesuch'))
to_add = created.blob('blob-name')
to_add.upload_from_string(data)
try:
found = with_user_project.get_blob('blob-name')
self.assertEqual(found.download_as_string(), data)
finally:
to_add.delete()
class TestStorageFiles(unittest.TestCase):
DIRNAME = os.path.realpath(os.path.dirname(__file__))
FILES = {
'logo': {
'path': DIRNAME + '/data/CloudPlatform_128px_Retina.png',
},
'big': {
'path': DIRNAME + '/data/five-point-one-mb-file.zip',
},
'simple': {
'path': DIRNAME + '/data/simple.txt',
}
}
@classmethod
def setUpClass(cls):
super(TestStorageFiles, cls).setUpClass()
for file_data in cls.FILES.values():
with open(file_data['path'], 'rb') as file_obj:
file_data['hash'] = _base64_md5hash(file_obj)
cls.bucket = Config.TEST_BUCKET
def setUp(self):
self.case_blobs_to_delete = []
def tearDown(self):
for blob in self.case_blobs_to_delete:
blob.delete()
class TestStorageWriteFiles(TestStorageFiles):
ENCRYPTION_KEY = 'b23ff11bba187db8c37077e6af3b25b8'
def test_large_file_write_from_stream(self):
blob = self.bucket.blob('LargeFile')
file_data = self.FILES['big']
with open(file_data['path'], 'rb') as file_obj:
blob.upload_from_file(file_obj)
self.case_blobs_to_delete.append(blob)
md5_hash = blob.md5_hash
if not isinstance(md5_hash, six.binary_type):
md5_hash = md5_hash.encode('utf-8')
self.assertEqual(md5_hash, file_data['hash'])
def test_large_encrypted_file_write_from_stream(self):
blob = self.bucket.blob('LargeFile',
encryption_key=self.ENCRYPTION_KEY)
file_data = self.FILES['big']
with open(file_data['path'], 'rb') as file_obj:
blob.upload_from_file(file_obj)
self.case_blobs_to_delete.append(blob)
md5_hash = blob.md5_hash
if not isinstance(md5_hash, six.binary_type):
md5_hash = md5_hash.encode('utf-8')
self.assertEqual(md5_hash, file_data['hash'])
temp_filename = tempfile.mktemp()
with open(temp_filename, 'wb') as file_obj:
blob.download_to_file(file_obj)
with open(temp_filename, 'rb') as file_obj:
md5_temp_hash = _base64_md5hash(file_obj)
self.assertEqual(md5_temp_hash, file_data['hash'])
def test_small_file_write_from_filename(self):
blob = self.bucket.blob('SmallFile')
file_data = self.FILES['simple']
blob.upload_from_filename(file_data['path'])
self.case_blobs_to_delete.append(blob)
md5_hash = blob.md5_hash
if not isinstance(md5_hash, six.binary_type):
md5_hash = md5_hash.encode('utf-8')
self.assertEqual(md5_hash, file_data['hash'])
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_crud_blob_w_user_project(self):
with_user_project = Config.CLIENT.bucket(
self.bucket.name, user_project=USER_PROJECT)
blob = with_user_project.blob('SmallFile')
file_data = self.FILES['simple']
with open(file_data['path'], mode='rb') as to_read:
file_contents = to_read.read()
# Exercise 'objects.insert' w/ userProject.
blob.upload_from_filename(file_data['path'])
try:
# Exercise 'objects.get' (metadata) w/ userProject.
self.assertTrue(blob.exists())
blob.reload()
# Exercise 'objects.get' (media) w/ userProject.
downloaded = blob.download_as_string()
self.assertEqual(downloaded, file_contents)
# Exercise 'objects.patch' w/ userProject.
blob.content_language = 'en'
blob.patch()
self.assertEqual(blob.content_language, 'en')
# Exercise 'objects.update' w/ userProject.
metadata = {
'foo': 'Foo',
'bar': 'Bar',
}
blob.metadata = metadata
blob.update()
self.assertEqual(blob.metadata, metadata)
finally:
# Exercise 'objects.delete' (metadata) w/ userProject.
blob.delete()
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_blob_acl_w_user_project(self):
with_user_project = Config.CLIENT.bucket(
self.bucket.name, user_project=USER_PROJECT)
blob = with_user_project.blob('SmallFile')
file_data = self.FILES['simple']
blob.upload_from_filename(file_data['path'])
self.case_blobs_to_delete.append(blob)
# Exercise bucket ACL w/ userProject
acl = blob.acl
acl.reload()
acl.all().grant_read()
acl.save()
self.assertIn('READER', acl.all().get_roles())
del acl.entities['allUsers']
acl.save()
self.assertFalse(acl.has_entity('allUsers'))
def test_upload_blob_acl(self):
control = self.bucket.blob('logo')
control_data = self.FILES['logo']
blob = self.bucket.blob('SmallFile')
file_data = self.FILES['simple']
try:
control.upload_from_filename(control_data['path'])
blob.upload_from_filename(file_data['path'],
predefined_acl='publicRead')
finally:
self.case_blobs_to_delete.append(blob)
self.case_blobs_to_delete.append(control)
control_acl = control.acl
self.assertNotIn('READER', control_acl.all().get_roles())
acl = blob.acl
self.assertIn('READER', acl.all().get_roles())
acl.all().revoke_read()
self.assertSequenceEqual(acl.all().get_roles(), set([]))
self.assertEqual(control_acl.all().get_roles(), acl.all().get_roles())
def test_write_metadata(self):
filename = self.FILES['logo']['path']
blob_name = os.path.basename(filename)
blob = storage.Blob(blob_name, bucket=self.bucket)
blob.upload_from_filename(filename)
self.case_blobs_to_delete.append(blob)
# NOTE: This should not be necessary. We should be able to pass
# it in to upload_file and also to upload_from_string.
blob.content_type = 'image/png'
self.assertEqual(blob.content_type, 'image/png')
def test_direct_write_and_read_into_file(self):
blob = self.bucket.blob('MyBuffer')
file_contents = b'Hello World'
blob.upload_from_string(file_contents)
self.case_blobs_to_delete.append(blob)
same_blob = self.bucket.blob('MyBuffer')
same_blob.reload() # Initialize properties.
temp_filename = tempfile.mktemp()
with open(temp_filename, 'wb') as file_obj:
same_blob.download_to_file(file_obj)
with open(temp_filename, 'rb') as file_obj:
stored_contents = file_obj.read()
self.assertEqual(file_contents, stored_contents)
def test_copy_existing_file(self):
filename = self.FILES['logo']['path']
blob = storage.Blob('CloudLogo', bucket=self.bucket)
blob.upload_from_filename(filename)
self.case_blobs_to_delete.append(blob)
new_blob = retry_bad_copy(self.bucket.copy_blob)(
blob, self.bucket, 'CloudLogoCopy')
self.case_blobs_to_delete.append(new_blob)
base_contents = blob.download_as_string()
copied_contents = new_blob.download_as_string()
self.assertEqual(base_contents, copied_contents)
class TestUnicode(unittest.TestCase):
def test_fetch_object_and_check_content(self):
client = storage.Client()
bucket = client.bucket('storage-library-test-bucket')
# Note: These files are public.
# Normalization form C: a single character for e-acute;
# URL should end with Cafe%CC%81
# Normalization Form D: an ASCII e followed by U+0301 combining
# character; URL should end with Caf%C3%A9
test_data = {
u'Caf\u00e9': b'Normalization Form C',
u'Cafe\u0301': b'Normalization Form D',
}
for blob_name, file_contents in test_data.items():
blob = bucket.blob(blob_name)
self.assertEqual(blob.name, blob_name)
self.assertEqual(blob.download_as_string(), file_contents)
class TestStorageListFiles(TestStorageFiles):
FILENAMES = ('CloudLogo1', 'CloudLogo2', 'CloudLogo3')
@classmethod
def setUpClass(cls):
super(TestStorageListFiles, cls).setUpClass()
# Make sure bucket empty before beginning.
_empty_bucket(cls.bucket)
logo_path = cls.FILES['logo']['path']
blob = storage.Blob(cls.FILENAMES[0], bucket=cls.bucket)
blob.upload_from_filename(logo_path)
cls.suite_blobs_to_delete = [blob]
# Copy main blob onto remaining in FILENAMES.
for filename in cls.FILENAMES[1:]:
new_blob = retry_bad_copy(cls.bucket.copy_blob)(
blob, cls.bucket, filename)
cls.suite_blobs_to_delete.append(new_blob)
@classmethod
def tearDownClass(cls):
for blob in cls.suite_blobs_to_delete:
blob.delete()
@RetryErrors(unittest.TestCase.failureException)
def test_list_files(self):
all_blobs = list(self.bucket.list_blobs())
self.assertEqual(sorted(blob.name for blob in all_blobs),
sorted(self.FILENAMES))
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
@RetryErrors(unittest.TestCase.failureException)
def test_list_files_with_user_project(self):
with_user_project = Config.CLIENT.bucket(
self.bucket.name, user_project=USER_PROJECT)
all_blobs = list(with_user_project.list_blobs())
self.assertEqual(sorted(blob.name for blob in all_blobs),
sorted(self.FILENAMES))
@RetryErrors(unittest.TestCase.failureException)
def test_paginate_files(self):
truncation_size = 1
count = len(self.FILENAMES) - truncation_size
iterator = self.bucket.list_blobs(max_results=count)
page_iter = iterator.pages
page1 = six.next(page_iter)
blobs = list(page1)
self.assertEqual(len(blobs), count)
self.assertIsNotNone(iterator.next_page_token)
# Technically the iterator is exhausted.
self.assertEqual(iterator.num_results, iterator.max_results)
# But we modify the iterator to continue paging after
        # artificially stopping after ``count`` items.
iterator.max_results = None
page2 = six.next(page_iter)
last_blobs = list(page2)
self.assertEqual(len(last_blobs), truncation_size)
class TestStoragePseudoHierarchy(TestStorageFiles):
FILENAMES = (
'file01.txt',
'parent/file11.txt',
'parent/child/file21.txt',
'parent/child/file22.txt',
'parent/child/grand/file31.txt',
'parent/child/other/file32.txt',
)
@classmethod
def setUpClass(cls):
super(TestStoragePseudoHierarchy, cls).setUpClass()
# Make sure bucket empty before beginning.
_empty_bucket(cls.bucket)
cls.suite_blobs_to_delete = []
simple_path = cls.FILES['simple']['path']
for filename in cls.FILENAMES:
blob = storage.Blob(filename, bucket=cls.bucket)
blob.upload_from_filename(simple_path)
cls.suite_blobs_to_delete.append(blob)
@classmethod
def tearDownClass(cls):
for blob in cls.suite_blobs_to_delete:
blob.delete()
@RetryErrors(unittest.TestCase.failureException)
def test_blob_get_w_delimiter(self):
for filename in self.FILENAMES:
blob = self.bucket.blob(filename)
self.assertTrue(blob.exists(), filename)
@RetryErrors(unittest.TestCase.failureException)
def test_root_level_w_delimiter(self):
iterator = self.bucket.list_blobs(delimiter='/')
page = six.next(iterator.pages)
blobs = list(page)
self.assertEqual([blob.name for blob in blobs], ['file01.txt'])
self.assertIsNone(iterator.next_page_token)
self.assertEqual(iterator.prefixes, set(['parent/']))
@RetryErrors(unittest.TestCase.failureException)
def test_first_level(self):
iterator = self.bucket.list_blobs(delimiter='/', prefix='parent/')
page = six.next(iterator.pages)
blobs = list(page)
self.assertEqual([blob.name for blob in blobs], ['parent/file11.txt'])
self.assertIsNone(iterator.next_page_token)
self.assertEqual(iterator.prefixes, set(['parent/child/']))
@RetryErrors(unittest.TestCase.failureException)
def test_second_level(self):
expected_names = [
'parent/child/file21.txt',
'parent/child/file22.txt',
]
iterator = self.bucket.list_blobs(delimiter='/',
prefix='parent/child/')
page = six.next(iterator.pages)
blobs = list(page)
self.assertEqual([blob.name for blob in blobs],
expected_names)
self.assertIsNone(iterator.next_page_token)
self.assertEqual(iterator.prefixes,
set(['parent/child/grand/', 'parent/child/other/']))
@RetryErrors(unittest.TestCase.failureException)
def test_third_level(self):
# Pseudo-hierarchy can be arbitrarily deep, subject to the limit
# of 1024 characters in the UTF-8 encoded name:
# https://cloud.google.com/storage/docs/bucketnaming#objectnames
# Exercise a layer deeper to illustrate this.
iterator = self.bucket.list_blobs(delimiter='/',
prefix='parent/child/grand/')
page = six.next(iterator.pages)
blobs = list(page)
self.assertEqual([blob.name for blob in blobs],
['parent/child/grand/file31.txt'])
self.assertIsNone(iterator.next_page_token)
self.assertEqual(iterator.prefixes, set())
class TestStorageSignURLs(TestStorageFiles):
def setUp(self):
super(TestStorageSignURLs, self).setUp()
logo_path = self.FILES['logo']['path']
with open(logo_path, 'rb') as file_obj:
self.LOCAL_FILE = file_obj.read()
blob = self.bucket.blob('LogoToSign.jpg')
blob.upload_from_string(self.LOCAL_FILE)
self.case_blobs_to_delete.append(blob)
def tearDown(self):
for blob in self.case_blobs_to_delete:
if blob.exists():
blob.delete()
def test_create_signed_read_url(self):
blob = self.bucket.blob('LogoToSign.jpg')
expiration = int(time.time() + 10)
signed_url = blob.generate_signed_url(expiration, method='GET',
client=Config.CLIENT)
response = requests.get(signed_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, self.LOCAL_FILE)
def test_create_signed_delete_url(self):
blob = self.bucket.blob('LogoToSign.jpg')
expiration = int(time.time() + 283473274)
signed_delete_url = blob.generate_signed_url(expiration,
method='DELETE',
client=Config.CLIENT)
response = requests.request('DELETE', signed_delete_url)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.content, b'')
# Check that the blob has actually been deleted.
self.assertFalse(blob.exists())
class TestStorageCompose(TestStorageFiles):
FILES = {}
def test_compose_create_new_blob(self):
SOURCE_1 = b'AAA\n'
source_1 = self.bucket.blob('source-1')
source_1.upload_from_string(SOURCE_1)
self.case_blobs_to_delete.append(source_1)
SOURCE_2 = b'BBB\n'
source_2 = self.bucket.blob('source-2')
source_2.upload_from_string(SOURCE_2)
self.case_blobs_to_delete.append(source_2)
destination = self.bucket.blob('destination')
destination.content_type = 'text/plain'
destination.compose([source_1, source_2])
self.case_blobs_to_delete.append(destination)
composed = destination.download_as_string()
self.assertEqual(composed, SOURCE_1 + SOURCE_2)
def test_compose_replace_existing_blob(self):
BEFORE = b'AAA\n'
original = self.bucket.blob('original')
original.content_type = 'text/plain'
original.upload_from_string(BEFORE)
self.case_blobs_to_delete.append(original)
TO_APPEND = b'BBB\n'
to_append = self.bucket.blob('to_append')
to_append.upload_from_string(TO_APPEND)
self.case_blobs_to_delete.append(to_append)
original.compose([original, to_append])
composed = original.download_as_string()
self.assertEqual(composed, BEFORE + TO_APPEND)
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_compose_with_user_project(self):
new_bucket_name = 'compose-user-project' + unique_resource_id('-')
created = Config.CLIENT.create_bucket(
new_bucket_name, requester_pays=True)
try:
SOURCE_1 = b'AAA\n'
source_1 = created.blob('source-1')
source_1.upload_from_string(SOURCE_1)
SOURCE_2 = b'BBB\n'
source_2 = created.blob('source-2')
source_2.upload_from_string(SOURCE_2)
with_user_project = Config.CLIENT.bucket(
new_bucket_name, user_project=USER_PROJECT)
destination = with_user_project.blob('destination')
destination.content_type = 'text/plain'
destination.compose([source_1, source_2])
composed = destination.download_as_string()
self.assertEqual(composed, SOURCE_1 + SOURCE_2)
finally:
retry_429(created.delete)(force=True)
class TestStorageRewrite(TestStorageFiles):
FILENAMES = (
'file01.txt',
)
def test_rewrite_create_new_blob_add_encryption_key(self):
file_data = self.FILES['simple']
source = self.bucket.blob('source')
source.upload_from_filename(file_data['path'])
self.case_blobs_to_delete.append(source)
source_data = source.download_as_string()
KEY = os.urandom(32)
dest = self.bucket.blob('dest', encryption_key=KEY)
token, rewritten, total = dest.rewrite(source)
self.case_blobs_to_delete.append(dest)
self.assertEqual(token, None)
self.assertEqual(rewritten, len(source_data))
self.assertEqual(total, len(source_data))
self.assertEqual(source.download_as_string(),
dest.download_as_string())
def test_rewrite_rotate_encryption_key(self):
BLOB_NAME = 'rotating-keys'
file_data = self.FILES['simple']
SOURCE_KEY = os.urandom(32)
source = self.bucket.blob(BLOB_NAME, encryption_key=SOURCE_KEY)
source.upload_from_filename(file_data['path'])
self.case_blobs_to_delete.append(source)
source_data = source.download_as_string()
DEST_KEY = os.urandom(32)
dest = self.bucket.blob(BLOB_NAME, encryption_key=DEST_KEY)
token, rewritten, total = dest.rewrite(source)
# Not adding 'dest' to 'self.case_blobs_to_delete': it is the
# same object as 'source'.
self.assertIsNone(token)
self.assertEqual(rewritten, len(source_data))
self.assertEqual(total, len(source_data))
self.assertEqual(dest.download_as_string(), source_data)
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_rewrite_add_key_with_user_project(self):
file_data = self.FILES['simple']
new_bucket_name = 'rewrite-key-up' + unique_resource_id('-')
created = Config.CLIENT.create_bucket(
new_bucket_name, requester_pays=True)
try:
with_user_project = Config.CLIENT.bucket(
new_bucket_name, user_project=USER_PROJECT)
source = with_user_project.blob('source')
source.upload_from_filename(file_data['path'])
source_data = source.download_as_string()
KEY = os.urandom(32)
dest = with_user_project.blob('dest', encryption_key=KEY)
token, rewritten, total = dest.rewrite(source)
self.assertEqual(token, None)
self.assertEqual(rewritten, len(source_data))
self.assertEqual(total, len(source_data))
self.assertEqual(source.download_as_string(),
dest.download_as_string())
finally:
retry_429(created.delete)(force=True)
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_rewrite_rotate_with_user_project(self):
BLOB_NAME = 'rotating-keys'
file_data = self.FILES['simple']
new_bucket_name = 'rewrite-rotate-up' + unique_resource_id('-')
created = Config.CLIENT.create_bucket(
new_bucket_name, requester_pays=True)
try:
with_user_project = Config.CLIENT.bucket(
new_bucket_name, user_project=USER_PROJECT)
SOURCE_KEY = os.urandom(32)
source = with_user_project.blob(
BLOB_NAME, encryption_key=SOURCE_KEY)
source.upload_from_filename(file_data['path'])
source_data = source.download_as_string()
DEST_KEY = os.urandom(32)
dest = with_user_project.blob(BLOB_NAME, encryption_key=DEST_KEY)
token, rewritten, total = dest.rewrite(source)
self.assertEqual(token, None)
self.assertEqual(rewritten, len(source_data))
self.assertEqual(total, len(source_data))
self.assertEqual(dest.download_as_string(), source_data)
finally:
retry_429(created.delete)(force=True)
class TestStorageNotificationCRUD(unittest.TestCase):
topic = None
TOPIC_NAME = 'notification' + unique_resource_id('-')
CUSTOM_ATTRIBUTES = {
'attr1': 'value1',
'attr2': 'value2',
}
BLOB_NAME_PREFIX = 'blob-name-prefix/'
@property
def topic_path(self):
return 'projects/{}/topics/{}'.format(
Config.CLIENT.project, self.TOPIC_NAME)
    def _initialize_topic(self):
try:
from google.cloud.pubsub_v1 import PublisherClient
except ImportError:
raise unittest.SkipTest("Cannot import pubsub")
self.publisher_client = PublisherClient()
retry_429(self.publisher_client.create_topic)(self.topic_path)
policy = self.publisher_client.get_iam_policy(self.topic_path)
binding = policy.bindings.add()
binding.role = 'roles/pubsub.publisher'
binding.members.append(
'serviceAccount:{}'
'@gs-project-accounts.iam.gserviceaccount.com'.format(
Config.CLIENT.project))
self.publisher_client.set_iam_policy(self.topic_path, policy)
def setUp(self):
self.case_buckets_to_delete = []
        self._initialize_topic()
def tearDown(self):
retry_429(self.publisher_client.delete_topic)(self.topic_path)
with Config.CLIENT.batch():
for bucket_name in self.case_buckets_to_delete:
bucket = Config.CLIENT.bucket(bucket_name)
retry_429(bucket.delete)()
@staticmethod
def event_types():
from google.cloud.storage.notification import (
OBJECT_FINALIZE_EVENT_TYPE,
OBJECT_DELETE_EVENT_TYPE)
return [OBJECT_FINALIZE_EVENT_TYPE, OBJECT_DELETE_EVENT_TYPE]
@staticmethod
def payload_format():
from google.cloud.storage.notification import (
JSON_API_V1_PAYLOAD_FORMAT)
return JSON_API_V1_PAYLOAD_FORMAT
def test_notification_minimal(self):
new_bucket_name = 'notification-minimal' + unique_resource_id('-')
bucket = retry_429(Config.CLIENT.create_bucket)(new_bucket_name)
self.case_buckets_to_delete.append(new_bucket_name)
self.assertEqual(list(bucket.list_notifications()), [])
notification = bucket.notification(self.TOPIC_NAME)
retry_429(notification.create)()
try:
self.assertTrue(notification.exists())
self.assertIsNotNone(notification.notification_id)
notifications = list(bucket.list_notifications())
self.assertEqual(len(notifications), 1)
self.assertEqual(notifications[0].topic_name, self.TOPIC_NAME)
finally:
notification.delete()
def test_notification_explicit(self):
new_bucket_name = 'notification-explicit' + unique_resource_id('-')
bucket = retry_429(Config.CLIENT.create_bucket)(new_bucket_name)
self.case_buckets_to_delete.append(new_bucket_name)
notification = bucket.notification(
self.TOPIC_NAME,
custom_attributes=self.CUSTOM_ATTRIBUTES,
event_types=self.event_types(),
blob_name_prefix=self.BLOB_NAME_PREFIX,
payload_format=self.payload_format(),
)
retry_429(notification.create)()
try:
self.assertTrue(notification.exists())
self.assertIsNotNone(notification.notification_id)
self.assertEqual(
notification.custom_attributes, self.CUSTOM_ATTRIBUTES)
self.assertEqual(notification.event_types, self.event_types())
self.assertEqual(
notification.blob_name_prefix, self.BLOB_NAME_PREFIX)
self.assertEqual(
notification.payload_format, self.payload_format())
finally:
notification.delete()
@unittest.skipUnless(USER_PROJECT, 'USER_PROJECT not set in environment.')
def test_notification_w_user_project(self):
new_bucket_name = 'notification-minimal' + unique_resource_id('-')
retry_429(Config.CLIENT.create_bucket)(
new_bucket_name, requester_pays=True)
self.case_buckets_to_delete.append(new_bucket_name)
with_user_project = Config.CLIENT.bucket(
new_bucket_name, user_project=USER_PROJECT)
self.assertEqual(list(with_user_project.list_notifications()), [])
notification = with_user_project.notification(self.TOPIC_NAME)
retry_429(notification.create)()
try:
self.assertTrue(notification.exists())
self.assertIsNotNone(notification.notification_id)
notifications = list(with_user_project.list_notifications())
self.assertEqual(len(notifications), 1)
self.assertEqual(notifications[0].topic_name, self.TOPIC_NAME)
finally:
notification.delete()
class TestAnonymousClient(unittest.TestCase):
PUBLIC_BUCKET = 'gcp-public-data-landsat'
def test_access_to_public_bucket(self):
anonymous = storage.Client.create_anonymous_client()
bucket = anonymous.bucket(self.PUBLIC_BUCKET)
blob, = bucket.list_blobs(max_results=1)
with tempfile.TemporaryFile() as stream:
blob.download_to_file(stream)
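# Illustrative sketch (an addition, not part of the original test suite): the same compose
# API exercised by TestStorageCompose, used outside the unittest harness. The client,
# bucket and blob names here are hypothetical placeholders.
def compose_example(client, bucket_name):
    bucket = client.bucket(bucket_name)
    part_a = bucket.blob('part-a')
    part_a.upload_from_string(b'AAA\n')
    part_b = bucket.blob('part-b')
    part_b.upload_from_string(b'BBB\n')
    combined = bucket.blob('combined')
    combined.content_type = 'text/plain'
    combined.compose([part_a, part_b])
    return combined.download_as_string()  # expected: b'AAA\nBBB\n'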
| 37.340383 | 78 | 0.650287 |
c4f478ca4f9cf10ef8452424466ba4ad9e9d3a0a
| 33 |
py
|
Python
|
__init__.py
|
YusukeKambara/japan_horse_racing
|
05c2e06fe265c5744b908b8575df260db18a115b
|
[
"MIT"
] | null | null | null |
__init__.py
|
YusukeKambara/japan_horse_racing
|
05c2e06fe265c5744b908b8575df260db18a115b
|
[
"MIT"
] | 1 |
2021-12-13T20:32:18.000Z
|
2021-12-13T20:32:18.000Z
|
__init__.py
|
YusukeKambara/japan_horse_racing
|
05c2e06fe265c5744b908b8575df260db18a115b
|
[
"MIT"
] | null | null | null |
from japan_horse_racing import *
| 16.5 | 32 | 0.848485 |
34d425dee737455cd36ac9ef2f462358c7ee8f1f
| 666 |
py
|
Python
|
src/cmcandy/Python_language_Answers/_0021.py
|
ch98road/leetcode
|
a9b4be54a169b30f6711809b892dd1f79f2a17e7
|
[
"MIT"
] | null | null | null |
src/cmcandy/Python_language_Answers/_0021.py
|
ch98road/leetcode
|
a9b4be54a169b30f6711809b892dd1f79f2a17e7
|
[
"MIT"
] | null | null | null |
src/cmcandy/Python_language_Answers/_0021.py
|
ch98road/leetcode
|
a9b4be54a169b30f6711809b892dd1f79f2a17e7
|
[
"MIT"
] | 1 |
2020-11-26T03:01:12.000Z
|
2020-11-26T03:01:12.000Z
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
head = ListNode()
index = head
while l1 is not None and l2 is not None:
if l1.val <= l2.val:
index.next = l1
l1 = l1.next
else:
index.next = l2
l2 = l2.next
index = index.next
if l1 is not None:
index.next = l1
if l2 is not None:
index.next = l2
return head.next
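# Illustrative usage sketch (an addition, not part of the original solution): build two
# sorted lists, merge them, and read the merged values back. The _build helper is an
# assumption added only for this demo.
def _build(values):
    head = ListNode()
    node = head
    for v in values:
        node.next = ListNode(v)
        node = node.next
    return head.next
if __name__ == "__main__":
    merged = Solution().mergeTwoLists(_build([1, 2, 4]), _build([1, 3, 4]))
    out = []
    while merged is not None:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 1, 2, 3, 4, 4]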
| 25.615385 | 68 | 0.5 |
9c887a55b2dbebd940cdc3a33a3692c47d3f96c4
| 2,341 |
py
|
Python
|
python-syn/pyformance/reporters/opentsdb_reporter.py
|
harnitsignalfx/tf-synthetics
|
059e10b4e22d40899d7784fdc48a6f04c8eec9ec
|
[
"MIT"
] | null | null | null |
python-syn/pyformance/reporters/opentsdb_reporter.py
|
harnitsignalfx/tf-synthetics
|
059e10b4e22d40899d7784fdc48a6f04c8eec9ec
|
[
"MIT"
] | 1 |
2018-12-24T16:51:01.000Z
|
2018-12-24T16:51:01.000Z
|
pyformance/reporters/opentsdb_reporter.py
|
michalten/pyformance
|
34af1f230e3013b07a484c107a87dc61c0d79f42
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from .reporter import Reporter
import base64
import json
if sys.version_info[0] > 2:
import urllib.request as urllib
import urllib.error as urlerror
else:
import urllib2 as urllib
import urllib2 as urlerror
class OpenTSDBReporter(Reporter):
"""
This reporter requires a tuple (application_name, write_key) to put data to opentsdb database
"""
def __init__(self, application_name, write_key, url, registry=None, reporting_interval=10, clock=None, prefix="",
                 tags=None):
super(OpenTSDBReporter, self).__init__(registry=registry,
reporting_interval=reporting_interval,
clock=clock)
self.url = url
self.application_name = application_name
self.write_key = write_key
self.prefix = prefix
self.tags = tags or {}
def report_now(self, registry=None, timestamp=None):
metrics = self._collect_metrics(registry or self.registry, timestamp)
if metrics:
try:
request = urllib.Request(self.url,
data=json.dumps(metrics).encode("utf-8"),
headers={'content-type': "application/json"})
authentication_data = "{0}:{1}".format(self.application_name, self.write_key)
                # b64encode returns bytes; decode so the header value is a plain str
                auth_header = base64.b64encode(authentication_data.encode("utf-8")).decode("ascii")
                request.add_header("Authorization", "Basic {0}".format(auth_header))
urllib.urlopen(request)
except Exception as e:
sys.stderr.write("{0}\n".format(e))
def _collect_metrics(self, registry, timestamp=None):
timestamp = timestamp or int(round(self.clock.time()))
metrics = registry.dump_metrics()
metrics_data = []
for key in metrics.keys():
for value_key in metrics[key].keys():
metrics_data.append({
'metric': "{0}{1}.{2}".format(self.prefix, key, value_key),
'value': metrics[key][value_key],
'timestamp': timestamp,
'tags': self.tags,
})
return metrics_data
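# Illustrative usage sketch (an addition, not part of the original module): the URL,
# credentials and tags below are placeholders; the registry argument is omitted and left
# to the base Reporter's default behaviour.
def _example_usage():
    reporter = OpenTSDBReporter(
        application_name="my-app",
        write_key="secret",
        url="http://opentsdb.example.com/api/put",
        reporting_interval=10,
        prefix="myapp.",
        tags={"host": "worker-1"},
    )
    reporter.report_now()  # push whatever is currently in the registry once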
| 39.016667 | 117 | 0.576677 |
b46b3eab09f637ca23ed2961cf78d56f51aa9450
| 4,997 |
py
|
Python
|
qa/rpc-tests/wallet-hd.py
|
JoeGruffins/absolute
|
7dc6a34acbca84391e57ee754418f8251f99396e
|
[
"MIT"
] | 9 |
2018-02-14T16:09:39.000Z
|
2021-02-01T02:15:51.000Z
|
qa/rpc-tests/wallet-hd.py
|
JoeGruffins/absolute
|
7dc6a34acbca84391e57ee754418f8251f99396e
|
[
"MIT"
] | 4 |
2018-02-22T05:05:42.000Z
|
2018-05-29T09:10:10.000Z
|
qa/rpc-tests/wallet-hd.py
|
JoeGruffins/absolute
|
7dc6a34acbca84391e57ee754418f8251f99396e
|
[
"MIT"
] | 11 |
2018-02-14T17:13:38.000Z
|
2018-04-28T13:43:59.000Z
|
#!/usr/bin/env python2
# coding=utf-8
# ^^^^^^^^^^^^ TODO remove when supporting only Python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletHDTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
self.nodes = start_nodes(2, self.options.tmpdir, [['-usehd=0'], ['-usehd=1', '-keypool=0']])
self.is_network_split = False
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split=False
self.sync_all()
def run_test (self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
stop_node(self.nodes[1],1)
try:
start_node(1, self.options.tmpdir, ['-usehd=0'])
raise AssertionError("Must not allow to turn off HD on an already existing HD wallet")
except Exception as e:
assert("absoluted exited with status 1 during initialization" in str(e))
# assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
# self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep chainid
chainid = self.nodes[1].getwalletinfo()['hdchainid']
assert_equal(len(chainid), 64)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/0")  # first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/44'/1'/0'/0/"+str(i+1))
assert_equal(hd_info["hdchainid"], chainid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/1")  # second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
print("Restore backup ...")
stop_node(self.nodes[1],1)
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
#connect_nodes_bi(self.nodes, 0, 1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/44'/1'/0'/0/"+str(_+1))
assert_equal(hd_info_2["hdchainid"], chainid)
assert_equal(hd_add, hd_add_2)
# Needs rescan
stop_node(self.nodes[1],1)
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0', '-rescan'])
#connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure its using the internal chain for the changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout'];
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:13], "m/44'/1'/0'/1")
if __name__ == '__main__':
WalletHDTest().main ()
| 43.833333 | 108 | 0.638983 |
6458da4c4bc0bdec37b45dfafdd7fd32a7c8f500
| 276 |
py
|
Python
|
dataset/dataset.py
|
CyberFuffa/backtrader-binance-bot
|
d9497228281f4b62ea271960d4f36a7071cd28f8
|
[
"MIT"
] | 237 |
2019-04-08T06:26:52.000Z
|
2022-03-26T17:14:33.000Z
|
dataset/dataset.py
|
CyberFuffa/backtrader-binance-bot
|
d9497228281f4b62ea271960d4f36a7071cd28f8
|
[
"MIT"
] | 8 |
2019-04-20T14:01:46.000Z
|
2021-03-06T21:44:22.000Z
|
dataset/dataset.py
|
CyberFuffa/backtrader-binance-bot
|
d9497228281f4b62ea271960d4f36a7071cd28f8
|
[
"MIT"
] | 119 |
2019-04-26T08:50:03.000Z
|
2022-03-24T16:09:42.000Z
|
import backtrader as bt
class CustomDataset(bt.feeds.GenericCSVData):
params = (
('time', -1),
('datetime', 0),
('open', 1),
('high', 2),
('low', 3),
('close', 4),
('volume', 5),
('openinterest', 6),
)
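# Illustrative usage sketch (an addition, not part of the original module): feeding a CSV
# file into a plain backtrader run. The file name, date format and date range below are
# placeholders.
def _example_usage():
    import datetime
    cerebro = bt.Cerebro()
    data = CustomDataset(
        dataname="BTCUSDT.csv",
        dtformat="%Y-%m-%d %H:%M:%S",
        fromdate=datetime.datetime(2019, 1, 1),
        todate=datetime.datetime(2019, 2, 1),
    )
    cerebro.adddata(data)
    cerebro.run()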
| 18.4 | 45 | 0.431159 |
a6e3b675dfa51a4789138f774930a5853e1b16e7
| 17,802 |
py
|
Python
|
runtime/hetdesrun/runtime/engine/plain/workflow.py
|
pka1024/hetida-designer
|
3a191c512dfb37a897b89ab06a9f6ac18b3d9110
|
[
"MIT"
] | null | null | null |
runtime/hetdesrun/runtime/engine/plain/workflow.py
|
pka1024/hetida-designer
|
3a191c512dfb37a897b89ab06a9f6ac18b3d9110
|
[
"MIT"
] | null | null | null |
runtime/hetdesrun/runtime/engine/plain/workflow.py
|
pka1024/hetida-designer
|
3a191c512dfb37a897b89ab06a9f6ac18b3d9110
|
[
"MIT"
] | null | null | null |
from typing import (
Protocol,
Dict,
Tuple,
Any,
List,
Callable,
Coroutine,
Optional,
Union,
)
import logging
from inspect import signature, Parameter
from cached_property import cached_property # async compatible variant
from pydantic import ValidationError
from hetdesrun.datatypes import NamedDataTypedValue, parse_dynamically_from_datatypes
from hetdesrun.runtime import runtime_component_logger
from hetdesrun.runtime.logging import execution_context_filter
from hetdesrun.runtime.engine.plain.execution import run_func_or_coroutine
from hetdesrun.runtime.exceptions import (
RuntimeExecutionError,
CircularDependency,
MissingOutputException,
MissingInputSource,
WorkflowInputDataValidationError,
)
logger = logging.getLogger(__name__)
logger.addFilter(execution_context_filter)
runtime_component_logger.addFilter(execution_context_filter)
class Node(Protocol):
"""Protocol for common structural type features for workflow (sub)nodes"""
# Note: The result attribute should be defined to be a cached_property in actual implementations
# to avoid recomputing the result several times when values are accessed as inputs for more than
# one other node.
_in_computation: bool = False
operator_hierarchical_id: str = "UNKNOWN"
operator_hierarchical_name: str = "UNKNOWN"
@cached_property
async def result(self) -> Dict[str, Any]: # Outputs can have any type
...
def add_inputs(self, new_inputs: Dict[str, Tuple["Node", str]]) -> None:
...
class ComputationNode: # pylint: disable=too-many-instance-attributes
"""Represents a function computation with multiple outputs together with input information
Inputs and outputs are not made explicit here and the information where the inputs
come from does not need to be complete at initialization time. To add input information later
use the add_inputs method. Completeness of input is checked when starting a function computation
and incomplete input information then leads to an appropriate exception.
    Which inputs are actually required depends implicitly on the actual function. This allows
the function to have true optional (keyword) arguments.
The result is a cached property such that computation only runs once during the node's lifetime.
    The computation collects results from other computation nodes' result properties and therefore may
trigger a self-organizing computation graph execution.
Circular dependencies are detected during computation and an appropriate exception is raised.
"""
def __init__(
self,
func: Union[Coroutine, Callable],
inputs: Optional[Dict[str, Tuple[Node, str]]] = None,
operator_hierarchical_id: str = "UNKNOWN",
component_id: str = "UNKNOWN",
operator_hierarchical_name: str = "UNKNOWN",
component_name: str = "UNKNOWN",
) -> None:
"""
inputs is a dict {input_name : (another_node, output_name)}, i.e. mapping input names to
pairs (another_node, output_name). Inputs do not need to be provided at initialization.
func is a function or coroutine function with the input_names as keyword arguments. It
should output a dict of result values.
operator_hierarchical_id, component_id, operator_hierarchical_name and component_name can be
provided to enrich logging and exception messages.
The computation node inputs may or may not be complete, i.e. all required inputs are given
or not. If not complete, computation of result may simply fail, e.g. with
TypeError: <lambda>() missing 1 required positional argument: 'base_value'
or
TypeError: f() missing 1 required keyword-only argument: 'base_value'
However the availability of all inputs is checked during execution by inspecting the
provided func and comparing to currently set inputs. MissingInputSource exception is
raised if inputs are missing.
"""
self.inputs: Dict[str, Tuple[Node, str]] = {}
if inputs is not None:
self.add_inputs(inputs)
self.func = func
self.required_params = self._infer_required_params()
self._in_computation = False # to detect cycles
self.operator_hierarchical_id = operator_hierarchical_id
self.operator_hierarchical_name = operator_hierarchical_name
self.component_id = component_id
self.component_name = component_name
self._in_computation = False
def add_inputs(self, new_inputs: Dict[str, Tuple[Node, str]]) -> None:
self.inputs.update(new_inputs)
def _infer_required_params(self) -> List[str]:
"""Infer the function params which are actually required (i.e. no default value)"""
kwargable_params = [
param
for param in signature(self.func).parameters.values() # type: ignore
if (param.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY))
]
# only non-default-valued params are required:
return [
param.name for param in kwargable_params if param.default is Parameter.empty
]
def all_required_inputs_set(self) -> bool:
return set(self.required_params).issubset(set(self.inputs.keys()))
def _check_inputs(self) -> None:
"""Check and handle missing inputs"""
if not self.all_required_inputs_set():
logger.info("Computation node execution failed due to missing input source")
raise MissingInputSource(
f"Inputs of computation node operator {self.operator_hierarchical_id} are missing"
).set_context(
operator_hierarchical_id=self.operator_hierarchical_id,
operator_hierarchical_name=self.operator_hierarchical_name,
)
async def _gather_data_from_inputs(self) -> Dict[str, Any]:
"""Get data from inputs and handle possible cycles"""
input_value_dict: Dict[str, Any] = {}
for (input_name, (another_node, output_name)) in self.inputs.items():
# Cycle detection logic
if another_node._in_computation: # pylint: disable=protected-access
msg = (
f"Circular Dependency detected at operator {self.operator_hierarchical_id}"
f" whith input '{input_name}' pointing to output '{output_name}'"
f" of operator {another_node.operator_hierarchical_id}"
)
logger.info(msg)
raise CircularDependency(msg).set_context(
operator_hierarchical_id=self.operator_hierarchical_id,
operator_hierarchical_name=self.operator_hierarchical_name,
)
# actually get input data from other nodes
try:
input_value_dict[input_name] = (await another_node.result)[output_name]
except KeyError as e:
# possibly an output_name missing in the result dict of one of the providing nodes!
logger.info(
"Execution failed due to missing output of a node",
exc_info=True,
)
raise MissingOutputException(
"Could not obtain output result from another node while preparing to "
"run operator"
).set_context(
operator_hierarchical_id=self.operator_hierarchical_id,
operator_hierarchical_name=self.operator_hierarchical_name,
) from e
return input_value_dict
async def _run_comp_func(self, input_values: Dict[str, Any]) -> Dict[str, Any]:
"""Running the component func with exception handling"""
try:
function_result: Dict[str, Any] = await run_func_or_coroutine(
self.func, input_values # type: ignore
)
function_result = function_result if function_result is not None else {}
except RuntimeExecutionError as e: # user code may raise runtime execution errors
e.set_context(
self.operator_hierarchical_id, self.operator_hierarchical_name
)
logger.info(
(
"User raised Runtime execution exception during component execution"
" of operator %s with UUID %s of component %s with UUID %s"
),
self.operator_hierarchical_name,
self.operator_hierarchical_id,
self.component_name,
self.component_id,
exc_info=True,
)
raise
except Exception as e: # uncaught exceptions from user code
logger.info(
"Exception during Component execution of component instance %s",
self.operator_hierarchical_name,
exc_info=True,
)
raise RuntimeExecutionError(
f"Exception during Component execution of "
f"component instance {self.operator_hierarchical_name}"
f" (operator hierarchical id: {self.operator_hierarchical_id}):\n{str(e)}"
).set_context(
self.operator_hierarchical_id, self.operator_hierarchical_name
) from e
if not isinstance(
function_result, dict
): # user functions may return completely unknown type
msg = (
f"Component function of component instance {self.operator_hierarchical_id} from "
f"component {self.operator_hierarchical_name} did not return an output dict!"
)
logger.info(msg)
raise RuntimeExecutionError(msg).set_context(
self.operator_hierarchical_id, self.operator_hierarchical_name
)
return function_result
async def _compute_result(self) -> Dict[str, Any]:
# set filter for contextualized logging
execution_context_filter.bind_context(
currently_executed_instance_id=self.operator_hierarchical_id,
currently_executed_component_id=self.component_id,
currently_executed_component_node_name=self.operator_hierarchical_name,
)
logger.info(
"Starting computation for operator %s of type component with operator id %s",
self.operator_hierarchical_name,
self.operator_hierarchical_id,
)
self._in_computation = True
self._check_inputs()
# Gather data from input sources (detects cycles):
input_values = await self._gather_data_from_inputs()
# Actual execution of current node
function_result = await self._run_comp_func(input_values)
# cleanup
self._in_computation = False
execution_context_filter.clear_context()
return function_result
@cached_property # compute each nodes result only once
async def result(self) -> Dict[str, Any]:
return await self._compute_result()
class Workflow:
"""Grouping computation nodes and other workflows and handling common input/output interface
    This class does not ensure that the interface actually handles all loose ends.
"""
def __init__(
self,
sub_nodes: List[Node],
input_mappings: Dict[str, Tuple[Node, str]], # map wf input to sub_node
output_mappings: Dict[
str, Tuple[Node, str]
], # map sub_node outputs to wf outputs
inputs: Optional[Dict[str, Tuple[Node, str]]] = None,
operator_hierarchical_id: str = "UNKNOWN",
operator_hierarchical_name: str = "UNKNOWN",
):
"""Initialize new Workflow
Args:
sub_nodes (List[Node]): The sub nodes of this Workflow. Workflows can have workflows
as subnodes.
input_mappings (Dict[str, Tuple[Node, str]]): How inputs of the workflow are mapped
to inputs of sub nodes. Maps the workflow input name to a pair consisting of a
Node (which should be subnode) and the name of an input of that node.
output_mappings (Dict[str, Tuple[Node, str]]): How outputs of sub nodes are mapped
to workflow outputs. Maps the workflow output name to a pair consisting of a
Node (which should be a subnode) and the name of an output of that node.
inputs: Optional[Dict[str, Tuple[Node, str]]]: Inputs which this workflow gets from
outputs of other Nodes (possibly outside the workflow). This is for example used
for workflows that are sub nodes of workflows. This does not need to be provided
at initialization -- the add_inputs method may be used instead at a later point.
Therefore defaults to None.
operator_hierarchical_id (str, optional): Used in logging and exception messages.
Defaults to "UNKNOWN".
            operator_hierarchical_name (str, optional): Used in logging and exception messages. Defaults to
"UNKNOWN".
"""
self.sub_nodes = sub_nodes
self.input_mappings = (
input_mappings # dict wf_input_name : (sub_node, sub_node_input_name)
)
self.output_mappings = (
output_mappings # dict wf_output_name : (sub_node, sub_node_output_name)
)
self.inputs: Dict[str, Tuple[Node, str]] = {}
if inputs is not None:
self.add_inputs(inputs)
self._in_computation: bool = False
self.operator_hierarchical_id = operator_hierarchical_id
self.operator_hierarchical_name = operator_hierarchical_name
def add_inputs(self, new_inputs: Dict[str, Tuple[Node, str]]) -> None:
self.inputs.update(new_inputs)
# wire them to the subnodes, eventually overwriting existing wirings
for key, (another_node, output_name) in new_inputs.items():
sub_node, sub_node_input_name = self.input_mappings[key]
sub_node.add_inputs({sub_node_input_name: (another_node, output_name)})
def add_constant_providing_node(
self,
values: List[NamedDataTypedValue],
add_new_provider_node_to_workflow: bool = True,
id_suffix: str = "",
) -> None:
"""Add a node with no inputs providing workflow input data"""
try:
parsed_values = parse_dynamically_from_datatypes(values).dict()
except ValidationError as e:
raise WorkflowInputDataValidationError(
"The provided data or some constant values could not be parsed into the "
"respective workflow input datatypes"
) from e
Const_Node = ComputationNode(
func=lambda: parsed_values,
inputs={},
operator_hierarchical_name="constant_provider",
operator_hierarchical_id=self.operator_hierarchical_id
+ ":constant_provider"
+ "_"
+ id_suffix,
)
if add_new_provider_node_to_workflow: # make it part of the workflow
self.sub_nodes.append(Const_Node)
self.add_inputs({key: (Const_Node, key) for key in parsed_values.keys()})
def _wire_workflow_inputs(self) -> None:
"""Wire the current inputs via the current input mappings to the appropriate sub nodes"""
for (
wf_inp_name,
(sub_node, sub_node_input_name),
) in self.input_mappings.items():
sub_node.add_inputs({sub_node_input_name: self.inputs[wf_inp_name]})
@cached_property
async def result(self) -> Dict[str, Any]:
self._wire_workflow_inputs()
execution_context_filter.bind_context(
currently_executed_instance_id=self.operator_hierarchical_id,
currently_executed_component_id=None,
currently_executed_component_node_name=self.operator_hierarchical_name,
)
logger.info(
"Starting computation for operator %s of type workflow with operator id %s",
self.operator_hierarchical_name,
self.operator_hierarchical_id,
)
# gather result from workflow operators
results = {}
for (
wf_output_name,
(
sub_node,
sub_node_output_name,
),
) in self.output_mappings.items():
try:
results[wf_output_name] = (await sub_node.result)[sub_node_output_name]
except KeyError as e:
# possibly an output_name missing in the result dict of one of the providing nodes!
logger.info(
"Execution failed due to missing output of a node",
exc_info=True,
)
raise MissingOutputException(
"Could not obtain output result from another node while preparing to "
"run operator"
).set_context(
operator_hierarchical_id=self.operator_hierarchical_id,
operator_hierarchical_name="workflow",
) from e
# cleanup
execution_context_filter.clear_context()
return results
def obtain_all_nodes(wf: Workflow) -> List[ComputationNode]:
all_nodes: List[ComputationNode] = []
for node in wf.sub_nodes:
if isinstance(node, Workflow):
all_nodes = all_nodes + obtain_all_nodes(node)
else:
assert isinstance(node, ComputationNode) # hint for mypy # nosec
all_nodes.append(node)
return all_nodes
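# Illustrative sketch (an addition, not part of the original module): wiring two
# computation nodes together by hand and awaiting the result, mirroring how Workflow wires
# its sub nodes. The names and values are made up; the lambdas stand in for component code.
async def _example_graph():
    source = ComputationNode(func=lambda: {"x": 2}, operator_hierarchical_name="source")
    doubler = ComputationNode(
        func=lambda x: {"y": x * 2},
        inputs={"x": (source, "x")},
        operator_hierarchical_name="doubler",
    )
    return (await doubler.result)["y"]  # expected: 4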
| 41.018433 | 100 | 0.647849 |
bdf8804fea3a0f51e9ad5fda4690bb876a6be2af
| 1,144 |
py
|
Python
|
WoC/34/Magic Cards.py
|
BlackVS/Hackerrank
|
017159a2354673dbca14e409e088798c9a54d7e1
|
[
"MIT"
] | null | null | null |
WoC/34/Magic Cards.py
|
BlackVS/Hackerrank
|
017159a2354673dbca14e409e088798c9a54d7e1
|
[
"MIT"
] | null | null | null |
WoC/34/Magic Cards.py
|
BlackVS/Hackerrank
|
017159a2354673dbca14e409e088798c9a54d7e1
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import sys
from collections import *
from itertools import *
from sys import stdin
input = stdin.readline
#sys.stdin = open('G:\hackerrank\WoC\\34\\5. Magic cards\\t0.txt', 'r')
def solve():
N,M,Q=map(int,input().split())
CMAX = M*(M+1)*(2*M+1)//6
maxl = M.bit_length()
cards=[]
for _ in range(N):
ci=map(int,input().split())
next(ci)
cards.append( set(c-1 for c in ci) )
RES = dict()
for a in range(N):
mask = 1
        # cardsMasks[card]: bitmask of which rows in the window [a, b] contain this card
        cardsMasks = [0]*M
        for b in range(a, min(a+maxl, N)):
            allres = [0] * (1<<(b-a+1))
            for card in range(M):
                cardmask = cardsMasks[card]
                if card in cards[b]:
                    cardmask |= mask
                cardsMasks[card] = cardmask
allres[cardmask] += (card+1)*(card+1)
res = min(allres)
RES[a,b] = res
mask<<= 1
for _ in range(Q):
l,r=map(lambda x:int(x)-1,input().split())
l,r=min(l,r),max(l,r)
if r-l+1 > maxl:
print(CMAX)
else:
print(CMAX-RES[l,r])
solve()
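# Clarifying note (an addition, inferred from the parsing code above): stdin is expected as
#   N M Q
#   N card lines, each "k i1 ... ik" (k entries, 1-based card numbers up to M),
#   Q query lines, each "l r" (1-based indices into the N card lines, inclusive).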
| 25.422222 | 71 | 0.482517 |
14e25c187ac9d507547a8f8007ab34d29ac360f4
| 7,528 |
py
|
Python
|
kubernetes/client/models/v1beta1_controller_revision.py
|
iamneha/python
|
5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0
|
[
"Apache-2.0"
] | 1 |
2019-02-17T15:28:39.000Z
|
2019-02-17T15:28:39.000Z
|
kubernetes/client/models/v1beta1_controller_revision.py
|
iamneha/python
|
5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1beta1_controller_revision.py
|
iamneha/python
|
5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1ControllerRevision(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'data': 'RuntimeRawExtension',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'revision': 'int'
}
attribute_map = {
'api_version': 'apiVersion',
'data': 'data',
'kind': 'kind',
'metadata': 'metadata',
'revision': 'revision'
}
def __init__(self, api_version=None, data=None, kind=None, metadata=None, revision=None):
"""
V1beta1ControllerRevision - a model defined in Swagger
"""
self._api_version = None
self._data = None
self._kind = None
self._metadata = None
self._revision = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if data is not None:
self.data = data
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.revision = revision
@property
def api_version(self):
"""
Gets the api_version of this V1beta1ControllerRevision.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1ControllerRevision.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1ControllerRevision.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1ControllerRevision.
:type: str
"""
self._api_version = api_version
@property
def data(self):
"""
Gets the data of this V1beta1ControllerRevision.
Data is the serialized representation of the state.
:return: The data of this V1beta1ControllerRevision.
:rtype: RuntimeRawExtension
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this V1beta1ControllerRevision.
Data is the serialized representation of the state.
:param data: The data of this V1beta1ControllerRevision.
:type: RuntimeRawExtension
"""
self._data = data
@property
def kind(self):
"""
Gets the kind of this V1beta1ControllerRevision.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1ControllerRevision.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1ControllerRevision.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1ControllerRevision.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1ControllerRevision.
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:return: The metadata of this V1beta1ControllerRevision.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1ControllerRevision.
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1beta1ControllerRevision.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def revision(self):
"""
Gets the revision of this V1beta1ControllerRevision.
Revision indicates the revision of the state represented by Data.
:return: The revision of this V1beta1ControllerRevision.
:rtype: int
"""
return self._revision
@revision.setter
def revision(self, revision):
"""
Sets the revision of this V1beta1ControllerRevision.
Revision indicates the revision of the state represented by Data.
:param revision: The revision of this V1beta1ControllerRevision.
:type: int
"""
if revision is None:
raise ValueError("Invalid value for `revision`, must not be `None`")
self._revision = revision
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1ControllerRevision):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
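# Illustrative usage sketch (an addition, not part of the generated module): build a
# revision object and serialize it; metadata and data are left unset here for brevity.
def _example_usage():
    revision = V1beta1ControllerRevision(
        api_version="apps/v1beta1",
        kind="ControllerRevision",
        revision=3,
    )
    return revision.to_dict()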
| 31.366667 | 281 | 0.616897 |
9b9b36b5172eec6a60466972969fd09943dbabf9
| 5,296 |
py
|
Python
|
libraries/botbuilder-dialogs/botbuilder/dialogs/_user_token_access.py
|
andreikop/botbuilder-python
|
5e073e0c68fcbdc558133bdbd59a02453e597abe
|
[
"MIT"
] | 388 |
2019-05-07T15:53:21.000Z
|
2022-03-28T20:29:46.000Z
|
libraries/botbuilder-dialogs/botbuilder/dialogs/_user_token_access.py
|
andreikop/botbuilder-python
|
5e073e0c68fcbdc558133bdbd59a02453e597abe
|
[
"MIT"
] | 1,286 |
2019-05-07T23:38:19.000Z
|
2022-03-31T10:44:16.000Z
|
libraries/botbuilder-dialogs/botbuilder/dialogs/_user_token_access.py
|
andreikop/botbuilder-python
|
5e073e0c68fcbdc558133bdbd59a02453e597abe
|
[
"MIT"
] | 168 |
2019-05-14T20:23:25.000Z
|
2022-03-16T06:49:14.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC
from botbuilder.core import TurnContext
from botbuilder.core.bot_framework_adapter import TokenExchangeRequest
from botbuilder.core.oauth import ConnectorClientBuilder, ExtendedUserTokenProvider
from botbuilder.schema import TokenResponse
from botframework.connector import ConnectorClient
from botframework.connector.auth import ClaimsIdentity, ConnectorFactory
from botframework.connector.auth.user_token_client import UserTokenClient
from botframework.connector.token_api.models import SignInUrlResponse
from .prompts.oauth_prompt_settings import OAuthPromptSettings
class _UserTokenAccess(ABC):
@staticmethod
async def get_user_token(
turn_context: TurnContext, settings: OAuthPromptSettings, magic_code: str
) -> TokenResponse:
user_token_client: UserTokenClient = turn_context.turn_state.get(
UserTokenClient.__name__, None
)
if user_token_client:
return await user_token_client.get_user_token(
turn_context.activity.from_property.id,
settings.connection_name,
turn_context.activity.channel_id,
magic_code,
)
if isinstance(turn_context.adapter, ExtendedUserTokenProvider):
return await turn_context.adapter.get_user_token(
turn_context,
settings.connection_name,
magic_code,
settings.oath_app_credentials,
)
raise TypeError("OAuthPrompt is not supported by the current adapter")
@staticmethod
async def get_sign_in_resource(
turn_context: TurnContext, settings: OAuthPromptSettings
) -> SignInUrlResponse:
user_token_client: UserTokenClient = turn_context.turn_state.get(
UserTokenClient.__name__, None
)
if user_token_client:
return await user_token_client.get_sign_in_resource(
settings.connection_name, turn_context.activity, None
)
if isinstance(turn_context.adapter, ExtendedUserTokenProvider):
return await turn_context.adapter.get_sign_in_resource_from_user_and_credentials(
turn_context,
settings.oath_app_credentials,
settings.connection_name,
turn_context.activity.from_property.id
if turn_context.activity and turn_context.activity.from_property
else None,
)
raise TypeError("OAuthPrompt is not supported by the current adapter")
@staticmethod
async def sign_out_user(turn_context: TurnContext, settings: OAuthPromptSettings):
user_token_client: UserTokenClient = turn_context.turn_state.get(
UserTokenClient.__name__, None
)
if user_token_client:
return await user_token_client.sign_out_user(
turn_context.activity.from_property.id,
settings.connection_name,
turn_context.activity.channel_id,
)
if isinstance(turn_context.adapter, ExtendedUserTokenProvider):
return await turn_context.adapter.sign_out_user(
turn_context,
settings.connection_name,
turn_context.activity.from_property.id
if turn_context.activity and turn_context.activity.from_property
else None,
settings.oath_app_credentials,
)
raise TypeError("OAuthPrompt is not supported by the current adapter")
@staticmethod
async def exchange_token(
turn_context: TurnContext,
settings: OAuthPromptSettings,
token_exchange_request: TokenExchangeRequest,
) -> TokenResponse:
user_token_client: UserTokenClient = turn_context.turn_state.get(
UserTokenClient.__name__, None
)
user_id = turn_context.activity.from_property.id
if user_token_client:
channel_id = turn_context.activity.channel_id
return await user_token_client.exchange_token(
user_id, channel_id, token_exchange_request,
)
if isinstance(turn_context.adapter, ExtendedUserTokenProvider):
return await turn_context.adapter.exchange_token(
turn_context, settings.connection_name, user_id, token_exchange_request,
)
raise TypeError("OAuthPrompt is not supported by the current adapter")
@staticmethod
async def create_connector_client(
turn_context: TurnContext,
service_url: str,
claims_identity: ClaimsIdentity,
audience: str,
) -> ConnectorClient:
connector_factory: ConnectorFactory = turn_context.turn_state.get(
ConnectorFactory.__name__, None
)
if connector_factory:
return await connector_factory.create(service_url, audience)
if isinstance(turn_context.adapter, ConnectorClientBuilder):
return await turn_context.adapter.create_connector_client(
service_url, claims_identity, audience,
)
raise TypeError("OAuthPrompt is not supported by the current adapter")
| 41.054264 | 93 | 0.684101 |
03e2e3c2572281a7220cad71d31759826df4e371
| 2,993 |
py
|
Python
|
Assignment1/main.py
|
njucjc/machine-learning
|
1adcbad8d1e9f9a187036bec01d1a5ce798e44e0
|
[
"MIT"
] | null | null | null |
Assignment1/main.py
|
njucjc/machine-learning
|
1adcbad8d1e9f9a187036bec01d1a5ce798e44e0
|
[
"MIT"
] | null | null | null |
Assignment1/main.py
|
njucjc/machine-learning
|
1adcbad8d1e9f9a187036bec01d1a5ce798e44e0
|
[
"MIT"
] | null | null | null |
import numpy as np
import argparse, os
from utils import load_data, make_path
from pca import pca_train, pca_test
from svd import svd_train, svd_test
from isomap import isomap
from knn import knn
parser = argparse.ArgumentParser(description='Dimensionality Reduction')
parser.add_argument('--mode', type=str, default='train', help='train/test')
parser.add_argument('--train_data', type=str, default='sonar', help='train data source')
parser.add_argument('--test_data', type=str, default='sonar', help='test data source')
parser.add_argument('--alg', type=str, default='pca', help='pca/svd/isomap')
parser.add_argument('--output', type=str, default='output', help='result output dir')
parser.add_argument('--dim', type=int, default= 10, help='target dim')
args = parser.parse_args()
make_path(args.output)
train_data_path = os.path.join('data', args.train_data + '-train.txt')
test_data_path = os.path.join('data', args.test_data + '-test.txt')
def train():
train_data, _ = load_data(train_data_path)
if args.alg == 'pca':
mean_vecs, eig_vecs = pca_train(np.array(train_data), args.dim)
np.savez(os.path.join(args.output, 'pca' + str(args.dim) + '.npz'), mean_vecs=mean_vecs, eig_vecs=eig_vecs)
elif args.alg == 'svd':
v = svd_train(np.array(train_data), args.dim)
np.savez(os.path.join(args.output, 'svd' + str(args.dim) + '.npz'), v=v)
else:
pass
def test():
test_data, test_labels = load_data(test_data_path)
train_data, train_labels = load_data(train_data_path)
if args.alg == 'pca':
saved_data = np.load(os.path.join(args.output, args.alg + str(args.dim) + '.npz'))
mean_vecs = saved_data['mean_vecs']
eig_vecs = saved_data['eig_vecs']
reduction_test_data = pca_test(np.array(test_data), mean_vecs, eig_vecs)
reduction_train_data = pca_test(np.array(train_data), mean_vecs, eig_vecs)
elif args.alg == 'svd':
saved_data = np.load(os.path.join(args.output, args.alg + str(args.dim) + '.npz'))
v = saved_data['v']
reduction_test_data = svd_test(np.array(test_data), v)
reduction_train_data = svd_test(np.array(train_data), v)
else:
merge_data = train_data + test_data
result = isomap(np.array(merge_data), n=args.dim)
reduction_test_data = result[len(train_data):]
reduction_train_data = result[:len(train_data)]
    acc = evaluate(reduction_test_data, test_labels, reduction_train_data, train_labels)
    print('ACC = ' + str(acc))
def evaluate(test_data, test_data_label, train_data, train_data_label):
predict_label = []
for x in test_data:
label = knn(x, train_data, train_data_label, 1)
predict_label.append(label)
right = 0
size = len(test_data_label)
for i in range(size):
if predict_label[i] == test_data_label[i]:
right = right + 1
return right / size
if args.mode == 'train':
train()
elif args.mode == 'test':
test()
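# Illustrative invocations (an addition, not part of the original script); the flags mirror
# the argparse definitions above and data files are expected at data/<name>-train.txt and
# data/<name>-test.txt:
#   python main.py --mode train --alg pca --dim 10 --train_data sonar
#   python main.py --mode test --alg pca --dim 10 --train_data sonar --test_data sonar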
| 34.402299 | 115 | 0.676245 |
b9b103ade05f6b45e8770fe779edea288e4788e3
| 2,959 |
py
|
Python
|
logger/visualization.py
|
dong100136/pytorch-template
|
dc67fbc93401767d73a9307626bbf24ef259ec17
|
[
"MIT"
] | null | null | null |
logger/visualization.py
|
dong100136/pytorch-template
|
dc67fbc93401767d73a9307626bbf24ef259ec17
|
[
"MIT"
] | null | null | null |
logger/visualization.py
|
dong100136/pytorch-template
|
dc67fbc93401767d73a9307626bbf24ef259ec17
|
[
"MIT"
] | null | null | null |
import importlib
from datetime import datetime
class TensorboardWriter():
def __init__(self, log_dir, logger, enabled):
self.writer = None
self.selected_module = ""
if enabled:
log_dir = str(log_dir)
# Retrieve vizualization writer.
succeeded = False
for module in ["torch.utils.tensorboard", "tensorboardX"]:
try:
self.writer = importlib.import_module(module).SummaryWriter(log_dir)
succeeded = True
break
except ImportError:
succeeded = False
self.selected_module = module
if not succeeded:
message = "Warning: visualization (Tensorboard) is configured to use, but currently not installed on " \
"this machine. Please install TensorboardX with 'pip install tensorboardx', upgrade PyTorch to " \
"version >= 1.1 to use 'torch.utils.tensorboard' or turn off the option in the 'config.json' file."
logger.warning(message)
self.step = 0
self.mode = ''
self.tb_writer_ftns = {
'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',
'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
}
self.tag_mode_exceptions = {'add_histogram', 'add_embedding'}
self.timer = datetime.now()
def set_step(self, step, mode='train'):
self.mode = mode
self.step = step
if step == 0:
self.timer = datetime.now()
return 0
else:
duration = datetime.now() - self.timer
self.add_scalar('steps_per_sec', 1 / duration.total_seconds())
self.timer = datetime.now()
return 1/duration.total_seconds()
def __getattr__(self, name):
"""
If visualization is configured to use:
return add_data() methods of tensorboard with additional information (step, tag) added.
Otherwise:
return a blank function handle that does nothing
"""
if name in self.tb_writer_ftns:
add_data = getattr(self.writer, name, None)
def wrapper(tag, data, *args, **kwargs):
if add_data is not None:
# add mode(train/valid) tag
if name not in self.tag_mode_exceptions:
tag = '{}/{}'.format(tag, self.mode)
add_data(tag, data, self.step, *args, **kwargs)
return wrapper
else:
# default action for returning methods defined in this class, set_step() for instance.
try:
attr = object.__getattr__(name)
except AttributeError:
raise AttributeError("type object '{}' has no attribute '{}'".format(self.selected_module, name))
return attr
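# Illustrative usage sketch (an addition, not part of the original module): the log
# directory is a placeholder; if neither tensorboard backend is importable the writer
# falls back to no-op methods after logging a warning.
def _example_usage():
    import logging
    writer = TensorboardWriter("saved/log/demo", logging.getLogger(__name__), enabled=True)
    for step in range(3):
        writer.set_step(step, mode="train")
        writer.add_scalar("loss", 1.0 / (step + 1))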
| 38.934211 | 120 | 0.560662 |
9dc0c8ca8f459e1ea410aed70a6dee1f1ccb0c4c
| 1,254 |
py
|
Python
|
venus/conf/elasticsearch.py
|
openstack/venus
|
7c3ec56142620cb2b3ff1562947c65a266401d57
|
[
"Apache-2.0"
] | null | null | null |
venus/conf/elasticsearch.py
|
openstack/venus
|
7c3ec56142620cb2b3ff1562947c65a266401d57
|
[
"Apache-2.0"
] | null | null | null |
venus/conf/elasticsearch.py
|
openstack/venus
|
7c3ec56142620cb2b3ff1562947c65a266401d57
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Inspur
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
elasticsearch_group = cfg.OptGroup(name='elasticsearch',
title='elasticsearch')
elasticsearch_opts = [
cfg.StrOpt('url',
default='',
help='the es url'),
cfg.StrOpt('username',
default='',
help='the es username'),
cfg.StrOpt('password',
default='',
help='the es password'),
cfg.IntOpt('es_index_days',
default=30,
help='the es log store days')
]
def register_opts(conf):
conf.register_group(elasticsearch_group)
conf.register_opts(elasticsearch_opts, elasticsearch_group)
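# Illustrative usage sketch (an addition, not part of the original module): registering the
# group on a fresh ConfigOpts instance and reading an option back with its default value.
def _example_usage():
    conf = cfg.ConfigOpts()
    register_opts(conf)
    conf([])  # parse an empty argument list so defaults become readable
    return conf.elasticsearch.url  # '' unless overridden in a config file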
| 32.153846 | 75 | 0.652313 |
de2a1e997df75afd5b831cf696a403dbd6cf5d4a
| 16,113 |
py
|
Python
|
posthog/models/event.py
|
bmann/posthog-foss
|
c794bb2666ce85cdc4b87940d2cd14604db8172e
|
[
"MIT"
] | 1 |
2020-12-08T04:04:52.000Z
|
2020-12-08T04:04:52.000Z
|
posthog/models/event.py
|
yaoshuyin/posthog
|
1835174c7b3015d0c47260dff98d4a752dc0abde
|
[
"MIT"
] | null | null | null |
posthog/models/event.py
|
yaoshuyin/posthog
|
1835174c7b3015d0c47260dff98d4a752dc0abde
|
[
"MIT"
] | null | null | null |
import copy
import datetime
import re
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple, Union
import celery
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.db import connection, models, transaction
from django.db.models import Exists, F, OuterRef, Prefetch, Q, QuerySet, Subquery
from django.forms.models import model_to_dict
from django.utils import timezone
from posthog.ee import is_ee_enabled
from .action import Action
from .action_step import ActionStep
from .element import Element
from .element_group import ElementGroup
from .filters import Filter
from .person import Person, PersonDistinctId
from .team import Team
from .utils import namedtuplefetchall
attribute_regex = r"([a-zA-Z]*)\[(.*)=[\'|\"](.*)[\'|\"]\]"
LAST_UPDATED_TEAM_ACTION: Dict[int, datetime.datetime] = {}
TEAM_EVENT_ACTION_QUERY_CACHE: Dict[int, Dict[str, tuple]] = defaultdict(dict)
# TEAM_EVENT_ACTION_QUERY_CACHE looks like team_id -> event ex('$pageview') -> query
TEAM_ACTION_QUERY_CACHE: Dict[int, str] = {}
class SelectorPart(object):
direct_descendant = False
unique_order = 0
def _unescape_class(self, class_name):
# separate all double slashes "\\" (replace them with "\") and remove all single slashes between them
return "\\".join([p.replace("\\", "") for p in class_name.split("\\\\")])
def __init__(self, tag: str, direct_descendant: bool, escape_slashes: bool):
self.direct_descendant = direct_descendant
self.data: Dict[str, Union[str, List]] = {}
self.ch_attributes: Dict[str, Union[str, List]] = {} # attributes for CH
result = re.search(attribute_regex, tag)
if result and "[id=" in tag:
self.data["attr_id"] = result[3]
self.ch_attributes["attr_id"] = result[3]
tag = result[1]
if result and "[" in tag:
self.data["attributes__attr__{}".format(result[2])] = result[3]
self.ch_attributes[result[2]] = result[3]
tag = result[1]
if "nth-child(" in tag:
parts = tag.split(":nth-child(")
self.data["nth_child"] = parts[1].replace(")", "")
self.ch_attributes["nth-child"] = self.data["nth_child"]
tag = parts[0]
if "." in tag:
parts = tag.split(".")
# strip all slashes that are not followed by another slash
self.data["attr_class__contains"] = [self._unescape_class(p) if escape_slashes else p for p in parts[1:]]
tag = parts[0]
if tag:
self.data["tag_name"] = tag
@property
def extra_query(self) -> Dict[str, List[Union[str, List[str]]]]:
where: List[Union[str, List[str]]] = []
params: List[Union[str, List[str]]] = []
for key, value in self.data.items():
if "attr__" in key:
where.append("(attributes ->> 'attr__{}') = %s".format(key.split("attr__")[1]))
else:
if "__contains" in key:
where.append("{} @> %s::varchar(200)[]".format(key.replace("__contains", "")))
else:
where.append("{} = %s".format(key))
params.append(value)
return {"where": where, "params": params}
class Selector(object):
parts: List[SelectorPart] = []
def __init__(self, selector: str, escape_slashes=True):
self.parts = []
# Sometimes people manually add *, just remove them as they don't do anything
selector = selector.replace("> * > ", "").replace("> *", "")
tags = re.split(" ", selector.strip())
tags.reverse()
for index, tag in enumerate(tags):
if tag == ">" or tag == "":
continue
direct_descendant = False
if index > 0 and tags[index - 1] == ">":
direct_descendant = True
part = SelectorPart(tag, direct_descendant, escape_slashes)
part.unique_order = len([p for p in self.parts if p.data == part.data])
self.parts.append(copy.deepcopy(part))
class EventManager(models.QuerySet):
def _element_subquery(self, selector: Selector) -> Tuple[Dict[str, Subquery], Dict[str, Union[F, bool]]]:
filter: Dict[str, Union[F, bool]] = {}
subqueries = {}
for index, tag in enumerate(selector.parts):
subqueries["match_{}".format(index)] = Subquery(
Element.objects.filter(group_id=OuterRef("pk"))
.values("order")
.order_by("order")
.extra(**tag.extra_query) # type: ignore
                # If there are two of the same element, for the second one we need to shift by one
[tag.unique_order : tag.unique_order + 1]
)
filter["match_{}__isnull".format(index)] = False
if index > 0:
# If direct descendant, the next element has to have order +1
if tag.direct_descendant:
filter["match_{}".format(index)] = F("match_{}".format(index - 1)) + 1
else:
                    # If not, it can have any order as long as it's bigger than the current element's order
filter["match_{}__gt".format(index)] = F("match_{}".format(index - 1))
return (subqueries, filter)
def earliest_timestamp(self, team_id: int):
return (
self.filter(team_id=team_id)
.order_by("timestamp")[0]
.timestamp.replace(hour=0, minute=0, second=0, microsecond=0)
.isoformat()
)
def filter_by_element(self, filters: Dict, team_id: int):
groups = ElementGroup.objects.filter(team_id=team_id)
if filters.get("selector"):
selector = Selector(filters["selector"])
subqueries, filter = self._element_subquery(selector)
groups = groups.annotate(**subqueries) # type: ignore
else:
filter = {}
for key in ["tag_name", "text", "href"]:
if filters.get(key):
filter["element__{}".format(key)] = filters[key]
if not filter:
return {}
groups = groups.filter(**filter)
return {"elements_hash__in": groups.values_list("hash", flat=True)}
def filter_by_url(self, action_step: ActionStep, subquery: QuerySet):
if not action_step.url:
return subquery
if action_step.url_matching == ActionStep.EXACT:
where, param = "properties->>'$current_url' = %s", action_step.url
elif action_step.url_matching == ActionStep.REGEX:
where, param = "properties->>'$current_url' ~ %s", action_step.url
else:
where, param = "properties->>'$current_url' LIKE %s", f"%{action_step.url}%"
return subquery.extra(where=[where], params=[param])
def filter_by_event(self, action_step):
if not action_step.event:
return {}
return {"event": action_step.event}
def filter_by_period(self, start, end):
if not start and not end:
return {}
if not start:
return {"created_at__lte": end}
if not end:
return {"created_at__gte": start}
return {"created_at__gte": start, "created_at__lte": end}
def add_person_id(self, team_id: int):
return self.annotate(
person_id=Subquery(
PersonDistinctId.objects.filter(team_id=team_id, distinct_id=OuterRef("distinct_id"))
.order_by()
.values("person_id")[:1]
)
)
def query_db_by_action(self, action, order_by="-timestamp", start=None, end=None) -> models.QuerySet:
events = self
any_step = Q()
steps = action.steps.all()
if len(steps) == 0:
return self.none()
for step in steps:
subquery = (
Event.objects.add_person_id(team_id=action.team_id)
.filter(
Filter(data={"properties": step.properties}).properties_to_Q(team_id=action.team_id),
pk=OuterRef("id"),
**self.filter_by_event(step),
**self.filter_by_element(model_to_dict(step), team_id=action.team_id),
**self.filter_by_period(start, end),
)
.only("id")
)
subquery = self.filter_by_url(step, subquery)
any_step |= Q(Exists(subquery))
events = self.filter(team_id=action.team_id).filter(any_step)
if order_by:
events = events.order_by(order_by)
return events
def filter_by_action(self, action: Action, order_by: str = "-id") -> models.QuerySet:
events = self.filter(action=action).add_person_id(team_id=action.team_id)
if order_by:
events = events.order_by(order_by)
return events
def filter_by_event_with_people(self, event, team_id: int, order_by: str = "-id") -> models.QuerySet:
events = self.filter(team_id=team_id).filter(event=event).add_person_id(team_id=team_id)
if order_by:
events = events.order_by(order_by)
return events
def create(self, site_url: Optional[str] = None, *args: Any, **kwargs: Any):
with transaction.atomic():
if kwargs.get("elements"):
if kwargs.get("team"):
kwargs["elements_hash"] = ElementGroup.objects.create(
team=kwargs["team"], elements=kwargs.pop("elements")
).hash
else:
kwargs["elements_hash"] = ElementGroup.objects.create(
team_id=kwargs["team_id"], elements=kwargs.pop("elements")
).hash
event = super().create(*args, **kwargs)
# Matching actions to events can get very expensive to do as events are streaming in
# In a few cases we have had it OOM Postgres with the query it is running
# Short term solution is to have this be configurable to be run in batch
if not settings.ASYNC_EVENT_ACTION_MAPPING:
should_post_webhook = False
relations = []
for action in event.actions:
relations.append(action.events.through(action_id=action.pk, event_id=event.pk))
action.on_perform(event)
if action.post_to_slack:
should_post_webhook = True
Action.events.through.objects.bulk_create(relations, ignore_conflicts=True)
team = kwargs.get("team", event.team)
if (
should_post_webhook and team and team.slack_incoming_webhook and not is_ee_enabled()
): # ee will handle separately
celery.current_app.send_task("posthog.tasks.webhooks.post_event_to_webhook", (event.pk, site_url))
return event
class Event(models.Model):
class Meta:
indexes = [
models.Index(fields=["elements_hash"]),
models.Index(fields=["timestamp", "team_id", "event"]),
]
def _can_use_cached_query(self, last_updated_action_ts):
if not self.team_id in LAST_UPDATED_TEAM_ACTION:
return False
if not self.team_id in TEAM_EVENT_ACTION_QUERY_CACHE:
return False
if not self.event in TEAM_EVENT_ACTION_QUERY_CACHE[self.team_id]:
return False
if not self.team_id in TEAM_ACTION_QUERY_CACHE:
return False
# Cache is expired because actions were updated
if last_updated_action_ts > LAST_UPDATED_TEAM_ACTION[self.team_id]:
return False
return True
@property
def person(self):
return Person.objects.get(
team_id=self.team_id, persondistinctid__team_id=self.team_id, persondistinctid__distinct_id=self.distinct_id
)
# This (ab)uses query_db_by_action to find which actions match this event
# We can't use filter_by_action here, as we use this function when we create an event so
# the event won't be in the Action-Event relationship yet.
# We use query caching to reduce the time spent on generating redundant queries
@property
def actions(self) -> List:
last_updated_action_ts = Action.objects.filter(team_id=self.team_id).aggregate(models.Max("updated_at"))[
"updated_at__max"
]
actions = (
Action.objects.filter(
team_id=self.team_id, steps__event=self.event, deleted=False, # filter by event name to narrow down
)
.distinct("id")
.prefetch_related(Prefetch("steps", queryset=ActionStep.objects.order_by("id")))
)
if not self._can_use_cached_query(last_updated_action_ts):
TEAM_ACTION_QUERY_CACHE[self.team_id], _ = actions.query.sql_with_params()
if len(actions) == 0:
return []
events: models.QuerySet[Any] = Event.objects.filter(pk=self.pk)
for action in actions:
events = events.annotate(
**{
"action_{}".format(action.pk): Event.objects.filter(pk=self.pk)
.query_db_by_action(action)
.values("id")[:1]
}
)
# This block is a little cryptic so bear with me
# We grab the query and the params from the ORM here
q, p = events.query.sql_with_params()
# We then take the parameters and replace the event id's with a placeholder
# We use this later to sub back in future event id's
# The rest of the parameters are shared between action types
qp = tuple(["%s" if i == self.pk else i for i in p])
# Create a cache item and add it to the cache keyed on team_id and event id
qcache = {self.event: (q, qp)}
TEAM_EVENT_ACTION_QUERY_CACHE[self.team_id].update(qcache)
# Update the last updated team action timestamp for future reference
LAST_UPDATED_TEAM_ACTION[self.team_id] = last_updated_action_ts
else:
# If we have reached this block we are about to use the sql query cache
# Grab the actions using the cached action query
actions.raw(TEAM_ACTION_QUERY_CACHE[self.team_id])
# Grab the cached query and query params, we will need to replace some params
q, p = TEAM_EVENT_ACTION_QUERY_CACHE[self.team_id][self.event]
# Replace the query param placeholders with the event id (opposite of what we did above)
qp = tuple([self.pk if i == "%s" else i for i in p])
with connection.cursor() as cursor:
# Format and execute the cached query using the mostly cached params
qstring = cursor.mogrify(q, qp)
cursor.execute(qstring)
events = namedtuplefetchall(cursor)
event = [event for event in events][0]
filtered_actions = [action for action in actions if getattr(event, "action_{}".format(action.pk), None)]
return filtered_actions
created_at: models.DateTimeField = models.DateTimeField(auto_now_add=True, null=True, blank=True)
objects: EventManager = EventManager.as_manager() # type: ignore
team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)
event: models.CharField = models.CharField(max_length=200, null=True, blank=True)
distinct_id: models.CharField = models.CharField(max_length=200)
properties: JSONField = JSONField(default=dict)
timestamp: models.DateTimeField = models.DateTimeField(default=timezone.now, blank=True)
elements_hash: models.CharField = models.CharField(max_length=200, null=True, blank=True)
# DEPRECATED: elements are stored against element groups now
elements: JSONField = JSONField(default=list, null=True, blank=True)
| 43.082888 | 120 | 0.604171 |
59540e95e2ab1c15142ed393e9c91338d70b9ccd
| 5,815 |
py
|
Python
|
p05-join.py
|
DingPang/cs451-practicals
|
f5545fbc9f6dfc9e6d6e3b32615af265e483b801
|
[
"BSD-3-Clause"
] | null | null | null |
p05-join.py
|
DingPang/cs451-practicals
|
f5545fbc9f6dfc9e6d6e3b32615af265e483b801
|
[
"BSD-3-Clause"
] | null | null | null |
p05-join.py
|
DingPang/cs451-practicals
|
f5545fbc9f6dfc9e6d6e3b32615af265e483b801
|
[
"BSD-3-Clause"
] | null | null | null |
"""
In this lab, we once again have a mandatory 'python' challenge.
Then we have a more open-ended Machine Learning 'see why' challenge.
This data is the "Is Wikipedia Literary" that I pitched.
You can contribute to science or get a sense of the data here: https://label.jjfoley.me/wiki
"""
import gzip, json
from shared import (
dataset_local_path,
TODO,
bootstrap_accuracy,
simple_boxplot,
bootstrap_auc,
)
from dataclasses import dataclass
from typing import Dict, List
"""
Problem 1: We have a copy of Wikipedia (I spared you the other 6 million pages).
It is separate from our labels we collected.
"""
@dataclass
class JustWikiPage:
title: str
wiki_id: str
body: str
# Load our pages into this pages list.
pages: List[JustWikiPage] = []
with gzip.open(dataset_local_path("tiny-wiki.jsonl.gz"), "rt") as fp:
for line in fp:
entry = json.loads(line)
pages.append(JustWikiPage(**entry))
@dataclass
class JustWikiLabel:
wiki_id: str
is_literary: bool
# Load our judgments/labels/truths/ys into this labels list:
labels: List[JustWikiLabel] = []
with open(dataset_local_path("tiny-wiki-labels.jsonl")) as fp:
for line in fp:
entry = json.loads(line)
labels.append(
JustWikiLabel(wiki_id=entry["wiki_id"], is_literary=entry["truth_value"])
)
@dataclass
class JoinedWikiData:
wiki_id: str
is_literary: bool
title: str
body: str
print(len(pages), len(labels))
# print(pages[0])
# print(labels[0])
joined_data: Dict[str, JoinedWikiData] = {}
labels_by_id: Dict[str, JustWikiLabel] = {}
for label in labels:
labels_by_id[label.wiki_id] = label
for page in pages:
if page.wiki_id not in labels_by_id:
print("missing labels for page:", page.wiki_id)
continue
label_for_page = labels_by_id[page.wiki_id]
full_row = JoinedWikiData(
page.wiki_id, label_for_page.is_literary, page.title, page.body
)
joined_data[full_row.wiki_id] = full_row
# TODO("1. create a list of JoinedWikiData from the ``pages`` and ``labels`` lists.")
# This challenge has some very short solutions, so it's more conceptual. If you're stuck after ~10-20 minutes of thinking, ask!
############### Problem 1 ends here ###############
# Make sure it is solved correctly!
assert len(joined_data) == len(pages)
assert len(joined_data) == len(labels)
# Make sure it has *some* positive labels!
assert sum([1 for d in joined_data.values() if d.is_literary]) > 0
# Make sure it has *some* negative labels!
assert sum([1 for d in joined_data.values() if not d.is_literary]) > 0
# Construct our ML problem:
ys = []
examples = []
for wiki_data in joined_data.values():
ys.append(wiki_data.is_literary)
examples.append(wiki_data.body)
## We're actually going to split before converting to features now...
from sklearn.model_selection import train_test_split
import numpy as np
RANDOM_SEED = 1234
## split off train/validate (tv) pieces.
ex_tv, ex_test, y_tv, y_test = train_test_split(
examples,
ys,
train_size=0.75,
shuffle=True,
random_state=RANDOM_SEED,
)
# split off train, validate from (tv) pieces.
ex_train, ex_vali, y_train, y_vali = train_test_split(
ex_tv, y_tv, train_size=0.66, shuffle=True, random_state=RANDOM_SEED
)
## Convert to features, train simple model (TFIDF will be explained eventually.)
from sklearn.feature_extraction.text import TfidfVectorizer
# Only learn columns for words in the training data, to be fair.
word_to_column = TfidfVectorizer(
strip_accents="unicode", lowercase=True, stop_words="english", max_df=0.5
)
word_to_column.fit(ex_train)
# Test words should surprise us, actually!
X_train = word_to_column.transform(ex_train)
X_vali = word_to_column.transform(ex_vali)
X_test = word_to_column.transform(ex_test)
print("Ready to Learn!")
from sklearn.linear_model import LogisticRegression, SGDClassifier, Perceptron
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
models = {
"SGDClassifier": SGDClassifier(),
"Perceptron": Perceptron(),
"LogisticRegression": LogisticRegression(),
"DTree": DecisionTreeClassifier(),
}
# 2A
for d in range(1, 20):
models["DTree{}".format(d)] = DecisionTreeClassifier(max_depth=d)
# 2B
models["rForest"] = RandomForestClassifier()
for name, m in models.items():
m.fit(X_train, y_train)
print("{}:".format(name))
print("\tVali-Acc: {:.3}".format(m.score(X_vali, y_vali)))
if hasattr(m, "decision_function"):
scores = m.decision_function(X_vali)
else:
scores = m.predict_proba(X_vali)[:, 1]
print("\tVali-AUC: {:.3}".format(roc_auc_score(y_score=scores, y_true=y_vali)))
"""
Results should be something like:
SGDClassifier:
Vali-Acc: 0.84
Vali-AUC: 0.879
Perceptron:
Vali-Acc: 0.815
Vali-AUC: 0.844
LogisticRegression:
Vali-Acc: 0.788
Vali-AUC: 0.88
DTree:
Vali-Acc: 0.739
Vali-AUC: 0.71
"""
# TODO("2. Explore why DecisionTrees are not beating linear models. Answer one of:")
# TODO("2.A. Is it a bad depth?")
# TODO("2.B. Do Random Forests do better?")
# TODO(
# "2.C. Is it randomness? Use simple_boxplot and bootstrap_auc/bootstrap_acc to see if the differences are meaningful!"
# )
# TODO("2.D. Is it randomness? Control for random_state parameters!")
"""
2.A. The depth does not seem to be the problem, because changing the depth doesn't improve the scores by much.
2.B. Random Forest does better than the plain decision trees, but it is still slightly worse than the linear SGD classifier.
I think the linear models do better because this dataset may simply be close to linearly separable in TF-IDF space,
so a single linear decision boundary is enough to classify is_literary.
"""
| 29.221106 | 127 | 0.710232 |
65751af607f0448b627ff9ab27ac77769871bf01
| 389 |
py
|
Python
|
pathMaker.py
|
patwoz/phototimer
|
4cb661314dce3f05fcbb33e0b7514b699bc62974
|
[
"MIT"
] | 73 |
2016-12-13T01:59:32.000Z
|
2022-03-15T05:44:47.000Z
|
pathMaker.py
|
patwoz/phototimer
|
4cb661314dce3f05fcbb33e0b7514b699bc62974
|
[
"MIT"
] | 7 |
2017-03-23T01:16:57.000Z
|
2022-01-30T16:56:06.000Z
|
pathMaker.py
|
patwoz/phototimer
|
4cb661314dce3f05fcbb33e0b7514b699bc62974
|
[
"MIT"
] | 29 |
2017-03-22T04:07:59.000Z
|
2022-01-02T22:31:16.000Z
|
import os
class pathMaker:
def __init__(self):
pass
def get_parts(self, totalPath):
if(totalPath==None):
return []
return totalPath.strip("/").split("/")
def get_paths(self, totalPath):
parts = self.get_parts(totalPath)
paths =[]
path = "/"
for p in parts:
path = os.path.join(path,p)
paths.append(path)
return paths
def make_path(self, totalPath):
pass
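# Illustrative note (added, not in the original file): on a POSIX system,
# pathMaker().get_paths("/a/b/c") returns ['/a', '/a/b', '/a/b/c'], i.e. the
# full path plus every ancestor. make_path() above is still an empty stub; a
# likely (assumed) implementation would os.makedirs() the deepest path.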
| 16.913043 | 40 | 0.663239 |
f0bc9d4ab6778b87d14e20b7e9fe80e373b7211f
| 3,982 |
py
|
Python
|
app/v1/models.py
|
Nyakaru/Store-Manager-Backend
|
e6240eac541632760f4a74e3e27a83ae0d2d3be5
|
[
"MIT"
] | null | null | null |
app/v1/models.py
|
Nyakaru/Store-Manager-Backend
|
e6240eac541632760f4a74e3e27a83ae0d2d3be5
|
[
"MIT"
] | 4 |
2018-10-17T12:07:41.000Z
|
2021-06-01T22:50:14.000Z
|
app/v1/models.py
|
Nyakaru/Store-Manager-Backend
|
e6240eac541632760f4a74e3e27a83ae0d2d3be5
|
[
"MIT"
] | 1 |
2019-01-22T16:54:57.000Z
|
2019-01-22T16:54:57.000Z
|
'''Models and their methods.'''
from hashlib import sha256
from os import getenv
import jwt
class DB():
'''In memory database.'''
def __init__(self):
'''Create an empty database.'''
self.users = []
self.products = []
self.sales = []
def drop(self):
'''Drop entire database.'''
self.__init__()
db = DB()
class Base():
'''Base class to be inherited by other models.'''
def save(self):
'''Add object to database.'''
try:
self.id = getattr(db, self.tablename)[-1]['id']+1
except IndexError:
self.id = 1
current = self.current()
getattr(db, self.tablename).append(current)
return self.view()
def view(self):
'''View object as a dictionary.'''
        return self.current()
class Product(Base):
'''Product model.'''
def __init__(self, name, price):
'''Initialize a product.'''
self.id = 0
self.name = name
self.price = price
self.tablename = 'products'
def current(self):
'''Current product'''
current = {
"name": self.name,
"price": self.price,
"id": self.id
}
return current
def view(self):
'''View product's information.'''
return {
"name": self.name,
"price": self.price,
"id": self.id
}
class Sale(Base):
'''Sales model.'''
def __init__(self, name, price):
'''Initialize a sale.'''
self.id = 0
self.name = name
self.price = price
self.tablename = 'sales'
def current(self):
'''Current sale'''
current = {
"name": self.name,
"price": self.price,
"id": self.id
}
return current
def view(self):
'''View a user's information.'''
return {
"name": self.name,
"price": self.price,
"id": self.id
}
class User(Base):
'''User model.'''
def __init__(self, username, password, email, is_admin, is_attendant, id=None):
'''Initialize a user.'''
self.id = 0
self.username = username
self.email = email
self.password = self.make_hash(password)
if str(is_admin) == 'True':
self.is_admin = True
else:
self.is_admin = False
if str(is_attendant) == 'True':
self.is_attendant = True
else:
self.is_attendant = False
self.tablename = 'users'
def current(self):
'''Current user'''
current = {
'username': self.username,
'email': self.email,
'password': self.password,
'is_attendant': self.is_attendant,
'is_admin': self.is_admin,
'id': self.id
}
return current
def view(self):
'''View a user's information.'''
return {
'username': self.username,
'email': self.email,
'is_admin': self.is_admin,
'is_attendant': self.is_attendant,
'id': self.id
}
def make_hash(self, password):
'''Generate hash of password.'''
return sha256(password.encode('utf-8')).hexdigest()
def generate_token(self):
'''Create a token for a user.'''
key = getenv('APP_SECRET_KEY')
payload = {
'user_id': self.id,
'username': self.username,
'is_admin': self.is_admin,
'is_attendant': self.is_attendant
}
return jwt.encode(payload=payload, key=str(key), algorithm='HS256').decode('utf-8')
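    # Note (added comment): the .decode('utf-8') above and the bare jwt.decode()
    # below assume PyJWT 1.x, where encode() returns bytes and decode() does not
    # require an explicit algorithms= argument; under PyJWT 2.x encode() already
    # returns str and decode() must be given algorithms=['HS256'].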
@staticmethod
def decode_token(token):
'''View information inside a token.'''
return jwt.decode(token, key=str(getenv('APP_SECRET_KEY')))
def check_password(self, password):
'''Validate a user's password.'''
| 22.625 | 91 | 0.514314 |
f4f1ed89badcc1462265ab83bd30f76c000b791f
| 4,243 |
py
|
Python
|
samples/web/content/apprtc/apprtc_test.py
|
alvestrand/webrtc
|
e08695776b058fe781a2ff5259b648a89ea0e548
|
[
"BSD-3-Clause"
] | null | null | null |
samples/web/content/apprtc/apprtc_test.py
|
alvestrand/webrtc
|
e08695776b058fe781a2ff5259b648a89ea0e548
|
[
"BSD-3-Clause"
] | null | null | null |
samples/web/content/apprtc/apprtc_test.py
|
alvestrand/webrtc
|
e08695776b058fe781a2ff5259b648a89ea0e548
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2014 Google Inc. All Rights Reserved.
import unittest
import webtest
import apprtc
import json
from google.appengine.api import memcache
from google.appengine.ext import testbed
class AppRtcUnitTest(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
def tearDown(self):
self.testbed.deactivate()
def testGenerateRandomGeneratesStringOfRightLength(self):
self.assertEqual(17, len(apprtc.generate_random(17)))
self.assertEqual(23, len(apprtc.generate_random(23)))
class AppRtcPageHandlerTest(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_memcache_stub()
self.test_app = webtest.TestApp(apprtc.app)
def tearDown(self):
self.testbed.deactivate()
def makeGetRequest(self, path):
    # PhantomJS uses WebKit, so Safari is closest to the truth.
return self.test_app.get(path, headers={'User-Agent': 'Safari'})
def makePostRequest(self, path, body=''):
return self.test_app.post(path, body, headers={'User-Agent': 'Safari'})
def verifyRegisterSuccessResponse(self, response, is_initiator, room_id):
self.assertEqual(response.status_int, 200)
response_json = json.loads(response.body)
self.assertEqual('SUCCESS', response_json['result'])
params = response_json['params']
caller_id = params['client_id']
self.assertTrue(len(caller_id) > 0)
self.assertEqual(json.dumps(is_initiator), params['is_initiator'])
self.assertEqual(room_id, params['room_id'])
self.assertEqual([], params['error_messages'])
return caller_id
def testConnectingWithoutRoomIdRedirectsToGeneratedRoom(self):
response = self.makeGetRequest('/')
self.assertEqual(response.status_int, 302)
redirect_location = response.headers['Location']
self.assertRegexpMatches(redirect_location, '^http://localhost/r/[\d]+$')
def testRegisterAndBye(self):
room_id = 'foo'
# Register the caller.
response = self.makePostRequest('/register/' + room_id)
caller_id = self.verifyRegisterSuccessResponse(response, True, room_id)
# Register the callee.
response = self.makePostRequest('/register/' + room_id)
callee_id = self.verifyRegisterSuccessResponse(response, False, room_id)
# The third user will get an error.
response = self.makePostRequest('/register/' + room_id)
self.assertEqual(response.status_int, 200)
response_json = json.loads(response.body)
self.assertEqual('FULL', response_json['result'])
# The caller and the callee leave.
self.makePostRequest('/bye/' + room_id + '/' + caller_id)
self.makePostRequest('/bye/' + room_id + '/' + callee_id)
# Another user becomes the new caller.
response = self.makePostRequest('/register/' + room_id)
caller_id = self.verifyRegisterSuccessResponse(response, True, room_id)
self.makePostRequest('/bye/' + room_id + '/' + caller_id)
def testCallerMessagesForwardedToCallee(self):
room_id = 'foo'
# Register the caller.
response = self.makePostRequest('/register/' + room_id)
caller_id = self.verifyRegisterSuccessResponse(response, True, room_id)
# Caller's messages should be saved.
messages = ['1', '2', '3']
path = '/message/' + room_id + '/' + caller_id
for msg in messages:
response = self.makePostRequest(path, msg)
response_json = json.loads(response.body)
self.assertEqual('SUCCESS', response_json['result'])
response = self.makePostRequest('/register/' + room_id)
callee_id = self.verifyRegisterSuccessResponse(response, False, room_id)
received_msgs = json.loads(response.body)['params']['messages']
self.assertEqual(messages, received_msgs)
self.makePostRequest('/bye/' + room_id + '/' + caller_id)
self.makePostRequest('/bye/' + room_id + '/' + callee_id)
if __name__ == '__main__':
unittest.main()
| 35.957627 | 77 | 0.716003 |
a6b639406d1f0b8b2ebddb502a6b8be24eb0c6f0
| 422 |
py
|
Python
|
venv/Scripts/pip3.6-script.py
|
fachrurRz/raspi-webserver
|
b0c5bd0c76138b35aac228eb7b0af47a4f7771ed
|
[
"Apache-2.0"
] | null | null | null |
venv/Scripts/pip3.6-script.py
|
fachrurRz/raspi-webserver
|
b0c5bd0c76138b35aac228eb7b0af47a4f7771ed
|
[
"Apache-2.0"
] | null | null | null |
venv/Scripts/pip3.6-script.py
|
fachrurRz/raspi-webserver
|
b0c5bd0c76138b35aac228eb7b0af47a4f7771ed
|
[
"Apache-2.0"
] | null | null | null |
#!C:\Users\fachr\PycharmProjects\raspi-webserver\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
| 32.461538 | 72 | 0.670616 |
e7924a5691252f59760c8d60cddebe77ef542010
| 912 |
py
|
Python
|
sources/rnt/mediane/migrations/0004_auto_20171221_0333.py
|
bryan-brancotte/rank-aggregation-with-ties
|
15fffb0b1bee3d6cef7090486a7c910e5f51195d
|
[
"Apache-2.0"
] | null | null | null |
sources/rnt/mediane/migrations/0004_auto_20171221_0333.py
|
bryan-brancotte/rank-aggregation-with-ties
|
15fffb0b1bee3d6cef7090486a7c910e5f51195d
|
[
"Apache-2.0"
] | 11 |
2018-04-04T08:24:30.000Z
|
2021-03-19T21:45:04.000Z
|
sources/rnt/mediane/migrations/0004_auto_20171221_0333.py
|
bryan-brancotte/rank-aggregation-with-ties
|
15fffb0b1bee3d6cef7090486a7c910e5f51195d
|
[
"Apache-2.0"
] | 1 |
2018-10-25T09:13:41.000Z
|
2018-10-25T09:13:41.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-21 03:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mediane', '0003_auto_20171219_2309'),
]
operations = [
migrations.AddField(
model_name='distance',
name='key_name',
field=models.CharField(default='', max_length=128),
preserve_default=False,
),
migrations.AddField(
model_name='distance',
name='key_name_is_read_only',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='dataset',
name='transient',
field=models.BooleanField(default=False, help_text='Should the dataset be deleted when the associated job is removed?'),
),
]
| 28.5 | 132 | 0.60636 |
326da98954558ddee3749af757cd6140696dbcb1
| 5,244 |
py
|
Python
|
parcels_valuation/src/init_classification.py
|
dykra/railwayPath
|
44a2e256fde7c6439e77e3d713b5ef44c4d0e3bb
|
[
"MIT"
] | null | null | null |
parcels_valuation/src/init_classification.py
|
dykra/railwayPath
|
44a2e256fde7c6439e77e3d713b5ef44c4d0e3bb
|
[
"MIT"
] | null | null | null |
parcels_valuation/src/init_classification.py
|
dykra/railwayPath
|
44a2e256fde7c6439e77e3d713b5ef44c4d0e3bb
|
[
"MIT"
] | null | null | null |
import argparse
import csv
from os.path import abspath
from sklearn.model_selection import train_test_split
from configuration.configuration_constants import excluded_values, \
file_name_predicted_price_categories_values, \
target_column_name, \
limit_date, \
path_to_trained_models
from classification_module import CalculateValue, \
ClassificationLogisticRegressionModel, \
get_model
from utils.database_handler import DatabaseHandler
from utils.file_names_builder import make_file_name
from utils.serialization_module import create_logger
query_step_iterate = 200000
parser = argparse.ArgumentParser(description='Program to predict category of land price.')
parser.add_argument('--save_to_database',
action='store_true',
default=False,
help='Specify whether to save the values into the database.')
parser.add_argument('--model_overwrite',
action='store_true',
default=False,
help='Specify whether to override the model.')
logger = create_logger()
def classification_regression_with_test_set():
database_handler = DatabaseHandler()
query = "EXEC dbo.GetDataToTrainClassificationModel @LimitDate = {}, @ExcludedList ='{}'".format(limit_date,
excluded_values)
data = database_handler.execute_query(query)
train, test = train_test_split(data, test_size=0.2)
model = ClassificationLogisticRegressionModel(input_data=train, target_column_name=target_column_name)
prediction = CalculateValue(model).predict(data_to_predict=test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_true=test[target_column_name], y_pred=prediction))
database_handler.close_connection()
for predictionItem, realItem in zip(prediction, test[target_column_name]):
if predictionItem != realItem:
print(predictionItem)
print(realItem)
print("\n")
def classification_regression(save_to_database=False, overwrite_model=False):
database_handler = DatabaseHandler()
model_file_name = make_file_name(base_name=path_to_trained_models + "classification_",
_limit_date=limit_date,
extension='.sav')
model = get_model("EXEC dbo.GetDataToTrainClassificationModel @LimitDate = {}, @ExcludedList ='{}'"
.format(limit_date, excluded_values),
target_column=target_column_name,
model_file_name=abspath(model_file_name),
database_handler=database_handler,
overwrite=overwrite_model)
min_max_object_id = \
database_handler.execute_query("EXEC dbo.GetMinimumAndMaxumimObjectID_ParcelVectors "
"@LimitDate = {}, @ExcludedList ='{}'"
.format(limit_date, excluded_values))
min_object_id = min_max_object_id.iloc[0]["MinimumObjectID"]
max_object_id = min_max_object_id.iloc[0]["MaximumObjectID"]
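    # Added note: parcels are scored in chunks of query_step_iterate OBJECTIDs so
    # that neither the SQL result set nor the in-memory prediction batch grows
    # unbounded; each chunk is fetched, predicted and written out (to CSV and,
    # optionally, back to the database) before the next one is loaded.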
try:
with open(make_file_name(file_name_predicted_price_categories_values, extension='.csv'), mode='a') \
as estimated_bucket_values:
estimated_bucket_writer = csv.writer(estimated_bucket_values,
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
tmp_min = min_object_id
while tmp_min < max_object_id:
if tmp_min + query_step_iterate < max_object_id:
tmp_max = tmp_min + query_step_iterate
else:
tmp_max = max_object_id
df_parcels_to_estimate_price_group = database_handler.execute_query(
"EXEC dbo.GetDataToParcelClassification "
"@LimitDate = {}, @ExcludedList='{}', @ObjectIdMin = {}, @ObjectIdMax = {}"
.format(limit_date, excluded_values, tmp_min, tmp_max))
prediction = CalculateValue(model).predict(data_to_predict=df_parcels_to_estimate_price_group)
for (prediction_value, object_id) in zip(prediction, df_parcels_to_estimate_price_group['OBJECTID']):
if save_to_database:
query = ("EXEC dbo.UpdateEstimatedPriceCategoryGroup "
"@NEW_Estimated_Price_Group = {}, @ObjectID = {} "
.format(prediction_value, object_id))
database_handler.cursor.execute(query)
database_handler.conn.commit()
estimated_bucket_writer.writerow([object_id, prediction_value])
tmp_min = tmp_max
finally:
database_handler.close_connection()
logger.info('Classification prediction is done.')
if __name__ == '__main__':
args = parser.parse_args()
classification_regression(save_to_database=args.save_to_database, overwrite_model=args.model_overwrite)
| 48.555556 | 117 | 0.632151 |
44cbbf771684769184b9bb9d10e5fd2e8f06a165
| 1,918 |
py
|
Python
|
flaskext/color.py
|
Frozenball/flask-color
|
0ecea1cfe37570e9286cada9209674bd0b0f6d7c
|
[
"MIT"
] | 16 |
2015-07-18T17:10:03.000Z
|
2019-05-22T17:09:44.000Z
|
flaskext/color.py
|
Teemu/flask-color
|
0ecea1cfe37570e9286cada9209674bd0b0f6d7c
|
[
"MIT"
] | 1 |
2016-08-20T21:24:17.000Z
|
2018-04-27T20:36:54.000Z
|
flaskext/color.py
|
Teemu/flask-color
|
0ecea1cfe37570e9286cada9209674bd0b0f6d7c
|
[
"MIT"
] | 2 |
2019-11-07T08:29:29.000Z
|
2020-06-05T13:12:37.000Z
|
# -*- coding: utf-8 -*-
"""
flaskext.color
~~~~~~~~~~~~~~
Colors the requests in debugging mode
:copyright: (c) 2014 by Frozenball.
:license: MIT, see LICENSE for more details.
"""
import time
import re
class TerminalColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
GRAY = '\033[1;30m'
LITTLEGRAY = '\033[1;30m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def init_app(app):
if not (
app.config['DEBUG'] or
app.config.get('COLOR_ALWAYS_ON', False)
):
return
import werkzeug.serving
staticPattern = app.config.get(
'COLOR_PATTERN_GRAY',
r'^/(static|assets|img|js|css)/(.*)|favicon\.ico|(.*)\.(png|jpeg|jpg|gif|css)$'
)
hidePattern = app.config.get('COLOR_PATTERN_HIDE', r'/^$/')
WSGIRequestHandler = werkzeug.serving.WSGIRequestHandler
def log_request(self, code='-', size='-'):
url = self.requestline.split(" ")[1]
method = self.requestline.split(" ")[0]
if code == 200:
statusColor = TerminalColors.OKGREEN
elif str(code)[0] in ['4', '5']:
statusColor = TerminalColors.FAIL
else:
statusColor = TerminalColors.GRAY
if re.search(hidePattern, url):
return
print("{statusColor}{status}{colorEnd} {methodColor}{method}{colorEnd} {urlColor}{url}{colorEnd}".format(
status=code,
method=method,
url=url,
statusColor=statusColor,
colorEnd=TerminalColors.ENDC,
methodColor=TerminalColors.GRAY if method == 'GET' else TerminalColors.ENDC,
urlColor=TerminalColors.LITTLEGRAY if re.search(staticPattern, url) else TerminalColors.ENDC
))
WSGIRequestHandler.log_request = log_request
werkzeug.serving.WSGIRequestHandler = WSGIRequestHandler
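# Illustrative usage sketch (an assumption, not part of the original module):
# the extension is enabled by calling init_app() on a Flask app running in
# debug mode (or with COLOR_ALWAYS_ON set); request lines are then colorized.
if __name__ == '__main__':
    from flask import Flask
    demo_app = Flask(__name__)
    demo_app.config['DEBUG'] = True
    init_app(demo_app)
    demo_app.run(debug=True)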
| 29.060606 | 113 | 0.594891 |
56e3e64f3bf659d71b797c8e84fd99b31360e04c
| 1,878 |
py
|
Python
|
menu/generic.py
|
vlegoff/mud
|
88c9b6f020148e3fc9f3a58c372a0907e89384ff
|
[
"BSD-3-Clause"
] | null | null | null |
menu/generic.py
|
vlegoff/mud
|
88c9b6f020148e3fc9f3a58c372a0907e89384ff
|
[
"BSD-3-Clause"
] | null | null | null |
menu/generic.py
|
vlegoff/mud
|
88c9b6f020148e3fc9f3a58c372a0907e89384ff
|
[
"BSD-3-Clause"
] | null | null | null |
"""Module containing generic functions for EvMenu."""
def _formatter(nodetext, optionstext, caller=None):
"""Do not display the options, only the text.
This function is used by EvMenu to format the text of nodes.
Options are not displayed for this menu, where it doesn't often
make much sense to do so. Thus, only the node text is displayed.
"""
return nodetext
def _input_no_digit(menuobject, raw_string, caller):
"""
Process input.
Processes input much the same way the original function in
EvMenu operates, but if input is a number, consider it a
default choice.
Args:
menuobject (EvMenu): The EvMenu instance
raw_string (str): The incoming raw_string from the menu
command.
caller (Object, Player or Session): The entity using
the menu.
"""
cmd = raw_string.strip().lower()
if cmd.isdigit() and menuobject.default:
goto, callback = menuobject.default
menuobject.callback_goto(callback, goto, raw_string)
elif cmd in menuobject.options:
# this will take precedence over the default commands
# below
goto, callback = menuobject.options[cmd]
menuobject.callback_goto(callback, goto, raw_string)
elif menuobject.auto_look and cmd in ("look", "l"):
menuobject.display_nodetext()
elif menuobject.auto_help and cmd in ("help", "h"):
menuobject.display_helptext()
elif menuobject.auto_quit and cmd in ("quit", "q", "exit"):
menuobject.close_menu()
elif menuobject.default:
goto, callback = menuobject.default
menuobject.callback_goto(callback, goto, raw_string)
else:
caller.msg(_HELP_NO_OPTION_MATCH)
if not (menuobject.options or menuobject.default):
# no options - we are at the end of the menu.
menuobject.close_menu()
| 34.777778 | 69 | 0.672524 |
0d096b66ab79399ba0d158a5af7a2cf5784b00d7
| 844 |
py
|
Python
|
adminmgr/media/code/python/red1/BD_0913_0171_1120_0113_reducer.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 9 |
2019-11-08T02:05:27.000Z
|
2021-12-13T12:06:35.000Z
|
adminmgr/media/code/python/red1/BD_0913_0171_1120_0113_reducer.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 6 |
2019-11-27T03:23:16.000Z
|
2021-06-10T19:15:13.000Z
|
adminmgr/media/code/python/red1/BD_0913_0171_1120_0113_reducer.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 4 |
2019-11-26T17:04:27.000Z
|
2021-12-13T11:57:03.000Z
|
#!/usr/bin/python3
import sys
import csv
file_contents = sys.stdin
output_dict = dict()
for LINE in file_contents:
line = LINE.strip()
LIST = line.split(",")
batsman_bowler = str(LIST[0]) + "_" + str(LIST[1])
wickets = int(LIST[2])
if batsman_bowler in output_dict.keys():
wickets_deleveries = output_dict[batsman_bowler]
wickets_deleveries[0] = wickets_deleveries[0] + wickets #No. of wickets
wickets_deleveries[1] = wickets_deleveries[1] + 1 #No. of deleveries
else:
output_dict[batsman_bowler] = [wickets,1]
o = list()
for key in output_dict.keys():
if(output_dict[key][1] <= 5):
continue
bat_bowl_list = key.split("_")
wick_del_list = output_dict[key]
l1 = bat_bowl_list + wick_del_list
o.append(l1)
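# Added note: sort by accumulated wickets (descending), then by number of
# deliveries (ascending), then by batsman and bowler names, so pairs with the
# most wickets off the fewest deliveries are printed first.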
o.sort(key = lambda x: (-x[2],x[3],x[0],x[1]))
for l in o:
print(l[0],l[1],l[2],l[3],sep=",")
| 22.810811 | 74 | 0.674171 |
974721a1e802534d61c05f9a5922c72043bf964e
| 649 |
py
|
Python
|
Examples/CircuitPython/Audio/Play-Simple-Tone.py
|
CytronTechnologies/MAKER-NANO-RP2040
|
96ebf29b53833bc09251e3680a2ddbf7e83934ff
|
[
"MIT"
] | null | null | null |
Examples/CircuitPython/Audio/Play-Simple-Tone.py
|
CytronTechnologies/MAKER-NANO-RP2040
|
96ebf29b53833bc09251e3680a2ddbf7e83934ff
|
[
"MIT"
] | null | null | null |
Examples/CircuitPython/Audio/Play-Simple-Tone.py
|
CytronTechnologies/MAKER-NANO-RP2040
|
96ebf29b53833bc09251e3680a2ddbf7e83934ff
|
[
"MIT"
] | null | null | null |
# Plays a simple 8ksps 440 Hz sine wave
#
# Copy this file to Maker Nano RP2040 CIRCUITPY drive as code.py to run it on power up.
#
# This example code uses: Maker Nano RP2040
import audiocore
import audiopwmio
import board
import array
import time
import math
# Generate one period of a sine wave.
length = 8000 // 440
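# Added note: integer division gives length == 18 samples, so the looped tone
# is actually 8000 / 18 (about 444.4 Hz) rather than exactly 440 Hz; close
# enough for this demo.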
sine_wave = array.array("H", [0] * length)
for i in range(length):
sine_wave[i] = int(math.sin(math.pi * 2 * i / length) * (2 ** 15) + 2 ** 15)
dac = audiopwmio.PWMAudioOut(board.GP18)
sine_wave = audiocore.RawSample(sine_wave, sample_rate=8000)
dac.play(sine_wave, loop=True)
time.sleep(1)
dac.stop()
| 27.041667 | 88 | 0.694915 |
46c3bbb6712c6276e48dd9328d7741a447f28b91
| 1,351 |
py
|
Python
|
python/paddle/fluid/tests/unittests/test_fetch_var.py
|
limeng357/Paddle
|
dbd25805c88c48998eb9dc0f4b2ca1fd46326482
|
[
"ECL-2.0",
"Apache-2.0"
] | 4 |
2019-04-28T13:29:41.000Z
|
2022-01-09T16:54:20.000Z
|
python/paddle/fluid/tests/unittests/test_fetch_var.py
|
limeng357/Paddle
|
dbd25805c88c48998eb9dc0f4b2ca1fd46326482
|
[
"ECL-2.0",
"Apache-2.0"
] | 3 |
2017-07-15T14:20:08.000Z
|
2019-05-06T03:16:54.000Z
|
python/paddle/fluid/tests/unittests/test_fetch_var.py
|
limeng357/Paddle
|
dbd25805c88c48998eb9dc0f4b2ca1fd46326482
|
[
"ECL-2.0",
"Apache-2.0"
] | 2 |
2020-11-04T08:01:39.000Z
|
2020-11-06T08:33:28.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import op_test
import numpy
import unittest
class TestFetchVar(op_test.OpTest):
def test_fetch_var(self):
val = numpy.array([1, 3, 5]).astype(numpy.int32)
x = layers.create_tensor(dtype="int32", persistable=True, name="x")
layers.assign(input=val, output=x)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_main_program(), feed={}, fetch_list=[])
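        # Added note: the program is run purely for its side effect (the assign
        # op populates "x"); fetch_var() then reads the persistable tensor back
        # out of the executor's global scope.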
fetched_x = fluid.fetch_var("x")
self.assertTrue(
numpy.array_equal(fetched_x, val),
"fetch_x=%s val=%s" % (fetched_x, val))
self.assertEqual(fetched_x.dtype, val.dtype)
if __name__ == '__main__':
unittest.main()
| 35.552632 | 75 | 0.703183 |
b0dddef602f2a708addae34069add138dc9920bf
| 15,296 |
py
|
Python
|
cumulusci/tasks/bulkdata/tests/test_delete.py
|
davisagli/CumulusCI
|
fd74c324ad3ff662484b159395c639879011e711
|
[
"BSD-3-Clause"
] | 109 |
2015-01-20T14:28:48.000Z
|
2018-08-31T12:12:39.000Z
|
cumulusci/tasks/bulkdata/tests/test_delete.py
|
davisagli/CumulusCI
|
fd74c324ad3ff662484b159395c639879011e711
|
[
"BSD-3-Clause"
] | 365 |
2015-01-07T19:54:25.000Z
|
2018-09-11T15:10:02.000Z
|
cumulusci/tasks/bulkdata/tests/test_delete.py
|
davisagli/CumulusCI
|
fd74c324ad3ff662484b159395c639879011e711
|
[
"BSD-3-Clause"
] | 125 |
2015-01-17T16:05:39.000Z
|
2018-09-06T19:05:00.000Z
|
from unittest import mock
import pytest
import responses
from cumulusci.core.exceptions import BulkDataException, TaskOptionsError
from cumulusci.tasks.bulkdata import DeleteData
from cumulusci.tasks.bulkdata.step import (
DataApi,
DataOperationJobResult,
DataOperationResult,
DataOperationStatus,
DataOperationType,
)
from cumulusci.tasks.bulkdata.tests.utils import _make_task
from cumulusci.tests.util import mock_describe_calls
class TestDeleteData:
@responses.activate
@mock.patch("cumulusci.tasks.bulkdata.delete.get_query_operation")
@mock.patch("cumulusci.tasks.bulkdata.delete.get_dml_operation")
def test_run(self, dml_mock, query_mock):
mock_describe_calls()
task = _make_task(DeleteData, {"options": {"objects": "Contact"}})
query_mock.return_value.get_results.return_value = iter(
["001000000000000", "001000000000001"]
)
query_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 2, 0
)
dml_mock.return_value.get_results.return_value = iter(
[
DataOperationResult("001000000000000", True, None),
DataOperationResult("001000000000001", True, None),
]
)
dml_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 0, 0
)
task()
query_mock.assert_called_once_with(
sobject="Contact",
fields=["Id"],
api_options={},
context=task,
query="SELECT Id FROM Contact",
api=DataApi.SMART,
)
query_mock.return_value.query.assert_called_once()
query_mock.return_value.get_results.assert_called_once()
dml_mock.assert_called_once_with(
sobject="Contact",
operation=DataOperationType.DELETE,
api_options={},
context=task,
fields=["Id"],
api=DataApi.SMART,
volume=2,
)
dml_mock.return_value.start.assert_called_once()
dml_mock.return_value.end.assert_called_once()
dml_mock.return_value.load_records.assert_called_once()
dml_mock.return_value.get_results.assert_called_once()
@responses.activate
@mock.patch("cumulusci.tasks.bulkdata.delete.get_query_operation")
@mock.patch("cumulusci.tasks.bulkdata.delete.get_dml_operation")
def test_run__no_results(self, dml_mock, query_mock):
mock_describe_calls()
task = _make_task(DeleteData, {"options": {"objects": "Contact"}})
query_mock.return_value.get_results.return_value = iter([])
query_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 0, 0
)
task()
query_mock.assert_called_once_with(
sobject="Contact",
fields=["Id"],
api_options={},
context=task,
query="SELECT Id FROM Contact",
api=DataApi.SMART,
)
query_mock.return_value.query.assert_called_once()
query_mock.return_value.get_results.assert_not_called()
dml_mock.assert_not_called()
@responses.activate
@mock.patch("cumulusci.tasks.bulkdata.delete.get_query_operation")
@mock.patch("cumulusci.tasks.bulkdata.delete.get_dml_operation")
def test_run__job_error_delete(self, dml_mock, query_mock):
mock_describe_calls()
task = _make_task(DeleteData, {"options": {"objects": "Contact"}})
query_mock.return_value.get_results.return_value = iter(
["001000000000000", "001000000000001"]
)
query_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 2, 0
)
dml_mock.return_value.get_results.return_value = iter(
[
DataOperationResult("001000000000000", True, None),
DataOperationResult("001000000000001", False, None),
]
)
with pytest.raises(BulkDataException):
task()
@responses.activate
@mock.patch("cumulusci.tasks.bulkdata.delete.get_query_operation")
@mock.patch("cumulusci.tasks.bulkdata.delete.get_dml_operation")
def test_run__job_error_query(self, dml_mock, query_mock):
mock_describe_calls()
task = _make_task(DeleteData, {"options": {"objects": "Contact"}})
query_mock.return_value.get_results.return_value = iter(
["001000000000000", "001000000000001"]
)
query_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.JOB_FAILURE, [], 0, 0
)
with pytest.raises(BulkDataException):
task()
@responses.activate
@mock.patch("cumulusci.tasks.bulkdata.delete.get_query_operation")
@mock.patch("cumulusci.tasks.bulkdata.delete.get_dml_operation")
def test_run__row_error(self, dml_mock, query_mock):
mock_describe_calls()
task = _make_task(DeleteData, {"options": {"objects": "Contact"}})
query_mock.return_value.get_results.return_value = iter(
["001000000000000", "001000000000001"]
)
query_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 2, 0
)
dml_mock.return_value.get_results.return_value = iter(
[
DataOperationResult("001000000000000", True, None),
DataOperationResult("001000000000001", False, None),
]
)
with pytest.raises(BulkDataException):
task()
@responses.activate
@mock.patch("cumulusci.tasks.bulkdata.delete.get_query_operation")
@mock.patch("cumulusci.tasks.bulkdata.delete.get_dml_operation")
def test_run__ignore_error(self, dml_mock, query_mock):
mock_describe_calls()
task = _make_task(
DeleteData,
{
"options": {
"objects": "Contact",
"ignore_row_errors": "true",
"hardDelete": "true",
}
},
)
query_mock.return_value.get_results.return_value = iter(
["001000000000000", "001000000000001"]
)
query_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 2, 0
)
dml_mock.return_value.get_results.return_value = iter(
[
DataOperationResult("001000000000000", True, None),
DataOperationResult("001000000000001", False, None),
]
)
dml_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 2, 0
)
with mock.patch.object(task.logger, "warning") as warning:
task()
assert len(warning.mock_calls) == 1
query_mock.assert_called_once_with(
sobject="Contact",
api_options={},
context=task,
query="SELECT Id FROM Contact",
api=DataApi.SMART,
fields=["Id"],
)
query_mock.return_value.query.assert_called_once()
query_mock.return_value.get_results.assert_called_once()
dml_mock.assert_called_once_with(
sobject="Contact",
operation=DataOperationType.HARD_DELETE,
api_options={},
context=task,
fields=["Id"],
api=DataApi.SMART,
volume=2,
)
dml_mock.return_value.start.assert_called_once()
dml_mock.return_value.end.assert_called_once()
dml_mock.return_value.load_records.assert_called_once()
dml_mock.return_value.get_results.assert_called_once()
@responses.activate
@mock.patch("cumulusci.tasks.bulkdata.delete.get_query_operation")
@mock.patch("cumulusci.tasks.bulkdata.delete.get_dml_operation")
def test_run__ignore_error_throttling(self, dml_mock, query_mock):
mock_describe_calls()
task = _make_task(
DeleteData,
{
"options": {
"objects": "Contact",
"ignore_row_errors": "true",
"hardDelete": "true",
}
},
)
query_mock.return_value.get_results.return_value = iter(
["001000000000000", "001000000000001"] * 15
)
query_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 2, 0
)
dml_mock.return_value.get_results.return_value = iter(
[
DataOperationResult("001000000000000", True, None),
DataOperationResult("001000000000001", False, None),
]
* 15
)
dml_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 2, 0
)
with mock.patch.object(task.logger, "warning") as warning:
task()
assert len(warning.mock_calls) == task.row_warning_limit + 1 == 11
assert "warnings suppressed" in str(warning.mock_calls[-1])
@responses.activate
@mock.patch("cumulusci.tasks.bulkdata.delete.get_query_operation")
@mock.patch("cumulusci.tasks.bulkdata.delete.get_dml_operation")
def test_run__where(self, dml_mock, query_mock):
mock_describe_calls()
task = _make_task(
DeleteData, {"options": {"objects": "Contact", "where": "Id != null"}}
)
query_mock.return_value.get_results.return_value = iter(
["001000000000000", "001000000000001"]
)
query_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 2, 0
)
dml_mock.return_value.get_results.return_value = iter(
[
DataOperationResult("001000000000000", True, None),
DataOperationResult("001000000000001", True, None),
]
)
dml_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.SUCCESS, [], 2, 0
)
task()
query_mock.assert_called_once_with(
sobject="Contact",
api_options={},
context=task,
query="SELECT Id FROM Contact WHERE Id != null",
fields=["Id"],
api=DataApi.SMART,
)
query_mock.return_value.query.assert_called_once()
query_mock.return_value.get_results.assert_called_once()
dml_mock.assert_called_once_with(
sobject="Contact",
operation=DataOperationType.DELETE,
api_options={},
context=task,
fields=["Id"],
api=DataApi.SMART,
volume=2,
)
dml_mock.return_value.start.assert_called_once()
dml_mock.return_value.end.assert_called_once()
dml_mock.return_value.load_records.assert_called_once()
dml_mock.return_value.get_results.assert_called_once()
@responses.activate
@mock.patch("cumulusci.tasks.bulkdata.delete.get_query_operation")
@mock.patch("cumulusci.tasks.bulkdata.delete.get_dml_operation")
def test_run__query_fails(self, dml_mock, query_mock):
mock_describe_calls()
task = _make_task(
DeleteData, {"options": {"objects": "Contact", "where": "Id != null"}}
)
query_mock.return_value.get_results.return_value = iter(
["001000000000000", "001000000000001"]
)
query_mock.return_value.job_result = DataOperationJobResult(
DataOperationStatus.JOB_FAILURE, [], 0, 0
)
with pytest.raises(BulkDataException):
task()
@responses.activate
def test_validate_and_inject_namespace__standard(self):
mock_describe_calls()
task = _make_task(DeleteData, {"options": {"objects": "Contact,Account"}})
task._validate_and_inject_namespace()
assert task.sobjects == ["Contact", "Account"]
@responses.activate
def test_validate_and_inject_namespace__missing_object(self):
mock_describe_calls()
task = _make_task(
DeleteData, {"options": {"objects": "ApexTestQueueItem,Account"}}
)
# ApexTestQueueItem is not deletable
with pytest.raises(TaskOptionsError):
task._validate_and_inject_namespace()
def test_validate_and_inject_namespace__packaged(self):
task = _make_task(DeleteData, {"options": {"objects": "Contact,Test__c"}})
task.project_config.project__package__namespace = "ns"
task.org_config = mock.Mock()
task.org_config.salesforce_client.describe.return_value = {
"sobjects": [
{"name": "ns__Test__c", "deletable": True},
{"name": "Contact", "deletable": True},
]
}
task._validate_and_inject_namespace()
assert task.sobjects == ["Contact", "ns__Test__c"]
def test_validate_and_inject_namespace__packaged_and_not(self):
task = _make_task(DeleteData, {"options": {"objects": "Contact,Test__c"}})
task.project_config.project__package__namespace = "ns"
task.org_config = mock.Mock()
task.org_config.salesforce_client.describe.return_value = {
"sobjects": [
{"name": "Test__c", "deletable": True},
{"name": "Contact", "deletable": True},
{"name": "ns__Test__c", "deletable": True},
]
}
task._validate_and_inject_namespace()
# Prefer the user entry where there is ambiguity.
assert task.sobjects == ["Contact", "Test__c"]
def test_object_description(self):
t = _make_task(DeleteData, {"options": {"objects": "a", "where": "Id != null"}})
assert t._object_description("a") == 'a objects matching "Id != null"'
t = _make_task(DeleteData, {"options": {"objects": "a"}})
assert t._object_description("a") == "all a objects"
def test_init_options(self):
with pytest.raises(TaskOptionsError):
_make_task(DeleteData, {"options": {"objects": ""}})
with pytest.raises(TaskOptionsError):
_make_task(DeleteData, {"options": {"objects": "a,b", "where": "x='y'"}})
with pytest.raises(TaskOptionsError):
_make_task(DeleteData, {"options": {"objects": "a", "api": "blah"}})
with pytest.raises(TaskOptionsError):
_make_task(
DeleteData,
{"options": {"objects": "a", "api": "rest", "hardDelete": True}},
)
t = _make_task(
DeleteData,
{
"options": {
"objects": "a",
"where": "Id != null",
"hardDelete": "true",
"ignore_row_errors": "false",
}
},
)
assert t.options["where"] == "Id != null"
assert not t.options["ignore_row_errors"]
assert t.options["hardDelete"]
t = _make_task(DeleteData, {"options": {"objects": "a,b"}})
assert t.options["objects"] == ["a", "b"]
| 37.767901 | 88 | 0.611271 |
a05ed2c72508b4e6dbe84f5ecfe1b8b59e7d2f54
| 12,153 |
py
|
Python
|
neutronclient/tests/unit/osc/v2/sfc/test_service_graph.py
|
casual-lemon/python-neutronclient
|
adf21f028827b8a4e32057334db4cd464c24c4d0
|
[
"Apache-2.0"
] | 120 |
2015-01-07T00:38:58.000Z
|
2021-12-26T13:05:53.000Z
|
neutronclient/tests/unit/osc/v2/sfc/test_service_graph.py
|
casual-lemon/python-neutronclient
|
adf21f028827b8a4e32057334db4cd464c24c4d0
|
[
"Apache-2.0"
] | 1 |
2021-08-11T18:42:30.000Z
|
2021-08-11T22:25:21.000Z
|
neutronclient/tests/unit/osc/v2/sfc/test_service_graph.py
|
casual-lemon/python-neutronclient
|
adf21f028827b8a4e32057334db4cd464c24c4d0
|
[
"Apache-2.0"
] | 153 |
2015-01-05T16:50:50.000Z
|
2021-09-13T12:01:23.000Z
|
# Copyright 2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from osc_lib import exceptions
from osc_lib.tests import utils as tests_utils
from neutronclient.osc.v2.sfc import sfc_service_graph
from neutronclient.tests.unit.osc.v2.sfc import fakes
def _get_id(client, id_or_name, resource):
return id_or_name
class TestListSfcServiceGraph(fakes.TestNeutronClientOSCV2):
_service_graphs = fakes.FakeSfcServiceGraph.create_sfc_service_graphs(
count=1)
columns = ('ID', 'Name', 'Branching Points')
columns_long = ('ID', 'Name', 'Branching Points', 'Description', 'Project')
_service_graph = _service_graphs[0]
data = [
_service_graph['id'],
_service_graph['name'],
_service_graph['port_chains']
]
data_long = [
_service_graph['id'],
_service_graph['name'],
_service_graph['port_chains'],
_service_graph['description'],
_service_graph['project_id']
]
_service_graph1 = {'service_graphs': _service_graph}
_service_graph_id = _service_graph['id']
def setUp(self):
super(TestListSfcServiceGraph, self).setUp()
mock.patch(
'neutronclient.osc.v2.sfc.sfc_service_graph._get_id',
new=_get_id).start()
self.neutronclient.list_sfc_service_graphs = mock.Mock(
return_value={'service_graphs': self._service_graphs}
)
# Get the command object to test
self.cmd = sfc_service_graph.ListSfcServiceGraph(
self.app, self.namespace)
def test_list_sfc_service_graphs(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns = self.cmd.take_action(parsed_args)[0]
sgs = self.neutronclient.list_sfc_service_graphs()['service_graphs']
sg = sgs[0]
data = [
sg['id'],
sg['name'],
sg['port_chains']
]
self.assertEqual(list(self.columns), columns)
self.assertEqual(self.data, data)
def test_list_sfc_service_graphs_with_long_option(self):
arglist = ['--long']
verifylist = [('long', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns = self.cmd.take_action(parsed_args)[0]
sgs = self.neutronclient.list_sfc_service_graphs()['service_graphs']
sg = sgs[0]
data = [
sg['id'],
sg['name'],
sg['port_chains'],
sg['description'],
sg['project_id']
]
self.assertEqual(list(self.columns_long), columns)
self.assertEqual(self.data_long, data)
class TestCreateSfcServiceGraph(fakes.TestNeutronClientOSCV2):
_service_graph = fakes.FakeSfcServiceGraph.create_sfc_service_graph()
columns = ('ID', 'Name', 'Branching Points')
columns_long = ('Branching Points', 'Description', 'ID', 'Name', 'Project')
def get_data(self):
return (
self._service_graph['port_chains'],
self._service_graph['description'],
self._service_graph['id'],
self._service_graph['name'],
self._service_graph['project_id'],
)
def setUp(self):
super(TestCreateSfcServiceGraph, self).setUp()
mock.patch('neutronclient.osc.v2.sfc.sfc_service_graph._get_id',
new=_get_id).start()
self.neutronclient.create_sfc_service_graph = mock.Mock(
return_value={'service_graph': self._service_graph})
self.data = self.get_data()
self.cmd = sfc_service_graph.CreateSfcServiceGraph(
self.app, self.namespace)
def test_create_sfc_service_graph(self):
arglist = []
verifylist = []
self.assertRaises(tests_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
def test_create_sfc_service_graph_without_loop(self):
bp1_str = 'pc1:pc2,pc3'
bp2_str = 'pc2:pc4'
self.cmd = sfc_service_graph.CreateSfcServiceGraph(
self.app, self.namespace)
arglist = [
"--description", self._service_graph['description'],
"--branching-point", bp1_str,
"--branching-point", bp2_str,
self._service_graph['name']]
pcs = {'pc1': ['pc2', 'pc3'], 'pc2': ['pc4']}
verifylist = [
("description", self._service_graph['description']),
("branching_points", [bp1_str, bp2_str]),
("name", self._service_graph['name'])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = (self.cmd.take_action(parsed_args))
self.neutronclient.create_sfc_service_graph.assert_called_once_with({
'service_graph': {
'description': self._service_graph['description'],
'name': self._service_graph['name'],
'port_chains': pcs
}
})
self.assertEqual(self.columns_long, columns)
self.assertEqual(self.data, data)
def test_create_sfc_service_graph_with_loop(self):
bp1_str = 'pc1:pc2,pc3;'
bp2_str = 'pc2:pc1'
self.cmd = sfc_service_graph.CreateSfcServiceGraph(
self.app, self.namespace)
arglist = [
"--description", self._service_graph['description'],
"--branching-point", bp1_str,
"--branching-point", bp2_str,
self._service_graph['name']]
verifylist = [
("description", self._service_graph['description']),
("branching_points", [bp1_str, bp2_str]),
("name", self._service_graph['name'])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
def test_create_sfc_service_graph_invalid_port_chains(self):
bp1_str = 'pc1:pc2,pc3:'
self.cmd = sfc_service_graph.CreateSfcServiceGraph(
self.app, self.namespace)
arglist = [
"--description", self._service_graph['description'],
"--branching-point", bp1_str,
self._service_graph['name']]
verifylist = [
("description", self._service_graph['description']),
("branching_points", [bp1_str]),
("name", self._service_graph['name'])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
def test_create_sfc_service_graph_duplicate_src_chains(self):
bp1_str = 'pc1:pc2,pc3;'
bp2_str = 'pc1:pc4'
self.cmd = sfc_service_graph.CreateSfcServiceGraph(
self.app, self.namespace)
arglist = [
"--description", self._service_graph['description'],
"--branching-point", bp1_str,
"--branching-point", bp2_str,
self._service_graph['name']]
verifylist = [
("description", self._service_graph['description']),
("branching_points", [bp1_str, bp2_str]),
("name", self._service_graph['name'])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
class TestDeleteSfcServiceGraph(fakes.TestNeutronClientOSCV2):
_service_graph = fakes.FakeSfcServiceGraph.create_sfc_service_graphs(
count=1)
def setUp(self):
super(TestDeleteSfcServiceGraph, self).setUp()
self.neutronclient.delete_sfc_service_graph = mock.Mock(
return_value=None)
self.cmd = sfc_service_graph.DeleteSfcServiceGraph(
self.app, self.namespace)
def test_delete_sfc_service_graph(self):
client = self.app.client_manager.neutronclient
mock_service_graph_delete = client.delete_sfc_service_graph
arglist = [
self._service_graph[0]['id'],
]
verifylist = [
('service_graph', self._service_graph[0]['id']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
mock_service_graph_delete.assert_called_once_with(
self._service_graph[0]['id'])
self.assertIsNone(result)
class TestShowSfcServiceGraph(fakes.TestNeutronClientOSCV2):
_sg = fakes.FakeSfcServiceGraph.create_sfc_service_graph()
columns = ('ID', 'Name', 'Branching Points')
columns_long = ('Branching Points', 'Description', 'ID', 'Name', 'Project')
data = (
_sg['id'],
_sg['name'],
_sg['port_chains']
)
data_long = (
_sg['port_chains'],
_sg['description'],
_sg['id'],
_sg['name'],
_sg['project_id']
)
_service_graph = {'service_graph': _sg}
_service_graph_id = _sg['id']
def setUp(self):
super(TestShowSfcServiceGraph, self).setUp()
mock.patch(
'neutronclient.osc.v2.sfc.sfc_service_graph._get_id',
new=_get_id).start()
self.neutronclient.show_sfc_service_graph = mock.Mock(
return_value=self._service_graph
)
# Get the command object to test
self.cmd = sfc_service_graph.ShowSfcServiceGraph(
self.app, self.namespace)
def test_service_graph_show(self):
client = self.app.client_manager.neutronclient
mock_service_graph_show = client.show_sfc_service_graph
arglist = [
self._service_graph_id,
]
verifylist = [
('service_graph', self._service_graph_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
mock_service_graph_show.assert_called_once_with(self._service_graph_id)
self.assertEqual(self.columns_long, columns)
self.assertEqual(self.data_long, data)
class TestSetSfcServiceGraph(fakes.TestNeutronClientOSCV2):
_service_graph = fakes.FakeSfcServiceGraph.create_sfc_service_graph()
_service_graph_name = _service_graph['name']
_service_graph_id = _service_graph['id']
def setUp(self):
super(TestSetSfcServiceGraph, self).setUp()
mock.patch('neutronclient.osc.v2.sfc.sfc_service_graph._get_id',
new=_get_id).start()
self.neutronclient.update_sfc_service_graph = mock.Mock(
return_value=None)
self.cmd = sfc_service_graph.SetSfcServiceGraph(
self.app, self.namespace)
def test_set_service_graph(self):
client = self.app.client_manager.neutronclient
mock_service_graph_update = client.update_sfc_service_graph
arglist = [
self._service_graph_name,
'--name', 'name_updated',
'--description', 'desc_updated'
]
verifylist = [
('service_graph', self._service_graph_name),
('name', 'name_updated'),
('description', 'desc_updated'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
attrs = {'service_graph': {
'name': 'name_updated',
'description': 'desc_updated'}
}
mock_service_graph_update.assert_called_once_with(
self._service_graph_name, attrs)
self.assertIsNone(result)
| 36.062315 | 79 | 0.630462 |
eff3821df4d80b5a44f55f1a4eef1fec407651c1
| 2,227 |
py
|
Python
|
scripts/generate_delta_sysroot_unittest.py
|
bpsinc-native/src_third_party_chromite
|
b07cf18203c98a14c59819387754428e887ca164
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/generate_delta_sysroot_unittest.py
|
bpsinc-native/src_third_party_chromite
|
b07cf18203c98a14c59819387754428e887ca164
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/generate_delta_sysroot_unittest.py
|
bpsinc-native/src_third_party_chromite
|
b07cf18203c98a14c59819387754428e887ca164
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for generate_delta_sysroot."""
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', '..'))
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.scripts import generate_delta_sysroot as gds
# pylint: disable=W0212
def _Parse(argv):
return gds._ParseCommandLine(argv)
class InterfaceTest(cros_test_lib.OutputTestCase,
cros_test_lib.TempDirTestCase):
"""Test the commandline interface of the script"""
def testNoBoard(self):
"""Test no board specified."""
argv = ['--out-dir', '/path/to/nowhere']
self.assertParseError(argv)
def testNoOutDir(self):
"""Test no out dir specified."""
argv = ['--board', 'link']
self.assertParseError(argv)
def testCorrectArgv(self):
"""Test successful parsing"""
argv = ['--board', 'link', '--out-dir', self.tempdir]
options = _Parse(argv)
gds.FinishParsing(options)
def testTestsSet(self):
"""Test successful parsing"""
argv = ['--board', 'link', '--out-dir', self.tempdir]
options = _Parse(argv)
self.assertTrue(options.build_tests)
def testNoTestsSet(self):
"""Test successful parsing"""
argv = ['--board', 'link', '--out-dir', self.tempdir, '--skip-tests']
options = _Parse(argv)
self.assertFalse(options.build_tests)
def assertParseError(self, argv):
"""Helper to assert parsing error, given argv."""
with self.OutputCapturer():
self.assertRaises2(SystemExit, _Parse, argv)
class TestCreateBatchFile(cros_test_lib.TempDirTestCase):
"""Test the batch file creation."""
def testSourceDirDoesNotExist(self):
"""Test error is raised if there is no source directory."""
no_source = os.path.join(self.tempdir, 'foo/bar/cow')
self.assertRaises2(cros_build_lib.RunCommandError, gds.CreateBatchFile,
                       no_source, self.tempdir, os.path.join(self.tempdir, 'batch'))
if __name__ == '__main__':
cros_test_lib.main()
| 29.693333 | 76 | 0.684329 |
2fde3a60040a326a1fee60ef83807be9badc3cf5
| 892 |
py
|
Python
|
pythonbytes/tutorials/migrations/0008_tutorial.py
|
hanztura/pythonbytes
|
414e3f1443f58ccc094b40622688ca234041fc1c
|
[
"MIT"
] | 1 |
2019-12-16T19:25:19.000Z
|
2019-12-16T19:25:19.000Z
|
pythonbytes/tutorials/migrations/0008_tutorial.py
|
hanztura/pythonbytes
|
414e3f1443f58ccc094b40622688ca234041fc1c
|
[
"MIT"
] | 9 |
2020-06-06T01:07:19.000Z
|
2022-02-10T09:01:57.000Z
|
pythonbytes/tutorials/migrations/0008_tutorial.py
|
hanztura/pythonbytes
|
414e3f1443f58ccc094b40622688ca234041fc1c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.8 on 2019-12-17 06:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0022_auto_20180620_1551'),
('tutorials', '0007_delete_tutorial'),
]
operations = [
migrations.CreateModel(
name='Tutorial',
fields=[
('cmsplugin_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='tutorials_tutorial', serialize=False, to='cms.CMSPlugin')),
('body', models.TextField(verbose_name='body')),
('tutorial_title', models.CharField(max_length=200)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| 29.733333 | 209 | 0.591928 |
0a29bd6e49e764f1c4da93364e9ef0e2c220f3b3
| 2,707 |
py
|
Python
|
tests/modules/transformer/positional_encoding_test.py
|
MSLars/allennlp
|
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
|
[
"Apache-2.0"
] | 11,433 |
2017-06-27T03:08:46.000Z
|
2022-03-31T18:14:33.000Z
|
tests/modules/transformer/positional_encoding_test.py
|
MSLars/allennlp
|
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
|
[
"Apache-2.0"
] | 4,006 |
2017-06-26T21:45:43.000Z
|
2022-03-31T02:11:10.000Z
|
tests/modules/transformer/positional_encoding_test.py
|
MSLars/allennlp
|
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
|
[
"Apache-2.0"
] | 2,560 |
2017-06-26T21:16:53.000Z
|
2022-03-30T07:55:46.000Z
|
import copy
import torch
import numpy as np
from allennlp.common import Params
from allennlp.modules.transformer import SinusoidalPositionalEncoding
from allennlp.common.testing import AllenNlpTestCase
class TestSinusoidalPositionalEncoding(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.params_dict = {
"min_timescale": 1.0,
"max_timescale": 1.0e4,
}
params = Params(copy.deepcopy(self.params_dict))
self.positional_encoding = SinusoidalPositionalEncoding.from_params(params)
def test_can_construct_from_params(self):
assert self.positional_encoding.min_timescale == self.params_dict["min_timescale"]
assert self.positional_encoding.max_timescale == self.params_dict["max_timescale"]
def test_forward(self):
tensor2tensor_result = np.asarray(
[
[0.00000000e00, 0.00000000e00, 1.00000000e00, 1.00000000e00],
[8.41470957e-01, 9.99999902e-05, 5.40302277e-01, 1.00000000e00],
[9.09297407e-01, 1.99999980e-04, -4.16146845e-01, 1.00000000e00],
]
)
tensor = torch.zeros([2, 3, 4])
result = self.positional_encoding(tensor)
np.testing.assert_almost_equal(result[0].detach().cpu().numpy(), tensor2tensor_result)
np.testing.assert_almost_equal(result[1].detach().cpu().numpy(), tensor2tensor_result)
# Check case with odd number of dimensions.
tensor2tensor_result = np.asarray(
[
[
0.00000000e00,
0.00000000e00,
0.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
0.00000000e00,
],
[
8.41470957e-01,
9.99983307e-03,
9.99999902e-05,
5.40302277e-01,
9.99949992e-01,
1.00000000e00,
0.00000000e00,
],
[
9.09297407e-01,
1.99986659e-02,
1.99999980e-04,
-4.16146815e-01,
9.99800026e-01,
1.00000000e00,
0.00000000e00,
],
]
)
tensor = torch.zeros([2, 3, 7])
result = self.positional_encoding(tensor)
np.testing.assert_almost_equal(result[0].detach().cpu().numpy(), tensor2tensor_result)
np.testing.assert_almost_equal(result[1].detach().cpu().numpy(), tensor2tensor_result)
| 35.155844 | 94 | 0.549686 |
98bd7b6a63b51d6510d82ed04e9d4f1ed5b0fe12
| 1,380 |
py
|
Python
|
src_python/WS2801/EffectColorWheel.py
|
AmiableAnteater/wohnzimmerlicht
|
eafda7911d95ee9f1b9a222e3e208e2debe8e03e
|
[
"Apache-2.0"
] | null | null | null |
src_python/WS2801/EffectColorWheel.py
|
AmiableAnteater/wohnzimmerlicht
|
eafda7911d95ee9f1b9a222e3e208e2debe8e03e
|
[
"Apache-2.0"
] | null | null | null |
src_python/WS2801/EffectColorWheel.py
|
AmiableAnteater/wohnzimmerlicht
|
eafda7911d95ee9f1b9a222e3e208e2debe8e03e
|
[
"Apache-2.0"
] | null | null | null |
from .WS2801Wrapper import WS2801Wrapper
from threading import Thread, Event
from .EffectUtils import run_effect
from time import monotonic
def effect(pixels: WS2801Wrapper, event: Event, stretch_factor=1, speed=10):
pixel_count = pixels.count()
stretched_wheel_size = int(round(255 * stretch_factor))
inverse_factor = 255 / stretched_wheel_size
zero_pos = 0
do_run = True
last_time = monotonic()
while do_run:
current_time = monotonic()
time_delta = current_time - last_time
#print('delta:' + str(time_delta))
last_time = current_time
zero_pos += time_delta * speed
zero_pos = zero_pos % stretched_wheel_size
#print(str(zero_pos))
pixel_idx = 0
pos_iterator = zero_pos
while pixel_idx < pixel_count:
color_index = round(pos_iterator * inverse_factor)
#print('pixel_idx: ' + str(pixel_idx) + 'col_idx:' + str(color_index))
pixels.set_pixel_colorwheel(color_index, pixel_idx)
pos_iterator += 1
if pos_iterator > stretched_wheel_size:
pos_iterator -= stretched_wheel_size
pixel_idx += 1
pixels.show()
if event.wait(.02):
do_run = False
if __name__ == "__main__":
run_effect(effect, (.75, 10))
| 34.5 | 83 | 0.618841 |
03afb57b3c473fcd7964907911140d488a45e053
| 16,608 |
py
|
Python
|
postfinancecheckout/api/shopify_recurring_order_service_api.py
|
pfpayments/python-sdk
|
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
|
[
"Apache-2.0"
] | 1 |
2022-03-08T12:51:53.000Z
|
2022-03-08T12:51:53.000Z
|
postfinancecheckout/api/shopify_recurring_order_service_api.py
|
pfpayments/python-sdk
|
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
|
[
"Apache-2.0"
] | null | null | null |
postfinancecheckout/api/shopify_recurring_order_service_api.py
|
pfpayments/python-sdk
|
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
import six
from postfinancecheckout.api_client import ApiClient
class ShopifyRecurringOrderServiceApi:
def __init__(self, configuration):
self.api_client = ApiClient(configuration=configuration)
def count(self, space_id, **kwargs):
"""Count
Counts the number of items in the database as restricted by the given filter.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.count(space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param EntityQueryFilter filter: The filter which restricts the entities which are used to calculate the count.
:return: int
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.count_with_http_info(space_id, **kwargs)
else:
(data) = self.count_with_http_info(space_id, **kwargs)
return data
def count_with_http_info(self, space_id, **kwargs):
"""Count
Counts the number of items in the database as restricted by the given filter.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.count_with_http_info(space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param EntityQueryFilter filter: The filter which restricts the entities which are used to calculate the count.
:return: int
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['space_id', 'filter']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method count" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'space_id' is set
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `count`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'filter' in params:
body_params = params['filter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json;charset=utf-8'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/shopify-recurring-order/count', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='int',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read(self, space_id, id, **kwargs):
"""Read
Reads the entity with the given 'id' and returns it.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read(space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param int id: The id of the Shopify recurring order which should be returned. (required)
:return: ShopifyRecurringOrder
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_with_http_info(space_id, id, **kwargs)
else:
(data) = self.read_with_http_info(space_id, id, **kwargs)
return data
def read_with_http_info(self, space_id, id, **kwargs):
"""Read
Reads the entity with the given 'id' and returns it.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_with_http_info(space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param int id: The id of the Shopify recurring order which should be returned. (required)
:return: ShopifyRecurringOrder
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['space_id', 'id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'space_id' is set
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `read`")
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `read`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
if 'id' in params:
query_params.append(('id', params['id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['*/*'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/shopify-recurring-order/read', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ShopifyRecurringOrder',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search(self, space_id, query, **kwargs):
"""Search
Searches for the entities as specified by the given query.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search(space_id, query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param EntityQuery query: The query restricts the Shopify recurring orders which are returned by the search. (required)
:return: list[ShopifyRecurringOrder]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_with_http_info(space_id, query, **kwargs)
else:
(data) = self.search_with_http_info(space_id, query, **kwargs)
return data
def search_with_http_info(self, space_id, query, **kwargs):
"""Search
Searches for the entities as specified by the given query.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_with_http_info(space_id, query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param EntityQuery query: The query restricts the Shopify recurring orders which are returned by the search. (required)
:return: list[ShopifyRecurringOrder]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['space_id', 'query']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'space_id' is set
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `search`")
# verify the required parameter 'query' is set
if ('query' not in params or
params['query'] is None):
raise ValueError("Missing the required parameter `query` when calling `search`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'query' in params:
body_params = params['query']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json;charset=utf-8'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/shopify-recurring-order/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ShopifyRecurringOrder]',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update(self, space_id, update_request, **kwargs):
"""Update
        This operation allows a Shopify recurring order to be updated.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update(space_id, update_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param ShopifyRecurringOrderUpdateRequest update_request: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_with_http_info(space_id, update_request, **kwargs)
else:
(data) = self.update_with_http_info(space_id, update_request, **kwargs)
return data
def update_with_http_info(self, space_id, update_request, **kwargs):
"""Update
        This operation allows a Shopify recurring order to be updated.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_with_http_info(space_id, update_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param ShopifyRecurringOrderUpdateRequest update_request: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['space_id', 'update_request']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'space_id' is set
if ('space_id' not in params or
params['space_id'] is None):
raise ValueError("Missing the required parameter `space_id` when calling `update`")
# verify the required parameter 'update_request' is set
if ('update_request' not in params or
params['update_request'] is None):
raise ValueError("Missing the required parameter `update_request` when calling `update`")
collection_formats = {}
path_params = {}
query_params = []
if 'space_id' in params:
query_params.append(('spaceId', params['space_id']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'update_request' in params:
body_params = params['update_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=utf-8'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json;charset=utf-8'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/shopify-recurring-order/update', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
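# Illustrative usage sketch for the service class above -- `configuration`, the
# space id and the entity id are placeholders, not values from this file; the
# sync/async split follows the docstrings:
#
#   api = ShopifyRecurringOrderServiceApi(configuration)
#   order = api.read(space_id=405, id=42)                   # synchronous call
#   thread = api.read(space_id=405, id=42, async_req=True)  # asynchronous call
#   order = thread.get()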
| 37.321348 | 127 | 0.608321 |
8a6647b55bef1896cd57f9e8bee86a7ed1728add
| 880 |
py
|
Python
|
azure/servicefabric/models/service_name_info.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 4 |
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure/servicefabric/models/service_name_info.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 |
2016-09-30T21:40:24.000Z
|
2017-11-10T18:16:18.000Z
|
azure/servicefabric/models/service_name_info.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 |
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ServiceNameInfo(Model):
"""Information about the service name.
:param id:
:type id: str
:param name:
:type name: str
"""
_attribute_map = {
'id': {'key': 'Id', 'type': 'str'},
'name': {'key': 'Name', 'type': 'str'},
}
def __init__(self, id=None, name=None):
self.id = id
self.name = name
| 27.5 | 76 | 0.529545 |
50ff8f842fdb67beb9c2ec270900ef8ad9891074
| 2,360 |
py
|
Python
|
test/CXX/CXXVERSION.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 1,403 |
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/CXX/CXXVERSION.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 3,708 |
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/CXX/CXXVERSION.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 281 |
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
if sys.platform == 'win32':
test.skip_test('CXXVERSION not set with MSVC, skipping test.')
test.write("versioned.py", """\
import subprocess
import sys
if '-dumpversion' in sys.argv:
print('3.9.9')
sys.exit(0)
if '--version' in sys.argv:
print('this is version 2.9.9 wrapping', sys.argv[2])
sys.exit(0)
if sys.argv[1] not in [ '2.9.9', '3.9.9' ]:
print('wrong version', sys.argv[1], 'when wrapping', sys.argv[2])
sys.exit(1)
subprocess.run(" ".join(sys.argv[2:]), shell=True)
""")
test.write('SConstruct', """
cxx = Environment().Dictionary('CXX')
foo = Environment(CXX = r'%(_python_)s versioned.py "${CXXVERSION}" ' + cxx)
foo.Program(target = 'foo', source = 'foo.cpp')
""" % locals())
test.write('foo.cpp', r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
printf("foo.c\n");
exit (0);
}
""")
test.run(arguments = 'foo' + _exe)
test.up_to_date(arguments = 'foo' + _exe)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 28.433735 | 76 | 0.709322 |
935ca53c265ed1025f0064513f00ead08e57e971
| 10,329 |
py
|
Python
|
estimagic/dashboard/monitoring_app.py
|
yradeva93/estimagic
|
77d840ba01748314b35be8117c99460a1944062f
|
[
"BSD-3-Clause"
] | null | null | null |
estimagic/dashboard/monitoring_app.py
|
yradeva93/estimagic
|
77d840ba01748314b35be8117c99460a1944062f
|
[
"BSD-3-Clause"
] | null | null | null |
estimagic/dashboard/monitoring_app.py
|
yradeva93/estimagic
|
77d840ba01748314b35be8117c99460a1944062f
|
[
"BSD-3-Clause"
] | null | null | null |
"""Show the development of one optimization's criterion and parameters over time."""
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
from bokeh.layouts import Column
from bokeh.layouts import Row
from bokeh.models import ColumnDataSource
from bokeh.models import Panel
from bokeh.models import Tabs
from bokeh.models import Toggle
from jinja2 import Environment
from jinja2 import FileSystemLoader
from estimagic.dashboard.monitoring_callbacks import activation_callback
from estimagic.dashboard.monitoring_callbacks import logscale_callback
from estimagic.dashboard.plot_functions import plot_time_series
from estimagic.logging.database_utilities import load_database
from estimagic.logging.database_utilities import read_last_rows
from estimagic.logging.read_log import read_start_params
def monitoring_app(
doc,
database_name,
session_data,
updating_options,
start_immediately,
):
"""Create plots showing the development of the criterion and parameters.
Args:
doc (bokeh.Document): Argument required by bokeh.
database_name (str): Short and unique name of the database.
session_data (dict): Infos to be passed between and within apps.
Keys of this app's entry are:
- last_retrieved (int): last iteration currently in the ColumnDataSource.
- database_path (str or pathlib.Path)
- callbacks (dict): dictionary to be populated with callbacks.
updating_options (dict): Specification how to update the plotting data.
It contains rollover, update_frequency, update_chunk, jump and stride.
"""
# style the Document
template_folder = Path(__file__).resolve().parent
# conversion to string from pathlib Path is necessary for FileSystemLoader
env = Environment(loader=FileSystemLoader(str(template_folder)))
doc.template = env.get_template("index.html")
# process inputs
database = load_database(path=session_data["database_path"])
start_point = _calculate_start_point(database, updating_options)
session_data["last_retrieved"] = start_point
start_params = read_start_params(path_or_database=database)
start_params["id"] = _create_id_column(start_params)
group_to_param_ids = _map_group_to_other_column(start_params, "id")
group_to_param_names = _map_group_to_other_column(start_params, "name")
criterion_history, params_history = _create_cds_for_monitoring_app(
group_to_param_ids
)
# create elements
button_row = _create_button_row(
doc=doc,
database=database,
session_data=session_data,
start_params=start_params,
updating_options=updating_options,
)
monitoring_plots = _create_initial_convergence_plots(
criterion_history=criterion_history,
params_history=params_history,
group_to_param_ids=group_to_param_ids,
group_to_param_names=group_to_param_names,
)
# add elements to bokeh Document
grid = Column(children=[button_row, *monitoring_plots], sizing_mode="stretch_width")
convergence_tab = Panel(child=grid, title="Convergence Tab")
tabs = Tabs(tabs=[convergence_tab])
doc.add_root(tabs)
if start_immediately:
activation_button = doc.get_model_by_name("activation_button")
activation_button.active = True
def _create_id_column(df):
"""Create a column that gives the position for plotted parameters and is None else.
Args:
df (pd.DataFrame)
Returns:
ids (pd.Series): integer position in the DataFrame unless the group was
None, False, np.nan or an empty string.
"""
ids = pd.Series(range(len(df)), dtype=object, index=df.index)
ids[df["group"].isin([None, False, np.nan, ""])] = None
return ids.astype(str)
def _map_group_to_other_column(params, column_name):
"""Map the group name to lists of one column's values of the group's parameters.
Args:
params (pd.DataFrame): Includes the "group" and "id" columns.
column_name (str): name of the column for which to return the parameter values.
Returns:
group_to_values (dict): Keys are the values of the "group" column.
The values are lists of parameter values of the parameters belonging
to the particular group.
"""
to_plot = params[~params["group"].isin([None, False, np.nan, ""])]
group_to_indices = to_plot.groupby("group").groups
group_to_values = {}
for group, loc in group_to_indices.items():
group_to_values[group] = to_plot[column_name].loc[loc].tolist()
return group_to_values
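# Illustrative example of the two helpers above, using a hypothetical
# three-parameter frame (values are made up for the sketch):
#
#   start_params = pd.DataFrame(
#       {"group": ["slopes", "slopes", None], "name": ["a", "b", "c"]}
#   )
#   start_params["id"] = _create_id_column(start_params)    # -> ["0", "1", "None"]
#   _map_group_to_other_column(start_params, "id")           # -> {"slopes": ["0", "1"]}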
def _create_cds_for_monitoring_app(group_to_param_ids):
"""Create the ColumnDataSources for saving the criterion and parameter values.
They will be periodically updated from the database.
There is a ColumnDataSource for all parameters and one for the criterion value.
The "x" column is called "iteration".
Args:
group_to_param_ids (dict): Keys are the groups to be plotted. The values are
the ids of the parameters belonging to the particular group.
Returns:
criterion_history (bokeh.ColumnDataSource)
params_history (bokeh.ColumnDataSource)
"""
crit_data = {"iteration": [], "criterion": []}
criterion_history = ColumnDataSource(crit_data, name="criterion_history_cds")
param_ids = []
for id_list in group_to_param_ids.values():
param_ids += id_list
params_data = {id_: [] for id_ in param_ids + ["iteration"]}
params_history = ColumnDataSource(params_data, name="params_history_cds")
return criterion_history, params_history
def _calculate_start_point(database, updating_options):
"""Calculate the starting point.
Args:
database (sqlalchemy.MetaData): Bound metadata object.
updating_options (dict): Specification how to update the plotting data.
It contains rollover, update_frequency, update_chunk, jump and stride.
Returns:
start_point (int): iteration from which to start the dashboard.
"""
if updating_options["jump"]:
last_entry = read_last_rows(
database=database,
table_name="optimization_iterations",
n_rows=1,
return_type="list_of_dicts",
)
nr_of_entries = last_entry[0]["rowid"]
nr_to_go_back = updating_options["rollover"] * updating_options["stride"]
start_point = max(0, nr_of_entries - nr_to_go_back)
else:
start_point = 0
return start_point
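# Worked example with hypothetical numbers: for 10_000 logged iterations,
# rollover=500 and stride=2, nr_to_go_back = 500 * 2 = 1_000, so with jump=True
# the dashboard starts reading at iteration max(0, 10_000 - 1_000) = 9_000.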
def _create_initial_convergence_plots(
criterion_history,
params_history,
group_to_param_ids,
group_to_param_names,
):
"""Create the initial convergence plots.
Args:
criterion_history (bokeh ColumnDataSource)
params_history (bokeh ColumnDataSource)
group_to_param_ids (dict): Keys are the groups to be plotted. Values are the
ids of the parameters belonging to the respective group.
group_to_param_names (dict): Keys are the groups to be plotted. Values are the
names of the parameters belonging to the respective group.
Returns:
convergence_plots (list): List of bokeh Row elements, each containing one
convergence plot.
"""
param_plots = []
for group, param_ids in group_to_param_ids.items():
param_names = group_to_param_names[group]
param_group_plot = plot_time_series(
data=params_history,
y_keys=param_ids,
y_names=param_names,
x_name="iteration",
title=str(group),
)
param_plots.append(param_group_plot)
arranged_param_plots = [Row(plot) for plot in param_plots]
linear_criterion_plot = plot_time_series(
data=criterion_history,
x_name="iteration",
y_keys=["criterion"],
y_names=["criterion"],
title="Criterion",
name="linear_criterion_plot",
logscale=False,
)
log_criterion_plot = plot_time_series(
data=criterion_history,
x_name="iteration",
y_keys=["criterion"],
y_names=["criterion"],
title="Criterion",
name="log_criterion_plot",
logscale=True,
)
log_criterion_plot.visible = False
plot_list = [
Row(linear_criterion_plot),
Row(log_criterion_plot),
] + arranged_param_plots
return plot_list
def _create_button_row(
doc,
database,
session_data,
start_params,
updating_options,
):
"""Create a row with two buttons, one for (re)starting and one for scale switching.
Args:
doc (bokeh.Document)
database (sqlalchemy.MetaData): Bound metadata object.
session_data (dict): dictionary with the last retrieved rowid
start_params (pd.DataFrame): See :ref:`params`
updating_options (dict): Specification how to update the plotting data.
It contains rollover, update_frequency, update_chunk, jump and stride.
Returns:
bokeh.layouts.Row
"""
# (Re)start convergence plot button
activation_button = Toggle(
active=False,
label="Start Updating",
button_type="danger",
width=200,
height=30,
name="activation_button",
)
partialed_activation_callback = partial(
activation_callback,
button=activation_button,
doc=doc,
database=database,
session_data=session_data,
tables=["criterion_history", "params_history"],
start_params=start_params,
updating_options=updating_options,
)
activation_button.on_change("active", partialed_activation_callback)
# switch between linear and logscale button
logscale_button = Toggle(
active=False,
label="Show criterion plot on a logarithmic scale",
button_type="default",
width=200,
height=30,
name="logscale_button",
)
partialed_logscale_callback = partial(
logscale_callback,
button=logscale_button,
doc=doc,
)
logscale_button.on_change("active", partialed_logscale_callback)
button_row = Row(children=[activation_button, logscale_button], name="button_row")
return button_row
| 33.976974 | 88 | 0.694743 |
8311b69f65ad40d4f389e3f338ef96d15cb97557
| 4,426 |
py
|
Python
|
app.py
|
callzhang/qdtrack
|
4cdd29281a9dc81a9265c916540be1bd2dce08fd
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
callzhang/qdtrack
|
4cdd29281a9dc81a9265c916540be1bd2dce08fd
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
callzhang/qdtrack
|
4cdd29281a9dc81a9265c916540be1bd2dce08fd
|
[
"Apache-2.0"
] | null | null | null |
from qdtrack.apis.inference import init_model, inference_model, export_video
from tools.post_processing import post_processing, convert_result, merge_result
from glob import glob
import pickle, os, torch, sys, logging
from flask import Flask, request, jsonify, send_file
from werkzeug.utils import secure_filename
# fairmot
from src import _init_paths
import datasets.dataset.jde as datasets
from opts import opts
from track import eval_seq
from tracking_utils.log import logger
logger.setLevel(logging.INFO)
class_ID = {
'Car': 0,
'Van': 1,
'Truck': 2,
'Pedestrian': 3,
'Bus': 4,
'Misc': 5,
'DontCare': 6,
'ignored': 7
}
ID_class = {v: k for k, v in class_ID.items()}
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 50 # 50MB max
# app.config['UPLOAD_EXTENSIONS'] = ['.mp4']
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = None
@app.route("/tracking", methods=["POST"])
def track():
file = request.files['video']
assert file.content_type[:6] == 'video/', 'Invalid content_type, must upload a video file (*.mp4)'
filename = secure_filename(file.filename)
#TODO: pass link from aliyun OSS
# save to temp folder
video_path = f'temp/{filename}'
os.makedirs('temp', exist_ok=True)
file.save(video_path)
# inference
result = inference_model(model, video_path)
output = convert_result(result)
#post processing
view_video = request.form.get('view_video', False)
result = post_processing(output, video_path, model.CLASSES, get_video=view_video)
if view_video:
return send_file(result)
return jsonify(result)
@app.route("/dual_tracking", methods=["POST"])
def dual_track():
'''
    Track a video with qdtrack and fairmot.
'''
file = request.files['video']
assert file.content_type[:6] == 'video/', 'Invalid content_type, must upload a video file (*.mp4)'
filename = secure_filename(file.filename)
#TODO: pass link from aliyun OSS
# save to temp folder
video_path = f'temp/{filename}'
os.makedirs('temp', exist_ok=True)
file.save(video_path)
# inference vihicle
result_dict_1 = inference_model(model, video_path)
result1 = convert_result(result_dict_1)
# inference perdestrain from fairmot
result2 = track_fairmot(video_path)
# merge result
output = video_path.replace('.mp4', '.csv')
merge_result(result1, result2, output)
#post processing
view_video = request.form.get('view_video', False)
result = post_processing(output, video_path, ID_class, get_video=view_video)
if view_video:
return send_file(result)
return jsonify(result)
def test_run(video):
fname = video.split('/')[-1].replace('.mp4', '_result.mp4')
# if os.path.exists('output/'+fname): return
result = inference_model(model, video)
# pickle.dump(result, open('temp/result', 'wb'))
# result = pickle.load(open('temp/result', 'rb'))
output = convert_result(result)
# output = 'temp/result.csv'
post_processing(output, video, model.CLASSES, get_video=True)
def track_fairmot(video):
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
opt = opts().init(
[
'--load_model', 'models/fairmot_dla34.pth',
'--input_video', 'videos/video.mp4',
'--output_root', 'output',
'--num_classes', '1',
'--dataset', 'kitti'
])
result_filename = opt.result_file
print('Starting tracking...')
dataloader = datasets.LoadVideo(video, opt.img_size)
fps = dataloader.frame_rate
opt.track_buffer = fps * opt.track_duration
opt.fps = fps
eval_seq(opt, dataloader, opt.dataset, result_filename,
save_dir=None, show_image=opt.show_image, frame_rate=fps,
use_cuda=opt.gpus != [-1])
return result_filename
if __name__ == '__main__':
model = init_model(
config='configs/qdtrack-basch.py',
checkpoint='models/qdtrack-frcnn_r50_fpn_12e_bdd100k-13328aed.pth',
# config='configs/tao/qdtrack_frcnn_r50_fpn_24e_lvis.py',
# checkpoint='models/qdtrack_tao.pth',
)
app.run(port=5005, debug=True, host='0.0.0.0')
# videos = glob('data/bosch_tracking/*.mp4')
# for video in videos:
# test_run(video)
# test_run('data/video_10s.mp4')
# test_run('data/video.mp4')
# track_fairmot('data/video_10s.mp4')
| 32.544118 | 102 | 0.671939 |
2d6359e7a345f8bf07d93707898e4782cf7ca055
| 8,235 |
py
|
Python
|
embedding-calculator/src/services/facescan/plugins/insightface/insightface.py
|
Precistat/CompreFace
|
dc850c23d4cadc355f77cef08adbbd5f430c01b7
|
[
"Apache-2.0"
] | 1 |
2021-12-14T10:07:07.000Z
|
2021-12-14T10:07:07.000Z
|
embedding-calculator/src/services/facescan/plugins/insightface/insightface.py
|
Precistat/CompreFace
|
dc850c23d4cadc355f77cef08adbbd5f430c01b7
|
[
"Apache-2.0"
] | null | null | null |
embedding-calculator/src/services/facescan/plugins/insightface/insightface.py
|
Precistat/CompreFace
|
dc850c23d4cadc355f77cef08adbbd5f430c01b7
|
[
"Apache-2.0"
] | 1 |
2021-10-10T04:30:14.000Z
|
2021-10-10T04:30:14.000Z
|
# Copyright (c) 2020 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import functools
from typing import List, Tuple
import attr
import numpy as np
import mxnet as mx
from cached_property import cached_property
from insightface.app import FaceAnalysis
from insightface.model_zoo import (model_store, face_detection,
face_recognition, face_genderage)
from insightface.utils import face_align
from src.constants import ENV
from src.services.dto.bounding_box import BoundingBoxDTO
from src.services.dto.json_encodable import JSONEncodable
from src.services.facescan.imgscaler.imgscaler import ImgScaler
from src.services.facescan.plugins import base, mixins, exceptions
from src.services.facescan.plugins.insightface import helpers as insight_helpers
from src.services.dto import plugin_result
from src.services.imgtools.types import Array3D
logger = logging.getLogger(__name__)
class InsightFaceMixin:
_CTX_ID = ENV.GPU_IDX
_NMS = 0.4
def get_model_file(self, ml_model: base.MLModel):
if not ml_model.exists():
raise exceptions.ModelImportException(
f'Model {ml_model.name} does not exists')
return model_store.find_params_file(ml_model.path)
class DetectionOnlyFaceAnalysis(FaceAnalysis):
rec_model = None
ga_model = None
def __init__(self, file):
self.det_model = face_detection.FaceDetector(file, 'net3')
class FaceDetector(InsightFaceMixin, mixins.FaceDetectorMixin, base.BasePlugin):
ml_models = (
('retinaface_mnet025_v1', '1ggNFFqpe0abWz6V1A82rnxD6fyxB8W2c'),
('retinaface_mnet025_v2', '1EYTMxgcNdlvoL1fSC8N1zkaWrX75ZoNL'),
('retinaface_r50_v1', '1LZ5h9f_YC5EdbIZAqVba9TKHipi90JBj'),
)
IMG_LENGTH_LIMIT = ENV.IMG_LENGTH_LIMIT
IMAGE_SIZE = 112
det_prob_threshold = 0.8
@cached_property
def _detection_model(self):
model_file = self.get_model_file(self.ml_model)
model = DetectionOnlyFaceAnalysis(model_file)
model.prepare(ctx_id=self._CTX_ID, nms=self._NMS)
return model
def find_faces(self, img: Array3D, det_prob_threshold: float = None) -> List[BoundingBoxDTO]:
if det_prob_threshold is None:
det_prob_threshold = self.det_prob_threshold
assert 0 <= det_prob_threshold <= 1
scaler = ImgScaler(self.IMG_LENGTH_LIMIT)
img = scaler.downscale_img(img)
results = self._detection_model.get(img, det_thresh=det_prob_threshold)
boxes = []
for result in results:
            downscaled_box_array = result.bbox.astype(int).flatten()  # np.int was an alias of the builtin int and is removed in newer NumPy
downscaled_box = BoundingBoxDTO(x_min=downscaled_box_array[0],
y_min=downscaled_box_array[1],
x_max=downscaled_box_array[2],
y_max=downscaled_box_array[3],
probability=result.det_score,
np_landmarks=result.landmark)
box = downscaled_box.scaled(scaler.upscale_coefficient)
if box.probability <= det_prob_threshold:
logger.debug(f'Box Filtered out because below threshold ({det_prob_threshold}: {box})')
continue
logger.debug(f"Found: {box}")
boxes.append(box)
return boxes
def crop_face(self, img: Array3D, box: BoundingBoxDTO) -> Array3D:
return face_align.norm_crop(img, landmark=box._np_landmarks,
image_size=self.IMAGE_SIZE)
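# Illustrative usage sketch -- instantiation details are assumptions; in
# practice the surrounding plugin framework constructs and configures the
# plugins, so this is only schematic:
#
#   detector = FaceDetector()
#   boxes = detector.find_faces(img)                        # img: HxWx3 BGR array
#   crops = [detector.crop_face(img, box) for box in boxes]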
class Calculator(InsightFaceMixin, mixins.CalculatorMixin, base.BasePlugin):
ml_models = (
('arcface_mobilefacenet', '17TpxpyHuUc1ZTm3RIbfvhnBcZqhyKszV', (1.26538905, 5.552089201), 200),
('arcface_r100_v1', '11xFaEHIQLNze3-2RUV1cQfT-q6PKKfYp', (1.23132175, 6.602259425), 400),
('arcface_resnet34', '1ECp5XrLgfEAnwyTYFEhJgIsOAw6KaHa7', (1.2462842, 5.981636853), 400),
('arcface_resnet50', '1a9nib4I9OIVORwsqLB0gz0WuLC32E8gf', (1.2375747, 5.973354538), 400),
('arcface-r50-msfdrop75', '1gNuvRNHCNgvFtz7SjhW82v2-znlAYaRO', (1.2350148, 7.071431642), 400),
('arcface-r100-msfdrop75', '1lAnFcBXoMKqE-SkZKTmi6MsYAmzG0tFw', (1.224676, 6.322647217), 400),
)
def calc_embedding(self, face_img: Array3D) -> Array3D:
return self._calculation_model.get_embedding(face_img).flatten()
@cached_property
def _calculation_model(self):
model_file = self.get_model_file(self.ml_model)
model = face_recognition.FaceRecognition(
self.ml_model.name, True, model_file)
model.prepare(ctx_id=self._CTX_ID)
return model
@attr.s(auto_attribs=True, frozen=True)
class GenderAgeDTO(JSONEncodable):
gender: str
age: Tuple[int, int]
class BaseGenderAge(InsightFaceMixin, base.BasePlugin):
ml_models = (
('genderage_v1', '1J9hqSWqZz6YvMMNrDrmrzEW9anhvdKuC'),
)
CACHE_FIELD = '_genderage_cached_result'
def _evaluate_model(self, face: plugin_result.FaceDTO):
cached_result = getattr(face, self.CACHE_FIELD, None)
if not cached_result:
cached_result = self._genderage_model.get(face._face_img)
setattr(face, self.CACHE_FIELD, cached_result)
return cached_result
@cached_property
def _genderage_model(self):
model_file = self.get_model_file(self.ml_model)
model = face_genderage.FaceGenderage(
self.ml_model.name, True, model_file)
model.prepare(ctx_id=self._CTX_ID)
return model
class GenderDetector(BaseGenderAge):
slug = "gender"
GENDERS = ('female', 'male')
def __call__(self, face: plugin_result.FaceDTO):
gender, age = self._evaluate_model(face)
return plugin_result.GenderDTO(gender=self.GENDERS[int(gender)])
class AgeDetector(BaseGenderAge):
slug = "age"
def __call__(self, face: plugin_result.FaceDTO):
gender, age = self._evaluate_model(face)
return plugin_result.AgeDTO(age=(age, age))
class LandmarksDetector(mixins.LandmarksDetectorMixin, base.BasePlugin):
""" Extract landmarks from FaceDetector results."""
class Landmarks2d106DTO(plugin_result.LandmarksDTO):
"""
106-points facial landmarks
Points mark-up - https://github.com/deepinsight/insightface/tree/master/alignment/coordinateReg#visualization
"""
NOSE_POSITION = 86
class Landmarks2d106Detector(InsightFaceMixin, mixins.LandmarksDetectorMixin,
base.BasePlugin):
slug = 'landmarks2d106'
ml_models = (
('2d106det', '1MBWbTEYRhZFzj_O2f2Dc6fWGXFWtbMFw'),
)
CROP_SIZE = (192, 192) # model requirements
def __call__(self, face: plugin_result.FaceDTO):
landmarks = insight_helpers.predict_landmark2d106(
self._landmark_model, face._img, self.CROP_SIZE,
face.box.center, (face.box.width, face.box.height),
)
return Landmarks2d106DTO(landmarks=landmarks.astype(int).tolist())
@cached_property
def _landmark_model(self):
model_prefix = f'{self.ml_model.path}/{self.ml_model.name}'
sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
ctx = mx.gpu(self._CTX_ID) if self._CTX_ID >= 0 else mx.cpu()
all_layers = sym.get_internals()
sym = all_layers['fc1_output']
model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
model.bind(for_training=False,
data_shapes=[('data', (1, 3, *self.CROP_SIZE))])
model.set_params(arg_params, aux_params)
return model
| 38.661972 | 113 | 0.686096 |
58e3095ee6efa78dee5ecb30e3f1805f15bb0e61
| 1,607 |
py
|
Python
|
tfserve/loader.py
|
gar1t/tfserve
|
cd8cb67f87f3f6f1309628f62184f19da106e2dc
|
[
"MIT"
] | 31 |
2018-09-05T17:14:16.000Z
|
2021-03-26T12:39:23.000Z
|
tfserve/loader.py
|
gar1t/tfserve
|
cd8cb67f87f3f6f1309628f62184f19da106e2dc
|
[
"MIT"
] | 5 |
2018-10-10T13:02:49.000Z
|
2020-06-23T15:04:38.000Z
|
tfserve/loader.py
|
gar1t/tfserve
|
cd8cb67f87f3f6f1309628f62184f19da106e2dc
|
[
"MIT"
] | 10 |
2018-10-15T16:15:22.000Z
|
2020-01-23T08:37:37.000Z
|
"""
Module that handles loading a tensorflow model in several different forms.
"""
import os
import tensorflow as tf
def load_model(model_path):
"""
    Loads a tensorflow model and returns a tf.Session running the loaded graph.
:param str model_path: It can be a `.pb` file or directory containing checkpoint files.
:return: tf.Session running the model graph.
"""
if model_path is None:
raise ValueError("model_path must not be None")
if not os.path.exists(model_path):
raise ValueError("model_path must exist")
if os.path.isfile(model_path) and model_path.endswith(".pb"):
return _load_pb(model_path)
if os.path.isdir(model_path):
for f in os.listdir(model_path):
if f.endswith(".pb"):
return _load_pb(os.path.join(model_path, f))
return _load_ckpt(model_path)
def _load_pb(model_path):
"""
Loads from a '.pb' model file.
"""
graph = tf.Graph()
sess = tf.Session(graph=graph)
with tf.gfile.FastGFile(model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with sess.graph.as_default():
tf.import_graph_def(graph_def)
return sess
def _load_ckpt(model_dir):
"""
Loads from a checkpoint directory.
"""
graph = tf.Graph()
sess = tf.Session(graph=graph)
with graph.as_default():
ckpt_path = tf.train.latest_checkpoint(model_dir)
saver = tf.train.import_meta_graph('{}.meta'.format(ckpt_path))
saver.restore(sess, ckpt_path)
return sess
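# A minimal usage sketch, assuming a frozen graph at this path with these
# tensor names (path and names are placeholders, not from a particular model):
if __name__ == "__main__":
    import numpy as np
    sess = load_model("models/frozen_graph.pb")
    # Tensors loaded through tf.import_graph_def() get the default "import/" prefix.
    inp = sess.graph.get_tensor_by_name("import/input:0")
    out = sess.graph.get_tensor_by_name("import/output:0")
    print(sess.run(out, feed_dict={inp: np.zeros((1, 224, 224, 3), np.float32)}))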
| 27.237288 | 91 | 0.65028 |
2110a6e6d578fd0fcc8269d00ad6dcf4799a8390
| 493 |
py
|
Python
|
application/utils/log_handler.py
|
lahdjirayhan/drive-kesma-library-linker
|
7c945e2b8efd8d05a571b563e0738dc3c086263e
|
[
"Unlicense"
] | null | null | null |
application/utils/log_handler.py
|
lahdjirayhan/drive-kesma-library-linker
|
7c945e2b8efd8d05a571b563e0738dc3c086263e
|
[
"Unlicense"
] | null | null | null |
application/utils/log_handler.py
|
lahdjirayhan/drive-kesma-library-linker
|
7c945e2b8efd8d05a571b563e0738dc3c086263e
|
[
"Unlicense"
] | null | null | null |
import logging
# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list
# Both answers combined
class ListHandler(logging.Handler):
def __init__(self, *args, message_list, **kwargs):
logging.Handler.__init__(self, *args, **kwargs)
self.setFormatter(logging.Formatter('%(message)s'))
self.setLevel(logging.INFO)
self.message_list = message_list
def emit(self, record):
self.message_list.append(record.getMessage())
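# Illustrative usage sketch (editor addition, not part of the original module):
# attach ListHandler so that log records accumulate in a plain Python list.
# The logger name "worker" is arbitrary.
if __name__ == "__main__":
    messages = []
    logger = logging.getLogger("worker")
    logger.setLevel(logging.INFO)
    logger.addHandler(ListHandler(message_list=messages))
    logger.info("task started")
    assert messages == ["task started"]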
| 41.083333 | 87 | 0.716024 |
813abdc0b8b243c881946660276c4fda5f15aa76
| 7,246 |
py
|
Python
|
opencv_engine/engine.py
|
scorphus/opencv-engine
|
ae02452d9d6ae884c9a4ae22382e34b7d6e6fc11
|
[
"MIT"
] | 17 |
2015-02-23T04:15:21.000Z
|
2019-05-05T23:08:08.000Z
|
opencv_engine/engine.py
|
scorphus/opencv-engine
|
ae02452d9d6ae884c9a4ae22382e34b7d6e6fc11
|
[
"MIT"
] | 25 |
2015-01-14T17:20:44.000Z
|
2022-01-24T02:47:01.000Z
|
opencv_engine/engine.py
|
scorphus/opencv-engine
|
ae02452d9d6ae884c9a4ae22382e34b7d6e6fc11
|
[
"MIT"
] | 26 |
2015-02-23T04:16:58.000Z
|
2020-01-09T00:21:34.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# thumbor imaging service - opencv engine
# https://github.com/thumbor/opencv-engine
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2014 globo.com [email protected]
try:
import cv
except ImportError:
import cv2.cv as cv
from colour import Color
from thumbor.engines import BaseEngine
from pexif import JpegFile, ExifSegment
try:
from thumbor.ext.filters import _composite
FILTERS_AVAILABLE = True
except ImportError:
FILTERS_AVAILABLE = False
FORMATS = {
'.jpg': 'JPEG',
'.jpeg': 'JPEG',
'.gif': 'GIF',
'.png': 'PNG'
}
class Engine(BaseEngine):
@property
def image_depth(self):
if self.image is None:
return 8
return cv.GetImage(self.image).depth
@property
def image_channels(self):
if self.image is None:
return 3
return self.image.channels
@classmethod
def parse_hex_color(cls, color):
try:
color = Color(color).get_rgb()
return tuple(c * 255 for c in reversed(color))
except Exception:
return None
def gen_image(self, size, color_value):
img0 = cv.CreateImage(size, self.image_depth, self.image_channels)
if color_value == 'transparent':
color = (255, 255, 255, 255)
else:
color = self.parse_hex_color(color_value)
if not color:
raise ValueError('Color %s is not valid.' % color_value)
cv.Set(img0, color)
return img0
def create_image(self, buffer):
# FIXME: opencv doesn't support gifs, even worse, the library
        # segfaults when trying to decode a gif. An exception is a
# less drastic measure.
try:
if FORMATS[self.extension] == 'GIF':
raise ValueError("opencv doesn't support gifs")
except KeyError:
pass
imagefiledata = cv.CreateMatHeader(1, len(buffer), cv.CV_8UC1)
cv.SetData(imagefiledata, buffer, len(buffer))
img0 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_UNCHANGED)
if FORMATS[self.extension] == 'JPEG':
try:
info = JpegFile.fromString(buffer).get_exif()
if info:
self.exif = info.data
self.exif_marker = info.marker
except Exception:
pass
return img0
@property
def size(self):
return cv.GetSize(self.image)
def normalize(self):
pass
def resize(self, width, height):
thumbnail = cv.CreateImage(
(int(round(width, 0)), int(round(height, 0))),
self.image_depth,
self.image_channels
)
cv.Resize(self.image, thumbnail, cv.CV_INTER_AREA)
self.image = thumbnail
def crop(self, left, top, right, bottom):
new_width = right - left
new_height = bottom - top
cropped = cv.CreateImage(
(new_width, new_height), self.image_depth, self.image_channels
)
src_region = cv.GetSubRect(self.image, (left, top, new_width, new_height))
cv.Copy(src_region, cropped)
self.image = cropped
def rotate(self, degrees):
if (degrees > 180):
# Flip around both axes
cv.Flip(self.image, None, -1)
degrees = degrees - 180
img = self.image
size = cv.GetSize(img)
if (degrees / 90 % 2):
new_size = (size[1], size[0])
center = ((size[0] - 1) * 0.5, (size[0] - 1) * 0.5)
else:
new_size = size
center = ((size[0] - 1) * 0.5, (size[1] - 1) * 0.5)
mapMatrix = cv.CreateMat(2, 3, cv.CV_64F)
cv.GetRotationMatrix2D(center, degrees, 1.0, mapMatrix)
dst = cv.CreateImage(new_size, self.image_depth, self.image_channels)
cv.SetZero(dst)
cv.WarpAffine(img, dst, mapMatrix)
self.image = dst
def flip_vertically(self):
cv.Flip(self.image, None, 1)
def flip_horizontally(self):
cv.Flip(self.image, None, 0)
def read(self, extension=None, quality=None):
if quality is None:
quality = self.context.config.QUALITY
options = None
extension = extension or self.extension
try:
if FORMATS[extension] == 'JPEG':
options = [cv.CV_IMWRITE_JPEG_QUALITY, quality]
except KeyError:
            # unknown extension: default to JPEG quality options
options = [cv.CV_IMWRITE_JPEG_QUALITY, quality]
data = cv.EncodeImage(extension, self.image, options or []).tostring()
if FORMATS[extension] == 'JPEG' and self.context.config.PRESERVE_EXIF_INFO:
if hasattr(self, 'exif'):
img = JpegFile.fromString(data)
img._segments.insert(0, ExifSegment(self.exif_marker, None, self.exif, 'rw'))
data = img.writeString()
return data
def set_image_data(self, data):
cv.SetData(self.image, data)
def image_data_as_rgb(self, update_image=True):
# TODO: Handle other formats
if self.image_channels == 4:
mode = 'BGRA'
elif self.image_channels == 3:
mode = 'BGR'
else:
mode = 'BGR'
rgb_copy = cv.CreateImage((self.image.width, self.image.height), 8, 3)
cv.CvtColor(self.image, rgb_copy, cv.CV_GRAY2BGR)
self.image = rgb_copy
return mode, self.image.tostring()
def draw_rectangle(self, x, y, width, height):
cv.Rectangle(self.image, (int(x), int(y)), (int(x + width), int(y + height)), cv.Scalar(255, 255, 255, 1.0))
def convert_to_grayscale(self):
if self.image_channels >= 3:
# FIXME: OpenCV does not support grayscale with alpha channel?
grayscaled = cv.CreateImage((self.image.width, self.image.height), self.image_depth, 1)
cv.CvtColor(self.image, grayscaled, cv.CV_BGRA2GRAY)
self.image = grayscaled
def paste(self, other_engine, pos, merge=True):
if merge and not FILTERS_AVAILABLE:
raise RuntimeError(
'You need filters enabled to use paste with merge. Please reinstall ' +
'thumbor with proper compilation of its filters.')
self.enable_alpha()
other_engine.enable_alpha()
sz = self.size
other_size = other_engine.size
mode, data = self.image_data_as_rgb()
other_mode, other_data = other_engine.image_data_as_rgb()
imgdata = _composite.apply(
mode, data, sz[0], sz[1],
other_data, other_size[0], other_size[1], pos[0], pos[1], merge)
self.set_image_data(imgdata)
def enable_alpha(self):
if self.image_channels < 4:
with_alpha = cv.CreateImage(
(self.image.width, self.image.height), self.image_depth, 4
)
if self.image_channels == 3:
cv.CvtColor(self.image, with_alpha, cv.CV_BGR2BGRA)
else:
cv.CvtColor(self.image, with_alpha, cv.CV_GRAY2BGRA)
self.image = with_alpha
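# Illustrative usage sketch (editor addition, not part of the original module):
# parse_hex_color converts a hex string into a BGR tuple scaled to 0-255
# (reversed because OpenCV stores channels as BGR) and returns None for values
# it cannot parse.
#
#   Engine.parse_hex_color('#ff0000')   # ~ (0.0, 0.0, 255.0) -> blue, green, red
#   Engine.parse_hex_color('nonsense')  # None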
| 31.641921 | 116 | 0.587911 |
c3af66c6b1cee906a5c5f9ae5f17c95af97293d2
| 1,332 |
py
|
Python
|
plotly/api/v1/clientresp.py
|
SamLau95/plotly.py
|
7dd4ddd4e18e79ff30cec3ef331fa2f8949ed1bd
|
[
"MIT"
] | 5 |
2022-02-20T07:10:02.000Z
|
2022-03-18T17:47:53.000Z
|
plotly/api/v1/clientresp.py
|
SamLau95/plotly.py
|
7dd4ddd4e18e79ff30cec3ef331fa2f8949ed1bd
|
[
"MIT"
] | 9 |
2020-06-05T20:31:50.000Z
|
2022-03-11T23:45:47.000Z
|
plotly/api/v1/clientresp.py
|
SamLau95/plotly.py
|
7dd4ddd4e18e79ff30cec3ef331fa2f8949ed1bd
|
[
"MIT"
] | 2 |
2018-02-13T10:40:10.000Z
|
2021-06-04T11:15:53.000Z
|
"""Interface to deprecated /clientresp API. Subject to deletion."""
from __future__ import absolute_import
import warnings
from requests.compat import json as _json
from plotly import config, utils, version
from plotly.api.v1.utils import request
def clientresp(data, **kwargs):
"""
Deprecated endpoint, still used because it can parse data out of a plot.
When we get around to forcing users to create grids and then create plots,
we can finally get rid of this.
:param (list) data: The data array from a figure.
"""
creds = config.get_credentials()
cfg = config.get_config()
dumps_kwargs = {'sort_keys': True, 'cls': utils.PlotlyJSONEncoder}
payload = {
'platform': 'python', 'version': version.__version__,
'args': _json.dumps(data, **dumps_kwargs),
'un': creds['username'], 'key': creds['api_key'], 'origin': 'plot',
'kwargs': _json.dumps(kwargs, **dumps_kwargs)
}
url = '{plotly_domain}/clientresp'.format(**cfg)
response = request('post', url, data=payload)
# Old functionality, just keeping it around.
parsed_content = response.json()
if parsed_content.get('warning'):
warnings.warn(parsed_content['warning'])
if parsed_content.get('message'):
print(parsed_content['message'])
return response
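# Illustrative usage sketch (editor addition, not part of the original module).
# The call requires plotly credentials to already be configured; the figure
# data and keyword arguments below are placeholders.
#
#   figure_data = [{'x': [1, 2, 3], 'y': [4, 2, 5], 'type': 'scatter'}]
#   response = clientresp(figure_data, filename='demo-plot')
#   print(response.json().get('url'))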
| 29.6 | 78 | 0.677177 |
43bcb53b30b218647164141e67e2c15e8751be5f
| 1,967 |
py
|
Python
|
contextaware_processors/response.py
|
kezabelle/django-contextaware-processors
|
7f5bd32b73032fa5b9208f62c2b4438c13a2d78b
|
[
"BSD-2-Clause"
] | null | null | null |
contextaware_processors/response.py
|
kezabelle/django-contextaware-processors
|
7f5bd32b73032fa5b9208f62c2b4438c13a2d78b
|
[
"BSD-2-Clause"
] | null | null | null |
contextaware_processors/response.py
|
kezabelle/django-contextaware-processors
|
7f5bd32b73032fa5b9208f62c2b4438c13a2d78b
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.template.response import TemplateResponse
__all__ = ['AlreadyRendered', 'ContextawareTemplateResponse']
class AlreadyRendered(TypeError): pass
def update_context_from_callbacks(request, context, callbacks):
for context_callback in callbacks:
newcontext = context_callback(request=request, context=context)
# if a callback marks itself as irrelevant, skip to the
# next processor in the list.
if newcontext is NotImplemented:
continue
if newcontext is not None:
context.update(newcontext)
return context
class ContextawareTemplateResponse(TemplateResponse):
rendering_attrs = TemplateResponse.rendering_attrs + ['_post_context_callbacks']
def __init__(self, request, template, context=None, content_type=None,
status=None, charset=None, using=None):
super(ContextawareTemplateResponse, self).__init__(
request, template, context=context, content_type=content_type,
status=status, charset=charset, using=using)
self._post_context_callbacks = []
def add_context_callback(self, callback):
if self._is_rendered:
raise AlreadyRendered("Cannot apply a new context-mutating "
"callback after rendering the content, "
"without having to re-render it")
self._post_context_callbacks.append(callback)
def update_context_from_callbacks(self, context):
return update_context_from_callbacks(request=self._request, context=context,
callbacks=self._post_context_callbacks)
def resolve_context(self, context):
ctx = super(ContextawareTemplateResponse, self).resolve_context(context=context)
ctx = self.update_context_from_callbacks(context=ctx)
return ctx
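# Illustrative usage sketch (editor addition, not part of the original module):
# wiring a context-mutating callback into a Django view. The template name and
# the context keys below are hypothetical.
def example_view(request):
    response = ContextawareTemplateResponse(request, "page.html", {"title": "Hello"})
    def add_user_count(request, context):
        # A callback may return a dict to merge into the context, None to
        # leave it untouched, or NotImplemented to mark itself irrelevant.
        return {"user_count": 42}
    response.add_context_callback(add_user_count)
    return response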
| 38.568627 | 88 | 0.689375 |
5cec1e757a166bee97afa6b1cc079410ce4dcc60
| 5,341 |
py
|
Python
|
app/user/tests/test_user_api.py
|
masoud91/recipe
|
88481ae2f22e6ac1f8081be0ce699039ffc645b1
|
[
"MIT"
] | 3 |
2020-08-10T14:14:00.000Z
|
2021-08-08T10:21:56.000Z
|
app/user/tests/test_user_api.py
|
masoud91/recipe
|
88481ae2f22e6ac1f8081be0ce699039ffc645b1
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
masoud91/recipe
|
88481ae2f22e6ac1f8081be0ce699039ffc645b1
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTest(TestCase):
def setUp(self) -> None:
self.client = APIClient()
def test_create_valid_user_success(self):
"""test creating user with valid payload is successful"""
payload = {
'email': '[email protected]',
'password': 'demo1234',
'name': 'Demo User',
}
response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**response.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', response.data)
def test_user_exist(self):
"""Test creating a user that already exists"""
payload = {
'email': '[email protected]',
'password': 'demo1234',
}
create_user(**payload)
response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that password must be more than 5 characters"""
payload = {
'email': '[email protected]',
'password': 'short',
}
response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that token is created for the user"""
payload = {
'email': '[email protected]',
'password': 'demo1234',
}
create_user(**payload)
response = self.client.post(TOKEN_URL, payload)
self.assertIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_create_token_with_invalid_credential(self):
"""Test that token is not created if invalid credentials are given"""
create_user(**{
'email': '[email protected]',
'password': 'demo1234',
})
payload = {
'email': '[email protected]',
'password': 'WrongPassword',
}
response = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user does not exists"""
payload = {
'email': '[email protected]',
'password': 'demo1234',
}
response = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
payload = {
'email': 'demo',
'password': '',
}
response = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', response.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_authorized(self):
"""Test that authentication is required for users"""
response = self.client.get(ME_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class ProtectedUserApiTest(TestCase):
"""Test api requests that require authentication"""
def setUp(self) -> None:
self.user = create_user(
email='[email protected]',
password='pass123456',
name='user one',
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in used"""
response = self.client.get(ME_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual({
'name': self.user.name,
'email': self.user.email,
}, response.data)
self.assertNotIn('password', response.data)
def test_post_me_not_allowed(self):
"""Test that POST in not allowed on user/me url"""
response = self.client.post(ME_URL, {})
self.assertEqual(
response.status_code,
status.HTTP_405_METHOD_NOT_ALLOWED
)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {
'name': 'name edited',
'password': 'changed',
}
response = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
| 31.982036 | 77 | 0.627598 |
029142861467283b04bed19dae8b5258610b5426
| 37,237 |
py
|
Python
|
blobfile/ops_test.py
|
jpambrun/blobfile
|
16a7c183e49fcc2a7c95af4f2a63b60cb9a55e24
|
[
"Unlicense"
] | null | null | null |
blobfile/ops_test.py
|
jpambrun/blobfile
|
16a7c183e49fcc2a7c95af4f2a63b60cb9a55e24
|
[
"Unlicense"
] | null | null | null |
blobfile/ops_test.py
|
jpambrun/blobfile
|
16a7c183e49fcc2a7c95af4f2a63b60cb9a55e24
|
[
"Unlicense"
] | null | null | null |
# https://github.com/tensorflow/tensorflow/issues/27023
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
import random
import string
import tempfile
import os
import contextlib
import json
import urllib.request
import hashlib
import time
import subprocess as sp
import multiprocessing as mp
import platform
import base64
import av
import pytest
from tensorflow.io import gfile
import imageio
import numpy as np
import blobfile as bf
from blobfile import ops, azure
GCS_TEST_BUCKET = "csh-test-3"
AS_TEST_ACCOUNT = "cshteststorage2"
AS_TEST_CONTAINER = "testcontainer2"
AZURE_VALID_CONTAINER = (
f"https://{AS_TEST_ACCOUNT}.blob.core.windows.net/{AS_TEST_CONTAINER}"
)
AZURE_INVALID_CONTAINER = f"https://{AS_TEST_ACCOUNT}.blob.core.windows.net/{AS_TEST_CONTAINER}-does-not-exist"
AZURE_INVALID_ACCOUNT = f"https://{AS_TEST_ACCOUNT}-does-not-exist.blob.core.windows.net/{AS_TEST_CONTAINER}"
GCS_VALID_BUCKET = f"gs://{GCS_TEST_BUCKET}"
GCS_INVALID_BUCKET = f"gs://{GCS_TEST_BUCKET}-does-not-exist"
# only run this for our docker tests, this tells gcloud to use the credentials supplied by the
# test running script
@pytest.mark.skipif(platform.system() != "Linux", reason="gcloud auth is only set up for the Linux docker tests")
@pytest.fixture(scope="session", autouse=True)
def setup_gcloud_auth():
sp.run(
[
"gcloud",
"auth",
"activate-service-account",
f"--key-file={os.environ['GOOGLE_APPLICATION_CREDENTIALS']}",
]
)
yield
@contextlib.contextmanager
def chdir(path):
original_path = os.getcwd()
os.chdir(path)
yield
os.chdir(original_path)
@contextlib.contextmanager
def _get_temp_local_path():
with tempfile.TemporaryDirectory() as tmpdir:
assert isinstance(tmpdir, str)
path = os.path.join(tmpdir, "file.name")
yield path
@contextlib.contextmanager
def _get_temp_gcs_path():
path = f"gs://{GCS_TEST_BUCKET}/" + "".join(
random.choice(string.ascii_lowercase) for i in range(16)
)
gfile.mkdir(path)
yield path + "/file.name"
gfile.rmtree(path)
@contextlib.contextmanager
def _get_temp_as_path():
random_id = "".join(random.choice(string.ascii_lowercase) for i in range(16))
path = (
f"https://{AS_TEST_ACCOUNT}.blob.core.windows.net/{AS_TEST_CONTAINER}/"
+ random_id
)
yield path + "/file.name"
sp.run(
[
"az",
"storage",
"blob",
"delete-batch",
"--account-name",
AS_TEST_ACCOUNT,
"--source",
AS_TEST_CONTAINER,
"--pattern",
f"{random_id}/*",
],
check=True,
shell=platform.system() == "Windows",
)
def _write_contents(path, contents):
if ".blob.core.windows.net" in path:
with tempfile.TemporaryDirectory() as tmpdir:
assert isinstance(tmpdir, str)
account, container, blob = azure.split_url(path)
filepath = os.path.join(tmpdir, "tmp")
with open(filepath, "wb") as f:
f.write(contents)
sp.run(
[
"az",
"storage",
"blob",
"upload",
"--account-name",
account,
"--container-name",
container,
"--name",
blob,
"--file",
filepath,
],
check=True,
shell=platform.system() == "Windows",
stdout=sp.DEVNULL,
stderr=sp.DEVNULL,
)
else:
with gfile.GFile(path, "wb") as f:
f.write(contents)
def _read_contents(path):
if ".blob.core.windows.net" in path:
with tempfile.TemporaryDirectory() as tmpdir:
assert isinstance(tmpdir, str)
account, container, blob = azure.split_url(path)
filepath = os.path.join(tmpdir, "tmp")
sp.run(
[
"az",
"storage",
"blob",
"download",
"--account-name",
account,
"--container-name",
container,
"--name",
blob,
"--file",
filepath,
],
check=True,
shell=platform.system() == "Windows",
stdout=sp.DEVNULL,
stderr=sp.DEVNULL,
)
with open(filepath, "rb") as f:
return f.read()
else:
with gfile.GFile(path, "rb") as f:
return f.read()
def test_basename():
testcases = [
("/", ""),
("a/", ""),
("a", "a"),
("a/b", "b"),
("", ""),
("gs://a", ""),
("gs://a/", ""),
("gs://a/b/", ""),
("gs://a/b", "b"),
("gs://a/b/c/test.filename", "test.filename"),
("https://a.blob.core.windows.net/b", ""),
("https://a.blob.core.windows.net/b/", ""),
("https://a.blob.core.windows.net/b/c/", ""),
("https://a.blob.core.windows.net/b/c", "c"),
("https://a.blob.core.windows.net/b/c/test.filename", "test.filename"),
]
for input_, desired_output in testcases:
actual_output = bf.basename(input_)
assert desired_output == actual_output
def test_dirname():
testcases = [
("a", ""),
("a/b", "a"),
("a/b/c", "a/b"),
("a/b/c/", "a/b/c"),
("a/b/c/////", "a/b/c"),
("", ""),
("gs://a", "gs://a"),
("gs://a/", "gs://a"),
("gs://a/////", "gs://a"),
("gs://a/b", "gs://a"),
("gs://a/b/c/test.filename", "gs://a/b/c"),
("gs://a/b/c/", "gs://a/b"),
("gs://a/b/c/////", "gs://a/b"),
(
"https://a.blob.core.windows.net/container",
"https://a.blob.core.windows.net/container",
),
(
"https://a.blob.core.windows.net/container/",
"https://a.blob.core.windows.net/container",
),
(
"https://a.blob.core.windows.net/container/////",
"https://a.blob.core.windows.net/container",
),
(
"https://a.blob.core.windows.net/container/b",
"https://a.blob.core.windows.net/container",
),
(
"https://a.blob.core.windows.net/container/b/c/test.filename",
"https://a.blob.core.windows.net/container/b/c",
),
(
"https://a.blob.core.windows.net/container/b/c/",
"https://a.blob.core.windows.net/container/b",
),
(
"https://a.blob.core.windows.net/container/b/c//////",
"https://a.blob.core.windows.net/container/b",
),
]
for input_, desired_output in testcases:
actual_output = bf.dirname(input_)
assert desired_output == actual_output, f"{input_}"
def test_join():
testcases = [
("a", "b", "a/b"),
("a/b", "c", "a/b/c"),
("a/b/", "c", "a/b/c"),
("a/b/", "c/", "a/b/c/"),
("a/b/", "/c/", "/c/"),
("", "", ""),
("gs://a", "b", "gs://a/b"),
("gs://a/b", "c", "gs://a/b/c"),
("gs://a/b/", "c", "gs://a/b/c"),
("gs://a/b/", "c/", "gs://a/b/c/"),
("gs://a/b/", "/c/", "gs://a/c/"),
("gs://a/b/", "../c", "gs://a/c"),
("gs://a/b/", "../c/", "gs://a/c/"),
("gs://a/b/", "../../c/", "gs://a/c/"),
(
"https://a.blob.core.windows.net/container",
"b",
"https://a.blob.core.windows.net/container/b",
),
(
"https://a.blob.core.windows.net/container/b",
"c",
"https://a.blob.core.windows.net/container/b/c",
),
(
"https://a.blob.core.windows.net/container/b/",
"c",
"https://a.blob.core.windows.net/container/b/c",
),
(
"https://a.blob.core.windows.net/container/b/",
"c/",
"https://a.blob.core.windows.net/container/b/c/",
),
(
"https://a.blob.core.windows.net/container/b/",
"/c/",
"https://a.blob.core.windows.net/container/c/",
),
(
"https://a.blob.core.windows.net/container/b/",
"../c",
"https://a.blob.core.windows.net/container/c",
),
(
"https://a.blob.core.windows.net/container/b/",
"../c/",
"https://a.blob.core.windows.net/container/c/",
),
(
"https://a.blob.core.windows.net/container/b/",
"../../c/",
"https://a.blob.core.windows.net/container/c/",
),
]
for input_a, input_b, desired_output in testcases:
actual_output = bf.join(input_a, input_b)
assert desired_output == actual_output, f"{input_a} {input_b}"
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_get_url(ctx):
contents = b"meow!"
with ctx() as path:
_write_contents(path, contents)
url, _ = bf.get_url(path)
assert urllib.request.urlopen(url).read() == contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
@pytest.mark.parametrize("streaming", [True, False])
def test_read_write(ctx, streaming):
contents = b"meow!\npurr\n"
with ctx() as path:
path = bf.join(path, "a folder", "a.file")
bf.makedirs(bf.dirname(path))
with bf.BlobFile(path, "wb", streaming=streaming) as w:
w.write(contents)
with bf.BlobFile(path, "rb", streaming=streaming) as r:
assert r.read() == contents
with bf.BlobFile(path, "rb", streaming=streaming) as r:
lines = list(r)
assert b"".join(lines) == contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_append(ctx):
contents = b"meow!\n"
additional_contents = b"purr\n"
with ctx() as path:
with bf.LocalBlobFile(path, "ab") as w:
w.write(contents)
with bf.LocalBlobFile(path, "ab") as w:
w.write(additional_contents)
with bf.BlobFile(path, "rb") as r:
assert r.read() == contents + additional_contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_stat(ctx):
contents = b"meow!"
with ctx() as path:
_write_contents(path, contents)
s = bf.stat(path)
assert s.size == len(contents)
assert 0 <= abs(time.time() - s.mtime) <= 5
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_remove(ctx):
contents = b"meow!"
with ctx() as path:
_write_contents(path, contents)
assert bf.exists(path)
bf.remove(path)
assert not bf.exists(path)
@pytest.mark.parametrize(
# don't test local path because that has slightly different behavior
"ctx",
[_get_temp_gcs_path, _get_temp_as_path],
)
def test_rmdir(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
# this is an error for a local path but not for a blob path
bf.rmdir(bf.join(dirpath, "fakedirname"))
new_dirpath = bf.join(dirpath, "dirname")
bf.makedirs(new_dirpath)
assert bf.exists(new_dirpath)
bf.rmdir(new_dirpath)
assert not bf.exists(new_dirpath)
# double delete is fine
bf.rmdir(new_dirpath)
# implicit dir
new_filepath = bf.join(dirpath, "dirname", "name")
_write_contents(new_filepath, contents)
with pytest.raises(OSError):
# not empty dir
bf.rmdir(new_dirpath)
bf.remove(new_filepath)
bf.rmdir(new_dirpath)
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_makedirs(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.join(path, "x", "x", "x")
bf.makedirs(dirpath)
assert bf.exists(dirpath)
_write_contents(bf.join(dirpath, "testfile"), contents)
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_isdir(ctx):
contents = b"meow!"
with ctx() as path:
assert not bf.isdir(path)
_write_contents(path, contents)
assert not bf.isdir(path)
dirpath = path + ".dir"
bf.makedirs(dirpath)
assert bf.isdir(dirpath)
assert not bf.isdir(dirpath[:-1])
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_listdir(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
a_path = bf.join(dirpath, "a")
with bf.BlobFile(a_path, "wb") as w:
w.write(contents)
b_path = bf.join(dirpath, "b")
with bf.BlobFile(b_path, "wb") as w:
w.write(contents)
bf.makedirs(bf.join(dirpath, "c"))
assert sorted(list(bf.listdir(dirpath))) == ["a", "b", "c"]
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_listdir_sharded(ctx):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
with bf.BlobFile(bf.join(dirpath, "a"), "wb") as w:
w.write(contents)
with bf.BlobFile(bf.join(dirpath, "aa"), "wb") as w:
w.write(contents)
with bf.BlobFile(bf.join(dirpath, "b"), "wb") as w:
w.write(contents)
with bf.BlobFile(bf.join(dirpath, "ca"), "wb") as w:
w.write(contents)
bf.makedirs(bf.join(dirpath, "c"))
with bf.BlobFile(bf.join(dirpath, "c/a"), "wb") as w:
w.write(contents)
# this should also test shard_prefix_length=2 but that takes too long
assert sorted(list(bf.listdir(dirpath, shard_prefix_length=1))) == [
"a",
"aa",
"b",
"c",
"ca",
]
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
@pytest.mark.parametrize("topdown", [False, True])
def test_walk(ctx, topdown):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
a_path = bf.join(dirpath, "a")
with bf.BlobFile(a_path, "wb") as w:
w.write(contents)
bf.makedirs(bf.join(dirpath, "c/d"))
b_path = bf.join(dirpath, "c/d/b")
with bf.BlobFile(b_path, "wb") as w:
w.write(contents)
expected = [
(dirpath, ["c"], ["a"]),
(bf.join(dirpath, "c"), ["d"], []),
(bf.join(dirpath, "c", "d"), [], ["b"]),
]
if not topdown:
expected = list(reversed(expected))
assert list(bf.walk(dirpath, topdown=topdown)) == expected
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
@pytest.mark.parametrize("parallel", [False, True])
def test_glob(ctx, parallel):
contents = b"meow!"
with ctx() as path:
dirpath = bf.dirname(path)
a_path = bf.join(dirpath, "ab")
with bf.BlobFile(a_path, "wb") as w:
w.write(contents)
b_path = bf.join(dirpath, "bb")
with bf.BlobFile(b_path, "wb") as w:
w.write(contents)
def assert_listing_equal(path, desired):
desired = sorted([bf.join(dirpath, p) for p in desired])
actual = sorted(list(bf.glob(path, parallel=parallel)))
assert actual == desired, f"{actual} != {desired}"
assert_listing_equal(bf.join(dirpath, "*b"), ["ab", "bb"])
assert_listing_equal(bf.join(dirpath, "a*"), ["ab"])
assert_listing_equal(bf.join(dirpath, "ab*"), ["ab"])
assert_listing_equal(bf.join(dirpath, "*"), ["ab", "bb"])
assert_listing_equal(bf.join(dirpath, "bb"), ["bb"])
path = bf.join(dirpath, "test.txt")
with bf.BlobFile(path, "wb") as w:
w.write(contents)
path = bf.join(dirpath, "subdir", "test.txt")
bf.makedirs(bf.dirname(path))
with bf.BlobFile(path, "wb") as f:
f.write(contents)
path = bf.join(dirpath, "subdir", "subsubdir", "test.txt")
if "://" not in path:
# implicit directory
bf.makedirs(bf.dirname(path))
with bf.BlobFile(path, "wb") as f:
f.write(contents)
assert_listing_equal(bf.join(dirpath, "*/test.txt"), ["subdir/test.txt"])
assert_listing_equal(bf.join(dirpath, "*/*.txt"), ["subdir/test.txt"])
if "://" in path:
# local glob doesn't handle ** the same way as remote glob
assert_listing_equal(
bf.join(dirpath, "**.txt"),
["test.txt", "subdir/test.txt", "subdir/subsubdir/test.txt"],
)
else:
assert_listing_equal(bf.join(dirpath, "**.txt"), ["test.txt"])
assert_listing_equal(bf.join(dirpath, "*/test"), [])
assert_listing_equal(bf.join(dirpath, "subdir/test.txt"), ["subdir/test.txt"])
# directories
assert_listing_equal(bf.join(dirpath, "*"), ["ab", "bb", "subdir", "test.txt"])
assert_listing_equal(bf.join(dirpath, "subdir"), ["subdir"])
assert_listing_equal(bf.join(dirpath, "subdir/"), ["subdir"])
assert_listing_equal(bf.join(dirpath, "*/"), ["subdir"])
assert_listing_equal(bf.join(dirpath, "*dir"), ["subdir"])
assert_listing_equal(bf.join(dirpath, "subdir/*dir"), ["subdir/subsubdir"])
assert_listing_equal(bf.join(dirpath, "subdir/*dir/"), ["subdir/subsubdir"])
assert_listing_equal(bf.join(dirpath, "su*ir/*dir/"), ["subdir/subsubdir"])
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_rmtree(ctx):
contents = b"meow!"
with ctx() as path:
root = bf.dirname(path)
destroy_path = bf.join(root, "destroy")
bf.makedirs(destroy_path)
save_path = bf.join(root, "save")
bf.makedirs(save_path)
# implicit dir
if not "://" in path:
bf.makedirs(bf.join(destroy_path, "adir"))
with bf.BlobFile(bf.join(destroy_path, "adir/b"), "wb") as w:
w.write(contents)
# explicit dir
bf.makedirs(bf.join(destroy_path, "bdir"))
with bf.BlobFile(bf.join(destroy_path, "bdir/b"), "wb") as w:
w.write(contents)
bf.makedirs(bf.join(save_path, "somedir"))
with bf.BlobFile(bf.join(save_path, "somefile"), "wb") as w:
w.write(contents)
def assert_listing_equal(path, desired):
actual = list(bf.walk(path))
# ordering of os walk is weird, only compare sorted order
assert sorted(actual) == sorted(desired), f"{actual} != {desired}"
assert_listing_equal(
root,
[
(root, ["destroy", "save"], []),
(destroy_path, ["adir", "bdir"], []),
(bf.join(destroy_path, "adir"), [], ["b"]),
(bf.join(destroy_path, "bdir"), [], ["b"]),
(save_path, ["somedir"], ["somefile"]),
(bf.join(save_path, "somedir"), [], []),
],
)
bf.rmtree(destroy_path)
assert_listing_equal(
root,
[
(root, ["save"], []),
(save_path, ["somedir"], ["somefile"]),
(bf.join(save_path, "somedir"), [], []),
],
)
def test_copy():
contents = b"meow!"
with _get_temp_local_path() as local_path1, _get_temp_local_path() as local_path2, _get_temp_local_path() as local_path3, _get_temp_gcs_path() as gcs_path1, _get_temp_gcs_path() as gcs_path2, _get_temp_as_path() as as_path1, _get_temp_as_path() as as_path2:
with pytest.raises(FileNotFoundError):
bf.copy(gcs_path1, gcs_path2)
with pytest.raises(FileNotFoundError):
bf.copy(as_path1, as_path2)
_write_contents(local_path1, contents)
testcases = [
(local_path1, local_path2),
(local_path1, gcs_path1),
(gcs_path1, gcs_path2),
(gcs_path2, as_path1),
(as_path1, as_path2),
(as_path2, local_path3),
]
for src, dst in testcases:
h = bf.copy(src, dst, return_md5=True)
assert h == hashlib.md5(contents).hexdigest()
assert _read_contents(dst) == contents
with pytest.raises(FileExistsError):
bf.copy(src, dst)
bf.copy(src, dst, overwrite=True)
assert _read_contents(dst) == contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_exists(ctx):
contents = b"meow!"
with ctx() as path:
assert not bf.exists(path)
_write_contents(path, contents)
assert bf.exists(path)
def test_more_exists():
testcases = [
(AZURE_INVALID_CONTAINER, False),
(AZURE_INVALID_CONTAINER + "/", False),
(AZURE_INVALID_CONTAINER + "//", False),
(AZURE_INVALID_CONTAINER + "/invalid.file", False),
(GCS_INVALID_BUCKET, False),
(GCS_INVALID_BUCKET + "/", False),
(GCS_INVALID_BUCKET + "//", False),
(GCS_INVALID_BUCKET + "/invalid.file", False),
(AZURE_INVALID_ACCOUNT, False),
(AZURE_INVALID_ACCOUNT + "/", False),
(AZURE_INVALID_ACCOUNT + "//", False),
(AZURE_INVALID_ACCOUNT + "/invalid.file", False),
(AZURE_VALID_CONTAINER, True),
(AZURE_VALID_CONTAINER + "/", True),
(AZURE_VALID_CONTAINER + "//", False),
(AZURE_VALID_CONTAINER + "/invalid.file", False),
(GCS_VALID_BUCKET, True),
(GCS_VALID_BUCKET + "/", True),
(GCS_VALID_BUCKET + "//", False),
(GCS_VALID_BUCKET + "/invalid.file", False),
(f"/does-not-exist", False),
(f"/", True),
]
for path, should_exist in testcases:
assert bf.exists(path) == should_exist
@pytest.mark.parametrize(
"base_path", [AZURE_INVALID_ACCOUNT, AZURE_INVALID_CONTAINER, GCS_INVALID_BUCKET]
)
def test_invalid_paths(base_path):
for suffix in ["", "/", "//", "/invalid.file", "/invalid/dir/"]:
path = base_path + suffix
print(path)
if path.endswith("/"):
expected_error = IsADirectoryError
else:
expected_error = FileNotFoundError
list(bf.glob(path))
if suffix == "":
for pattern in ["*", "**"]:
try:
list(bf.glob(path + pattern))
except bf.Error as e:
assert "Wildcards cannot be used" in e.message
else:
for pattern in ["*", "**"]:
list(bf.glob(path + pattern))
with pytest.raises(FileNotFoundError):
list(bf.listdir(path))
assert not bf.exists(path)
assert not bf.isdir(path)
with pytest.raises(expected_error):
bf.remove(path)
if suffix in ("", "/"):
try:
bf.rmdir(path)
except bf.Error as e:
assert "Cannot delete bucket" in e.message
else:
bf.rmdir(path)
with pytest.raises(NotADirectoryError):
bf.rmtree(path)
with pytest.raises(FileNotFoundError):
bf.stat(path)
if base_path == AZURE_INVALID_ACCOUNT:
with pytest.raises(bf.Error):
bf.get_url(path)
else:
bf.get_url(path)
with pytest.raises(FileNotFoundError):
bf.md5(path)
with pytest.raises(bf.Error):
bf.makedirs(path)
list(bf.walk(path))
with tempfile.TemporaryDirectory() as tmpdir:
local_path = os.path.join(tmpdir, "test.txt")
with pytest.raises(expected_error):
bf.copy(path, local_path)
with open(local_path, "w") as f:
f.write("meow")
with pytest.raises(expected_error):
bf.copy(local_path, path)
for streaming in [False, True]:
with pytest.raises(expected_error):
with bf.BlobFile(path, "rb", streaming=streaming) as f:
f.read()
with pytest.raises(expected_error):
with bf.BlobFile(path, "wb", streaming=streaming) as f:
f.write(b"meow")
@pytest.mark.parametrize("buffer_size", [1, 100])
@pytest.mark.parametrize("ctx", [_get_temp_gcs_path, _get_temp_as_path])
def test_read_stats(buffer_size, ctx):
with ctx() as path:
contents = b"meow!"
with bf.BlobFile(path, "wb") as w:
w.write(contents)
with bf.BlobFile(path, "rb", buffer_size=buffer_size) as r:
r.read(1)
if buffer_size == 1:
assert r.raw.bytes_read == 1 # type: ignore
else:
assert r.raw.bytes_read == len(contents) # type: ignore
with bf.BlobFile(path, "rb", buffer_size=buffer_size) as r:
r.read(1)
r.seek(4)
r.read(1)
if buffer_size == 1:
assert r.raw.requests == 2 # type: ignore
assert r.raw.bytes_read == 2 # type: ignore
else:
assert r.raw.requests == 1 # type: ignore
assert r.raw.bytes_read == len(contents) # type: ignore
@pytest.mark.parametrize("ctx", [_get_temp_gcs_path, _get_temp_as_path])
def test_cache_dir(ctx):
cache_dir = tempfile.mkdtemp()
contents = b"meow!"
alternative_contents = b"purr!"
with ctx() as path:
with bf.BlobFile(path, mode="wb") as f:
f.write(contents)
with bf.LocalBlobFile(path, mode="rb", cache_dir=cache_dir) as f:
assert f.read() == contents
content_hash = hashlib.md5(contents).hexdigest()
cache_path = bf.join(cache_dir, content_hash, bf.basename(path))
with open(cache_path, "rb") as f:
assert f.read() == contents
# alter the cached file to make sure we are not re-reading the remote file
with open(cache_path, "wb") as f:
f.write(alternative_contents)
with bf.LocalBlobFile(path, mode="rb", cache_dir=cache_dir) as f:
assert f.read() == alternative_contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_truncation(ctx):
chunk_size = 2 ** 20
contents = b"\x00" * chunk_size * 3
alternative_contents = b"\xFF" * chunk_size * 2
with ctx() as path:
with bf.BlobFile(path, "wb") as f:
f.write(contents)
with bf.BlobFile(path, "rb") as f:
read_contents = f.read(chunk_size)
with bf.BlobFile(path, "wb") as f2:
f2.write(alternative_contents)
# close underlying connection
f.raw._f = None # type: ignore
read_contents += f.read(chunk_size)
read_contents += f.read(chunk_size)
assert (
read_contents
== contents[:chunk_size]
+ alternative_contents[chunk_size : chunk_size * 2]
)
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_overwrite_while_reading(ctx):
chunk_size = 2 ** 20
contents = b"\x00" * chunk_size * 2
alternative_contents = b"\xFF" * chunk_size * 4
with ctx() as path:
with bf.BlobFile(path, "wb") as f:
f.write(contents)
with bf.BlobFile(path, "rb") as f:
read_contents = f.read(chunk_size)
with bf.BlobFile(path, "wb") as f2:
f2.write(alternative_contents)
# close underlying connection
f.raw._f = None # type: ignore
read_contents += f.read(chunk_size)
assert (
read_contents
== contents[:chunk_size]
+ alternative_contents[chunk_size : chunk_size * 2]
)
def test_create_local_intermediate_dirs():
contents = b"meow"
with _get_temp_local_path() as path:
dirpath = bf.dirname(path)
with chdir(dirpath):
for filepath in [
bf.join(dirpath, "dirname", "file.name"),
bf.join("..", bf.basename(dirpath), "file.name"),
"./file.name",
"file.name",
]:
with bf.BlobFile(filepath, "wb") as f:
f.write(contents)
@pytest.mark.parametrize("binary", [True, False])
@pytest.mark.parametrize("blobfile", [bf.BlobFile, bf.LocalBlobFile])
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_more_read_write(binary, blobfile, ctx):
rng = np.random.RandomState(0)
with ctx() as path:
if binary:
read_mode = "rb"
write_mode = "wb"
else:
read_mode = "r"
write_mode = "w"
with blobfile(path, write_mode) as w:
pass
with blobfile(path, read_mode) as r:
assert len(r.read()) == 0
contents = b"meow!"
if not binary:
contents = contents.decode("utf8")
with blobfile(path, write_mode) as w:
w.write(contents)
with blobfile(path, read_mode) as r:
assert r.read(1) == contents[:1]
assert r.read() == contents[1:]
assert len(r.read()) == 0
with blobfile(path, read_mode) as r:
for i in range(len(contents)):
assert r.read(1) == contents[i : i + 1]
assert len(r.read()) == 0
assert len(r.read()) == 0
contents = b"meow!\n\nmew!\n"
lines = [b"meow!\n", b"\n", b"mew!\n"]
if not binary:
contents = contents.decode("utf8")
lines = [line.decode("utf8") for line in lines]
with blobfile(path, write_mode) as w:
w.write(contents)
with blobfile(path, read_mode) as r:
assert r.readlines() == lines
with blobfile(path, read_mode) as r:
assert [line for line in r] == lines
if binary:
for size in [2 * 2 ** 20, 12_345_678]:
contents = rng.randint(0, 256, size=size, dtype=np.uint8).tobytes()
with blobfile(path, write_mode) as w:
w.write(contents)
with blobfile(path, read_mode) as r:
size = rng.randint(0, 1_000_000)
buf = b""
while True:
b = r.read(size)
if b == b"":
break
buf += b
assert buf == contents
else:
obj = {"a": 1}
with blobfile(path, write_mode) as w:
json.dump(obj, w)
with blobfile(path, read_mode) as r:
assert json.load(r) == obj
@pytest.mark.parametrize("blobfile", [bf.BlobFile, bf.LocalBlobFile])
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_video(blobfile, ctx):
rng = np.random.RandomState(0)
shape = (256, 64, 64, 3)
video_data = rng.randint(0, 256, size=np.prod(shape), dtype=np.uint8).reshape(shape)
with ctx() as path:
with blobfile(path, mode="wb") as wf:
with imageio.get_writer(
wf,
format="ffmpeg",
quality=None,
codec="libx264rgb",
pixelformat="bgr24",
output_params=["-f", "mp4", "-crf", "0"],
) as w:
for frame in video_data:
w.append_data(frame)
with blobfile(path, mode="rb") as rf:
with imageio.get_reader(
rf, format="ffmpeg", input_params=["-f", "mp4"]
) as r:
for idx, frame in enumerate(r):
assert np.array_equal(frame, video_data[idx])
with blobfile(path, mode="rb") as rf:
container = av.open(rf)
stream = container.streams.video[0]
for idx, frame in enumerate(container.decode(stream)):
assert np.array_equal(frame.to_image(), video_data[idx])
@pytest.mark.parametrize(
"ctx",
[_get_temp_local_path]
# disable remote backends because they are super slow
# "ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_large_file(ctx):
contents = b"0" * 2 ** 32
with ctx() as path:
with bf.BlobFile(path, "wb", streaming=True) as f:
f.write(contents)
with bf.BlobFile(path, "rb", streaming=True) as f:
assert contents == f.read()
def test_composite_objects():
with _get_temp_gcs_path() as remote_path:
with _get_temp_local_path() as local_path:
contents = b"0" * 2 * 2 ** 20
with open(local_path, "wb") as f:
f.write(contents)
sp.run(
[
"gsutil",
"-o",
"GSUtil:parallel_composite_upload_threshold=1M",
"cp",
local_path,
remote_path,
],
check=True,
)
assert hashlib.md5(contents).hexdigest() == bf.md5(remote_path)
assert hashlib.md5(contents).hexdigest() == bf.md5(remote_path)
with tempfile.TemporaryDirectory() as tmpdir:
with bf.BlobFile(remote_path, "rb", cache_dir=tmpdir, streaming=False) as f:
assert f.read() == contents
@pytest.mark.parametrize(
"ctx", [_get_temp_local_path, _get_temp_gcs_path, _get_temp_as_path]
)
def test_md5(ctx):
contents = b"meow!"
meow_hash = hashlib.md5(contents).hexdigest()
with ctx() as path:
_write_contents(path, contents)
assert bf.md5(path) == meow_hash
with bf.BlobFile(path, "wb") as f:
f.write(contents)
assert bf.md5(path) == meow_hash
with bf.BlobFile(path, "wb") as f:
f.write(contents)
assert bf.md5(path) == meow_hash
@pytest.mark.parametrize("ctx", [_get_temp_as_path])
def test_azure_maybe_update_md5(ctx):
contents = b"meow!"
meow_hash = hashlib.md5(contents).hexdigest()
alternative_contents = b"purr"
purr_hash = hashlib.md5(alternative_contents).hexdigest()
with ctx() as path:
_write_contents(path, contents)
_isfile, metadata = ops._azure_isfile(path)
assert ops._azure_maybe_update_md5(path, metadata["ETag"], meow_hash)
_write_contents(path, alternative_contents)
assert not ops._azure_maybe_update_md5(path, metadata["ETag"], meow_hash)
_isfile, metadata = ops._azure_isfile(path)
assert base64.b64decode(metadata["Content-MD5"]).hex() == purr_hash
def _get_http_pool_id(q):
q.put(id(ops._get_http_pool()))
def test_fork():
q = mp.Queue()
# this reference should keep the old http client alive in the child process
# to ensure that a new one does not recycle the memory address
http1 = ops._get_http_pool()
parent1 = id(http1)
p = mp.Process(target=_get_http_pool_id, args=(q,))
p.start()
p.join()
http2 = ops._get_http_pool()
parent2 = id(http2)
child = q.get()
assert parent1 == parent2
assert child != parent1
| 34.225184 | 262 | 0.535569 |
ca1157a5249486b410ba20987f0c3141b208cee7
| 16,804 |
py
|
Python
|
tests/esok/config/test_connection_options.py
|
ahaeger/esok
|
9e60a663678ba2ae021afcfa0ee6dd9af4fe700e
|
[
"MIT"
] | 2 |
2021-06-03T18:51:10.000Z
|
2021-12-16T14:49:56.000Z
|
tests/esok/config/test_connection_options.py
|
ahaeger/esok
|
9e60a663678ba2ae021afcfa0ee6dd9af4fe700e
|
[
"MIT"
] | 4 |
2021-08-21T20:03:42.000Z
|
2021-08-21T22:43:07.000Z
|
tests/esok/config/test_connection_options.py
|
ahaeger/esok
|
9e60a663678ba2ae021afcfa0ee6dd9af4fe700e
|
[
"MIT"
] | null | null | null |
import click
import pytest
from esok.config.connection_options import per_connection
from esok.constants import CLI_ERROR, CONFIGURATION_ERROR, USER_ERROR
from esok.esok import esok
@pytest.mark.usefixtures("mock_clients")
def test_no_connection_options(runner):
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, [command])
assert r.exit_code == 0, "The command should succeed."
assert clients == [
"localhost"
], "Default connection should be used when no connection options are passed."
@pytest.mark.usefixtures("mock_clients")
def test_no_connection_options_and_sites_option(runner):
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-s", "eu", command])
assert r.exit_code == USER_ERROR, "Exit code should signal a user error."
assert "Error" in r.output
@pytest.mark.usefixtures("mock_clients")
def test_host_option(runner):
clients, command = _attach_sub_command(esok)
expected_host = "some_host"
r = runner.invoke(esok, ["-H", expected_host, command])
assert r.exit_code == 0, "The command should succeed."
assert clients == [expected_host], "Provided hostname should be used."
@pytest.mark.usefixtures("mock_clients")
def test_host_option_cannot_be_used_with_cluster(runner):
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-H", "some_host", "-c", "some_cluster", command])
assert r.exit_code == USER_ERROR, "Exit code should signal a user error."
assert "Error" in r.output, "There should be error output explaining the issue."
@pytest.mark.usefixtures("mock_clients")
def test_host_option_cannot_be_used_with_sites(runner):
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-H", "some_host", "-s", "site1,site2", command])
assert r.exit_code == USER_ERROR, "Exit code should signal a user error."
assert "Error" in r.output, "There should be error output explaining the issue."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_missing_connection(runner):
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "some_cluster", command])
assert r.exit_code == USER_ERROR, "Exit code should signal a user error."
assert "Error" in r.output, "There should be error output explaining the issue."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_predefined_connections(user_config_file, runner):
user_config_file.write_text(
"""
[cluster:awesome-cluster]
eu = 192.168.0.1
us = some.es.cluster.example.com
ae = east-asia-host
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "awesome-cluster", command])
assert r.exit_code == 0, "The command should succeed."
assert "192.168.0.1" in clients, "Configured cluster should be part of the clients."
assert (
"some.es.cluster.example.com" in clients
), "Configured cluster should be part of the clients to connect to."
assert (
"east-asia-host" in clients
), "Configured cluster should be part of the clients to connect to."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_predefined_connections_and_site_option(user_config_file, runner):
user_config_file.write_text(
"""
[cluster:awesome-cluster]
eu = 192.168.0.1
us = some.es.cluster.example.com
ap = asia-host
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "awesome-cluster", "-s", "eu", command])
assert r.exit_code == 0, "The command should succeed."
assert ["192.168.0.1"] == clients, "Specified cluster should be the only client."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_predefined_connections_and_missing_site_option(
user_config_file, runner
):
user_config_file.write_text(
"""
[cluster:awesome-cluster]
us = some.es.cluster.example.com
ap = asia-host
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "awesome-cluster", "-s", "eu", command])
assert r.exit_code == USER_ERROR, "Exit code should signal a user error."
assert "Error" in r.output, "There should be error output explaining the issue."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_invalid_pattern_config(user_config_file, runner):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-cluster
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "favorite", command])
assert (
r.exit_code == CONFIGURATION_ERROR
), "Exit code should signal a configuration error."
assert "Error" in r.output
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_pattern_config_without_site(user_config_file, runner):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-{cluster}-cluster
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "favorite", command])
assert r.exit_code == 0, "The command should succeed."
assert (
"my-favorite-cluster" in clients
), "Specified cluster should be the only client."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_pattern_config_and_site_option_without_site_variable(
user_config_file, runner
):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-{cluster}-cluster
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "favorite", "-s", "woops", command])
assert r.exit_code == USER_ERROR, "Exit code should signal a user error."
assert "Error" in r.output, "There should be error output explaining the issue."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_pattern_config_and_missing_site(user_config_file, runner):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-{cluster}-{site}-cluster
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "favorite", command])
assert r.exit_code == USER_ERROR, "Exit code should signal a user error."
assert "Error" in r.output, "There should be error output explaining the issue."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_pattern_config_and_default_sites(user_config_file, runner):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-{cluster}-{site}-cluster
cluster_pattern_default_sites = site1,site2
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "favorite", command])
assert r.exit_code == 0, "The command should succeed."
assert (
"my-favorite-site1-cluster" in clients
), "Specified cluster should be among the clients."
assert (
"my-favorite-site2-cluster" in clients
), "Specified cluster should be among the clients."
assert "site1" in r.output, "CLI should indicate which site it is connecting to."
assert "site2" in r.output, "CLI should indicate which site it is connecting to."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_pattern_config_and_default_sites_without_site_variable(
user_config_file, runner
):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-{cluster}-cluster
cluster_pattern_default_sites = site1,site2
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "favorite", command])
assert (
r.exit_code == CONFIGURATION_ERROR
), "Exit code should signal a configuration error."
assert "Error" in r.output, "There should be error output explaining the issue."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_pattern_config_and_default_sites_and_site_option(
user_config_file, runner
):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-{cluster}-{site}-cluster
cluster_pattern_default_sites = site1,site2
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "favorite", "-s", "site3", command])
assert r.exit_code == 0, "The command should succeed."
assert [
"my-favorite-site3-cluster"
] == clients, "Specified cluster should be the only client."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_pattern_config_predefined_connections_takes_precedence(
user_config_file, runner
):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-{cluster}-{site}-cluster
cluster_pattern_default_sites = site1,site2
[cluster:some-cluster]
region2 = host1
region1 = host2
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "some-cluster", command])
assert r.exit_code == 0, "The command should succeed."
assert "host1" in clients, "Specified cluster should be among the clients."
assert "host2" in clients, "Specified cluster should be among the clients."
assert "region1" in r.output, "CLI should indicate which site it is connecting to."
assert "region2" in r.output, "CLI should indicate which site it is connecting to."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_pattern_config_(user_config_file, runner):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-{cluster}-{site}-cluster
cluster_pattern_default_sites = site1,site2
[cluster:some-cluster]
region2 = host1
region1 = host2
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "some-cluster", command])
assert r.exit_code == 0, "The command should succeed."
assert "host1" in clients, "Specified cluster should be among the clients."
assert "host2" in clients, "Specified cluster should be among the clients."
assert "region1" in r.output, "CLI should indicate which site it is connecting to."
assert "region2" in r.output, "CLI should indicate which site it is connecting to."
@pytest.mark.usefixtures("mock_clients")
def test_default_connection_can_be_predefined_connection(user_config_file, runner):
user_config_file.write_text(
"""
[general]
default_connection = my-cluster
[cluster:my-cluster]
eu = host1
us = host2
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-s", "eu", command])
assert r.exit_code == 0, "The command should succeed."
assert "host1" in clients, "Specified cluster should be the only client."
@pytest.mark.usefixtures("mock_clients")
def test_cluster_with_pattern_config_and_default_sites_are_stripped_of_whitespace(
user_config_file, runner
):
user_config_file.write_text(
"""
[general]
cluster_hostname_pattern = my-{cluster}-{site}-cluster
cluster_pattern_default_sites = site1, site2
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "favorite", command])
assert r.exit_code == 0, "The command should succeed."
assert (
"my-favorite-site1-cluster" in clients
), "Specified cluster should be among the clients."
assert (
"my-favorite-site2-cluster" in clients
), "Specified cluster should be among the clients."
@pytest.mark.usefixtures("mock_clients")
def test_include_site_should_pass_site(user_config_file, runner):
user_config_file.write_text(
"""
[cluster:my-cluster]
eu = host1
us = host2
"""
)
clients = list()
@esok.command()
@per_connection(include_site=True)
def sub(client, site):
clients.append((client[1]["hosts"][0], site))
r = runner.invoke(esok, ["-c", "my-cluster", "sub"])
assert r.exit_code == 0, "The command should succeed."
assert (
"host1",
"eu",
) in clients, "Configured site-to-host mapping should be preserved."
assert (
"host2",
"us",
) in clients, "Configured site-to-host mapping should be preserved."
@pytest.mark.usefixtures("mock_clients")
def test_developer_boo_boo_is_captured(runner):
"""This happens if @connection_options and Click's context object is not set
up properly, before @per_connection starts.
"""
@click.group()
def root():
pass
_, command = _attach_sub_command(root)
r = runner.invoke(root, [command])
assert r.exit_code == CLI_ERROR, "Exit code should signal an internal error."
assert "Error" in r.output, "There should be error output explaining the issue."
@pytest.mark.usefixtures("mock_clients")
def test_progress_is_printed_for_several_clients(user_config_file, runner):
user_config_file.write_text(
"""
[cluster:awesome-cluster]
eu = 192.168.0.1
us = some.es.cluster.example.com
ae = east-asia-host
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "awesome-cluster", command])
assert r.exit_code == 0, "The command should succeed."
assert (
"awesome-cluster - eu:\n" in r.output
), "CLI should output which cluster it is connecting to."
assert (
"awesome-cluster - us:\n" in r.output
), "CLI should output which cluster it is connecting to."
assert (
"awesome-cluster - ae:\n" in r.output
), "CLI should output which cluster it is connecting to."
assert "OK" in r.output, "CLI should indicate that command was successful."
@pytest.mark.usefixtures("mock_clients")
def test_no_progress_is_printed_for_only_one_client(user_config_file, runner):
user_config_file.write_text(
"""
[cluster:awesome-cluster]
eu = 192.168.0.1
"""
)
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-c", "awesome-cluster", command])
assert r.exit_code == 0, "The command should succeed."
assert (
r.output == ""
), "There should be no output when only one cluster is being connected to."
def test_bad_hostname(runner):
clients, command = _attach_sub_command(esok)
r = runner.invoke(esok, ["-H", "////////////////////", command])
assert r.exit_code == USER_ERROR
assert "Error" in r.output
@pytest.mark.usefixtures("mock_clients")
def test_tls(runner):
clients, command = _attach_sub_command(esok, hostname_only=False)
r = runner.invoke(esok, ["-T", command])
assert r.exit_code == 0
_, kwargs = clients[0]
assert kwargs["use_ssl"] is True
@pytest.mark.usefixtures("mock_clients")
def test_ca_certificate(runner, test_app_dir):
clients, command = _attach_sub_command(esok, hostname_only=False)
ca_file = test_app_dir / "ca.crt"
ca_file.touch()
r = runner.invoke(esok, ["-C", str(ca_file), command])
assert r.exit_code == 0
_, kwargs = clients[0]
assert kwargs["use_ssl"] is True
assert kwargs["ssl_context"] == str(ca_file)
@pytest.mark.usefixtures("mock_clients")
def test_user_password_prompt(runner):
clients, command = _attach_sub_command(esok, hostname_only=False)
user = "super user"
password = "super secret"
r = runner.invoke(esok, ["-u", user, command], input=password)
assert r.exit_code == 0
_, kwargs = clients[0]
assert kwargs["http_auth"] == (user, password)
@pytest.mark.usefixtures("mock_clients")
def test_timeout(runner):
clients, command = _attach_sub_command(esok, hostname_only=False)
timeout = 5
r = runner.invoke(esok, ["-t", str(timeout), command])
assert r.exit_code == 0
_, kwargs = clients[0]
assert kwargs["timeout"] == timeout
def _attach_sub_command(root_command, hostname_only=True):
clients = list()
@root_command.command()
@per_connection()
def sub(client):
if hostname_only:
clients.append(client[1]["hosts"][0])
else:
clients.append(client)
return clients, sub.name
@pytest.fixture()
def mock_clients(monkeypatch):
monkeypatch.setattr(
"esok.config.connection_options.Elasticsearch",
lambda *args, **kwargs: (args, kwargs),
)
monkeypatch.setattr(
"esok.config.connection_options.create_default_context",
lambda *args, cafile, **kwargs: cafile,
)
| 31.527205 | 88 | 0.676863 |
d17104d80c7635941723a21e920cf239fb4a4b55
| 2,475 |
py
|
Python
|
k4MarlinWrapper/examples/runit.py
|
vvolkl/k4MarlinWrapper
|
8a9692790606dee2c9e6c4ce976bf8cee43a4943
|
[
"Apache-2.0"
] | null | null | null |
k4MarlinWrapper/examples/runit.py
|
vvolkl/k4MarlinWrapper
|
8a9692790606dee2c9e6c4ce976bf8cee43a4943
|
[
"Apache-2.0"
] | 9 |
2020-07-14T09:13:31.000Z
|
2020-11-09T10:51:34.000Z
|
k4MarlinWrapper/examples/runit.py
|
vvolkl/k4MarlinWrapper
|
8a9692790606dee2c9e6c4ce976bf8cee43a4943
|
[
"Apache-2.0"
] | 2 |
2020-03-25T12:50:10.000Z
|
2020-04-09T12:22:34.000Z
|
from Gaudi.Configuration import *
from Configurables import LcioEvent, EventDataSvc, MarlinProcessorWrapper
algList = []
evtsvc = EventDataSvc()
read = LcioEvent()
read.OutputLevel = DEBUG
read.Files = ["../test/inputFiles/muons.slcio"]
algList.append(read)
END_TAG = "END_TAG"
# assign parameters by hand, in future parse Marlin.xml file in python
# and convert to list of processors and parameters
procA = MarlinProcessorWrapper("AidaProcessor")
procA.OutputLevel = DEBUG
procA.ProcessorType = "AIDAProcessor"
procA.Parameters = {"FileName": ["histograms"],
"FileType": ["root"],
"Compress": ["1"],
"Verbosity": ["DEBUG"],
}
algList.append(procA)
proc0 = MarlinProcessorWrapper("EventNumber")
proc0.OutputLevel = DEBUG
proc0.ProcessorType = "Statusmonitor"
proc0.Parameters = {"HowOften": ["1"],
"Verbosity": ["DEBUG"],
}
algList.append(proc0)
proc1 = MarlinProcessorWrapper("InitDD4hep")
proc1.OutputLevel = DEBUG
proc1.ProcessorType = "InitializeDD4hep"
proc1.Parameters = {#"EncodingStringParameter", "GlobalTrackerReadoutID"},
#"DD4hepXMLFile", "/cvmfs/clicdp.cern.ch/iLCSoft/builds/nightly/x86_64-slc6-gcc62-opt/lcgeo/HEAD/CLIC/compact/CLIC_o3_v13/CLIC_o3_v13.xml"},
"DD4hepXMLFile": ["/cvmfs/clicdp.cern.ch/iLCSoft/builds/nightly/x86_64-slc6-gcc62-opt/lcgeo/HEAD/CLIC/compact/CLIC_o2_v04/CLIC_o2_v04.xml"],
}
algList.append(proc1)
digiVxd = MarlinProcessorWrapper("VXDBarrelDigitiser")
digiVxd.OutputLevel = DEBUG
digiVxd.ProcessorType = "DDPlanarDigiProcessor"
digiVxd.Parameters = {"SubDetectorName": ["Vertex"],
"IsStrip": ["false"],
"ResolutionU": ["0.003", "0.003", "0.003", "0.003", "0.003", "0.003"],
"ResolutionV": ["0.003", "0.003", "0.003", "0.003", "0.003", "0.003"],
"SimTrackHitCollectionName": ["VertexBarrelCollection"],
"SimTrkHitRelCollection": ["VXDTrackerHitRelations"],
"TrackerHitCollectionName": ["VXDTrackerHits"],
"Verbosity": ["DEBUG"],
}
algList.append(digiVxd)
from Configurables import ApplicationMgr
ApplicationMgr( TopAlg = algList,
EvtSel = 'NONE',
EvtMax = 10,
ExtSvc = [evtsvc],
OutputLevel=DEBUG
)
| 35.869565 | 160 | 0.62101 |
fe3193b2b0e366f02cb2b5ab3bb4feb39c582e2a
| 1,844 |
py
|
Python
|
utils/mvsec_utils.py
|
tub-rip/E-RAFT
|
2e74afcfe93c93f55cea583381489a24a55351ff
|
[
"MIT"
] | 32 |
2021-12-03T06:40:42.000Z
|
2022-03-31T04:59:39.000Z
|
utils/mvsec_utils.py
|
tub-rip/E-RAFT
|
2e74afcfe93c93f55cea583381489a24a55351ff
|
[
"MIT"
] | 7 |
2021-12-03T02:32:50.000Z
|
2022-01-13T02:49:24.000Z
|
utils/mvsec_utils.py
|
tub-rip/E-RAFT
|
2e74afcfe93c93f55cea583381489a24a55351ff
|
[
"MIT"
] | 11 |
2021-12-24T08:04:46.000Z
|
2022-03-25T07:34:28.000Z
|
import numpy as np
import cv2  # needed by prop_flow() below
from matplotlib import pyplot as plt
import os
from utils import filename_templates as TEMPLATES
def prop_flow(x_flow, y_flow, x_indices, y_indices, x_mask, y_mask, scale_factor=1.0):
flow_x_interp = cv2.remap(x_flow,
x_indices,
y_indices,
cv2.INTER_NEAREST)
flow_y_interp = cv2.remap(y_flow,
x_indices,
y_indices,
cv2.INTER_NEAREST)
x_mask[flow_x_interp == 0] = False
y_mask[flow_y_interp == 0] = False
x_indices += flow_x_interp * scale_factor
y_indices += flow_y_interp * scale_factor
return
def estimate_corresponding_gt_flow(path_flow,
gt_timestamps,
start_time,
end_time):
# Each gt flow at timestamp gt_timestamps[gt_iter] represents the displacement between
# gt_iter and gt_iter+1.
# gt_timestamps[gt_iter] -> Timestamp just before start_time
gt_iter = np.searchsorted(gt_timestamps, start_time, side='right') - 1
gt_dt = gt_timestamps[gt_iter + 1] - gt_timestamps[gt_iter]
# Load Flow just before start_time
flow_file = os.path.join(path_flow, TEMPLATES.MVSEC_FLOW_GT_FILE.format(gt_iter))
flow = np.load(flow_file)
x_flow = flow[0]
y_flow = flow[1]
#x_flow = np.squeeze(x_flow_in[gt_iter, ...])
#y_flow = np.squeeze(y_flow_in[gt_iter, ...])
dt = end_time - start_time
# No need to propagate if the desired dt is shorter than the time between gt timestamps.
if gt_dt > dt:
return x_flow * dt / gt_dt, y_flow * dt / gt_dt
else:
raise Exception
| 34.792453 | 93 | 0.586768 |
90f31a5ad47f5b86bb7fc6942e0f80aacd95970b
| 5,070 |
py
|
Python
|
tests/test_case_name_tests.py
|
mehrdad-shokri/retdec-regression-tests-framework
|
9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3
|
[
"MIT"
] | 21 |
2017-12-12T20:38:43.000Z
|
2019-04-14T12:46:10.000Z
|
tests/test_case_name_tests.py
|
mehrdad-shokri/retdec-regression-tests-framework
|
9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3
|
[
"MIT"
] | 6 |
2018-01-06T13:32:23.000Z
|
2018-09-14T15:09:11.000Z
|
tests/test_case_name_tests.py
|
mehrdad-shokri/retdec-regression-tests-framework
|
9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3
|
[
"MIT"
] | 11 |
2017-12-12T20:38:46.000Z
|
2018-07-19T03:12:03.000Z
|
"""
Tests for the :mod:`regression_tests.test_case_name_tests` module.
"""
import unittest
from regression_tests.filesystem.file import StandaloneFile
from regression_tests.test_case import TestCaseName
from regression_tests.tools.tool_arguments import ToolArguments
class TestCaseNameTests(unittest.TestCase):
"""Tests for `TestCaseName`."""
def test_class_name_returns_correct_value(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(name.class_name, 'Test')
def test_input_files_returns_correct_value_when_there_is_single_input_file(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(name.input_files, 'gcd.exe')
def test_input_files_returns_correct_value_when_there_is_single_input_file_with_dash(self):
name = TestCaseName(
'Test (my-file.exe -a x86)'
)
self.assertEqual(name.input_files, 'my-file.exe')
def test_input_files_returns_correct_value_when_there_are_multiple_input_files(self):
name = TestCaseName(
'Test (file1.exe file2.exe -f json)'
)
self.assertEqual(name.input_files, 'file1.exe file2.exe')
def test_input_files_returns_empty_string_if_there_are_no_input_files(self):
name = TestCaseName(
'Test (-v)'
)
self.assertEqual(name.input_files, '')
def test_tool_args_returns_correct_value_when_only_input_file_is_given(self):
name = TestCaseName('Test (gcd.exe)')
self.assertEqual(name.tool_args, 'gcd.exe')
def test_tool_args_returns_correct_value_when_additional_parameters_are_given(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(name.tool_args, 'gcd.exe -a x86')
def test_tool_args_returns_empty_string_when_there_are_no_arguments_or_input_files(self):
name = TestCaseName('Test ()')
self.assertEqual(name.tool_args, '')
def test_short_tool_args_returns_correct_value_when_shorter_than_limit(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(name.short_tool_args(200), 'gcd.exe -a x86')
def test_short_tool_args_returns_correct_value_when_longer_than_limit(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(name.short_tool_args(10), 'gcd.ex[..]')
def test_args_returns_correct_value_when_only_input_file_is_used(self):
name = TestCaseName('Test (gcd.exe)')
self.assertEqual(name.args, '')
def test_args_returns_correct_value_when_additional_parameters_are_given(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(name.args, '-a x86')
def test_args_returns_correct_value_when_there_are_multiple_input_files(self):
name = TestCaseName('Test (gcd1.exe gcd2.exe)')
self.assertEqual(name.args, '')
def test_args_returns_correct_value_when_there_are_multiple_input_files_and_other_arguments(self):
name = TestCaseName('Test (gcd1.exe gcd2.exe -f json)')
self.assertEqual(name.args, '-f json')
def test_args_returns_empty_string_when_there_are_no_arguments_or_input_files(self):
name = TestCaseName('Test ()')
self.assertEqual(name.args, '')
def test_short_args_returns_correct_value_when_shorter_than_limit(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(name.short_args(200), '-a x86')
def test_short_args_returns_correct_value_when_longer_than_limit(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(name.short_args(5), '-[..]')
def test_with_short_args_returns_correct_value_when_shorter_than_limit(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(
name.with_short_args(200),
'Test (gcd.exe -a x86)'
)
def test_with_short_args_returns_correct_value_when_longer_than_limit(self):
name = TestCaseName(
'Test (gcd.exe -a x86)'
)
self.assertEqual(name.with_short_args(4), 'Test (gcd.exe [..])')
def test_with_short_args_returns_correct_value_when_only_input_file_is_used(self):
name = TestCaseName('Test (gcd.exe)')
self.assertEqual(name.with_short_args(10), 'Test (gcd.exe)')
def test_with_short_args_returns_correct_value_when_there_are_no_arguments_or_input_files(self):
name = TestCaseName('Test ()')
self.assertEqual(name.with_short_args(10), 'Test ()')
def test_from_tool_arguments_returns_correct_test_case_name(self):
name = TestCaseName.from_tool_arguments(
test_name='Test',
tool_arguments=ToolArguments(
input_files=(StandaloneFile('gcd.exe'),),
args='-a x86'
)
)
self.assertEqual(name, 'Test (gcd.exe -a x86)')
| 37.007299 | 102 | 0.676529 |
c38f8a137a8ab674b0f9ad3ffa00628b4cf57150
| 98 |
py
|
Python
|
matizla/helpers/types.py
|
neotje/matizla
|
23afbc9ad3972c04e5882e0fed2de1ce0b7f397b
|
[
"MIT"
] | null | null | null |
matizla/helpers/types.py
|
neotje/matizla
|
23afbc9ad3972c04e5882e0fed2de1ce0b7f397b
|
[
"MIT"
] | null | null | null |
matizla/helpers/types.py
|
neotje/matizla
|
23afbc9ad3972c04e5882e0fed2de1ce0b7f397b
|
[
"MIT"
] | null | null | null |
from webview.window import Window
class JSwindow:
title: str
uuid: str
hidden: bool
| 12.25 | 33 | 0.683673 |
058a08600ba5c6150dc4f30eb208f753758e1148
| 511 |
py
|
Python
|
parser.py
|
fabiantheblind/my.feedbin.com.subscriptions
|
b89dd1fca65559ba03d5e773441ac318b4a4c935
|
[
"Unlicense"
] | null | null | null |
parser.py
|
fabiantheblind/my.feedbin.com.subscriptions
|
b89dd1fca65559ba03d5e773441ac318b4a4c935
|
[
"Unlicense"
] | null | null | null |
parser.py
|
fabiantheblind/my.feedbin.com.subscriptions
|
b89dd1fca65559ba03d5e773441ac318b4a4c935
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
from bs4 import BeautifulSoup
import codecs
soup = BeautifulSoup(open("subscriptions.xml"))
tags = soup.find_all("outline")
f = codecs.open("links.md","w","utf-8")
for tag in tags:
# print tag['htmlurl']
if tag.get('htmlurl') is None:
f.write("\n##" + tag.get('title')+ "\n\n")
# print "##" + tag.get('title')
else:
f.write("- ["+tag.get('text')+"]("+tag.get('htmlurl')+") \n")
# print "- ["+tag.get('text')+"]("+tag.get('htmlurl')+") "
f.close()
| 31.9375 | 70 | 0.55773 |
13b2d60a4c503c10251704e6d2fff4e105541b37
| 1,779 |
py
|
Python
|
task13_b.py
|
mboehn/aoc2017
|
1bf5302c6e566e8454d3e567cfac38945c8fe955
|
[
"MIT"
] | null | null | null |
task13_b.py
|
mboehn/aoc2017
|
1bf5302c6e566e8454d3e567cfac38945c8fe955
|
[
"MIT"
] | null | null | null |
task13_b.py
|
mboehn/aoc2017
|
1bf5302c6e566e8454d3e567cfac38945c8fe955
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import math
import sys
import csv
from pprint import pprint
import collections
import operator
import func
INPUTFILE = './task13.input'
MY_DEPTH = 0
def main():
layers = {}
with open(file=INPUTFILE, mode='r') as fileinput:
for line in fileinput:
layer, depth = line.strip().split(': ')
layers[int(layer)] = int(depth)
#pprint(layers)
delay = 0
last_layer = max(layers) # layer num is the key
while True:
position = 0
severity = 0
hits = 0
while position <= last_layer:
# print("** current position is: {}".format(position))
try:
current_depth = layers[position]
# print("** this layer has a depth of {}".format(current_depth))
            except KeyError:
current_depth = None
# print("** no scanner at this layer")
if current_depth:
scanner_position = (delay + position) % ((current_depth - 1) * 2)
# print("** the scanner at this layer has a position of: {}".format(scanner_position))
if scanner_position == MY_DEPTH:
# print("!! i was hit by the scanner! adding to severity...")
severity += position * current_depth
hits += 1
# print()
position += 1
# print("== severity is {}".format(severity))
if hits == 0:
print("++ escaped without hits")
print("== delay was {}".format(delay))
break
# print("-- new round. delay was {}".format(delay))
# print()
delay += 1
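def _scanner_position_demo():
    # Illustrative sketch (not part of the original solution, never called): a scanner
    # of depth d sweeps back and forth with period 2*(d-1), so it sits at the top layer
    # exactly when time % (2 * (d - 1)) == 0 -- the same check used inside main() above.
    depth = 3
    top_hits = [t for t in range(10) if t % ((depth - 1) * 2) == 0]
    assert top_hits == [0, 4, 8]
    return top_hits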
if __name__ == '__main__':
if len(sys.argv) == 2:
INPUTFILE = sys.argv[1]
main()
| 25.414286 | 101 | 0.525014 |
a12ee170bc397851916594783db89aebe191102a
| 12,859 |
py
|
Python
|
examples/generate_with_calibration_v2.py
|
mikkyhu/transformers
|
b28abe5c4d221ff2574dde0355c985fd11f1152b
|
[
"Apache-2.0"
] | null | null | null |
examples/generate_with_calibration_v2.py
|
mikkyhu/transformers
|
b28abe5c4d221ff2574dde0355c985fd11f1152b
|
[
"Apache-2.0"
] | null | null | null |
examples/generate_with_calibration_v2.py
|
mikkyhu/transformers
|
b28abe5c4d221ff2574dde0355c985fd11f1152b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
from tqdm import trange
import torch
import torch.nn.functional as F
import numpy as np
from transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from transformers import XLNetLMHeadModel, XLNetTokenizer
from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'xlnet': (XLNetLMHeadModel, XLNetTokenizer),
'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
# TODO: setting custom seed not robust.
def set_seed(args=None, seed=42):
if args is None:
np.random.seed(seed)
torch.manual_seed(seed)
        # guard the CUDA call so seeding also works on CPU-only machines
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)
else:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
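def _filtering_demo():
    # Illustrative sketch (not part of the original script, never called): shows how the
    # filter above is typically combined with multinomial sampling. The logits are made up.
    logits = torch.tensor([2.0, 1.0, 0.5, -1.0])
    filtered = top_k_top_p_filtering(logits.clone(), top_k=2, top_p=0.9)
    probs = F.softmax(filtered, dim=-1)  # filtered-out entries get probability 0
    next_token = torch.multinomial(probs, num_samples=1)
    return next_token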
def get_lookahead_entropies(model, context, batch_size, vocab_size, candidates=None, is_xlnet=False, device='cpu'):
# in: context (1-dim tensor of t tokens)
# out: ents (1-dim tensor of entropies of w_{t+2} given each candidate w_{t+1})
# i.e. ents[w] = H(w_{t+2} | w_{<=t}, w_{t+1}=w)
# tries all words by default, but can pass in array of candidates
batch = torch.zeros(len(context)+1, dtype=torch.long, device=device)
batch[:len(context)] = context
batch = batch.unsqueeze(0).repeat(batch_size, 1)
# mark uncomputed entropies with -1 for processing later
ents = -torch.ones(vocab_size, device=device)
# if not passed an array of candidates, try all candidates
if candidates is None:
candidates = torch.arange(vocab_size, dtype=torch.long, device=device)
batch_start = 0
with torch.no_grad():
# loop over all w_{t+1}, chunking into batches
while batch_start < len(candidates):
batch_end = min(len(candidates), batch_start + batch_size)
batch[:batch_end-batch_start, -1] = candidates[batch_start:batch_end]
inputs = {'input_ids': batch}
if is_xlnet:
pass # not equipped to deal with xlnet yet
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][:, -1, :]
next_probs = F.softmax(next_token_logits, dim=-1)
next_ents = torch.sum(-next_probs * torch.log(next_probs + 1e-20), dim=-1)
ents[batch_start:batch_end] = next_ents[:batch_end-batch_start]
batch_start += batch_size
return ents
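def _lookahead_entropies_demo():
    # Hypothetical usage sketch (not part of the original script, never called): shows the
    # input/output contract documented above. The model name, prompt and candidate range
    # are made up; restricting `candidates` keeps the loop short, and entries left at -1
    # mark entropies that were skipped.
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')
    model.eval()
    context = torch.tensor(tokenizer.encode("The quick brown fox"), dtype=torch.long)
    candidates = torch.arange(100, dtype=torch.long)  # only score the first 100 token ids
    ents = get_lookahead_entropies(model, context, batch_size=32,
                                   vocab_size=tokenizer.vocab_size,
                                   candidates=candidates)
    return ents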
def sample_sequence_calibrated(model, length, context, batch_size, vocab_size, alpha=0.0, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):
num_samples = 1 # since entropy measurement is slow, no point parallelizing
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
# ents[k,t] = H(w_t | w_<t) for independent generation k
ents = torch.zeros((num_samples, length), device=device)
with torch.no_grad():
for gen_index in range(length):
inputs = {'input_ids': generated}
if is_xlnet:
pass # not equipped to deal with xlnet yet
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
model_next_logits = outputs[0][:, -1, :] / temperature
if top_k == 0:
candidates = None # try all words for lookahead
else:
# try k most likely words
candidates = torch.argsort(model_next_logits[0], descending=True)[:top_k]
lookahead_ents = get_lookahead_entropies(model, generated[0], batch_size, vocab_size, candidates=candidates, device=device, is_xlnet=is_xlnet).unsqueeze(0)
if top_k != 0:
# replace uncomputed entropies with average (for centered adjustment)
top_average_ent = lookahead_ents[lookahead_ents != -1].mean()
                lookahead_ents[lookahead_ents == -1] = top_average_ent
calibrated_next_logits = model_next_logits - alpha * lookahead_ents
next_probs = F.softmax(calibrated_next_logits, dim=-1)
next_ents = torch.sum(-next_probs * torch.log(next_probs + 1e-20), dim=-1)
ents[:, gen_index] = next_ents
# sample from top k
# filtered_logits = top_k_top_p_filtering(calibrated_next_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(calibrated_next_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token), dim=1)
return generated, ents
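def _calibration_demo():
    # Illustrative sketch (not part of the original script, never called): reproduces the
    # calibrated sampling step above on made-up numbers. With alpha > 0, tokens whose
    # continuations carry higher lookahead entropy are down-weighted before sampling.
    model_next_logits = torch.tensor([[1.0, 1.0, 1.0]])
    lookahead_ents = torch.tensor([[0.5, 2.0, 4.0]])
    alpha = 0.5
    calibrated_next_logits = model_next_logits - alpha * lookahead_ents
    probs = F.softmax(calibrated_next_logits, dim=-1)
    return probs  # most probability mass lands on the first (lowest-entropy) token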
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--save_name", type=str, default="test.npz")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--num_samples", type=int, default=4)
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=0.0)
parser.add_argument("--alpha", type=float, default=0)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path)
model.to(args.device)
model.eval()
if args.length < 0 and model.config.max_position_embeddings > 0:
args.length = model.config.max_position_embeddings
elif 0 < model.config.max_position_embeddings < args.length:
args.length = model.config.max_position_embeddings # No generation bigger than model size
elif args.length < 0:
args.length = MAX_LENGTH # avoid infinite loop
print(args)
vocab_size = tokenizer.vocab_size
print('vocab_size:', vocab_size)
while True:
raw_text = args.prompt if args.prompt else input("Model prompt >>> ")
if args.model_type in ["transfo-xl", "xlnet"]:
# Models with memory likes to have a long prompt for short inputs.
raw_text = (args.padding_text if args.padding_text else PADDING_TEXT) + raw_text
context_tokens = tokenizer.encode(raw_text)
avg_ents = torch.zeros((1, args.length), device=args.device)
for i in range(args.num_samples):
set_seed(seed=i + args.seed)
out, ents = sample_sequence_calibrated(
model=model,
context=context_tokens,
length=args.length,
batch_size=args.batch_size,
vocab_size=vocab_size,
alpha=args.alpha,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
is_xlnet=bool(args.model_type == "xlnet"),
)
# show all generations from this batch
for j in range(len(out)):
seq = out[j, len(context_tokens):].tolist()
text = tokenizer.decode(seq, clean_up_tokenization_spaces=True)
print(text)
ents = ents.mean(axis=0)
avg_ents = (avg_ents * i + ents) / (i + 1)
np.savez(args.save_name, avg_ents=avg_ents.cpu().numpy())
if args.prompt:
break
return text
if __name__ == '__main__':
main()
| 44.804878 | 167 | 0.66887 |
a6fa4a978219ee9d991bb2361e0fb4a96309e0e9
| 602 |
py
|
Python
|
testprojectD-rice-d058558a4d4f/migrations/versions/9bd4ada1f824_.py
|
YuanXMjoy/rice
|
05e908eea8c9189c3b392d2d57e5653191bf1da9
|
[
"MIT"
] | null | null | null |
testprojectD-rice-d058558a4d4f/migrations/versions/9bd4ada1f824_.py
|
YuanXMjoy/rice
|
05e908eea8c9189c3b392d2d57e5653191bf1da9
|
[
"MIT"
] | null | null | null |
testprojectD-rice-d058558a4d4f/migrations/versions/9bd4ada1f824_.py
|
YuanXMjoy/rice
|
05e908eea8c9189c3b392d2d57e5653191bf1da9
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 9bd4ada1f824
Revises: b809ef5c228c
Create Date: 2016-07-09 15:30:22.896575
"""
# revision identifiers, used by Alembic.
revision = '9bd4ada1f824'
down_revision = 'b809ef5c228c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('orders', sa.Column('time', sa.DateTime(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('orders', 'time')
### end Alembic commands ###
| 22.296296 | 76 | 0.69103 |
788ea3d1f51c25e4fb89d9de87e3c0ec26a9c5af
| 7,970 |
gyp
|
Python
|
snapshot/snapshot.gyp
|
dark-richie/crashpad
|
d573ac113bd5fce5cc970bb5ae76a235a1431a5d
|
[
"Apache-2.0"
] | 575 |
2015-06-18T23:58:20.000Z
|
2022-03-23T09:32:39.000Z
|
snapshot/snapshot.gyp
|
dark-richie/crashpad
|
d573ac113bd5fce5cc970bb5ae76a235a1431a5d
|
[
"Apache-2.0"
] | 113 |
2019-12-14T04:28:04.000Z
|
2021-09-26T18:40:27.000Z
|
snapshot/snapshot.gyp
|
dark-richie/crashpad
|
d573ac113bd5fce5cc970bb5ae76a235a1431a5d
|
[
"Apache-2.0"
] | 52 |
2015-07-14T10:40:50.000Z
|
2022-03-15T01:11:49.000Z
|
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'../build/crashpad.gypi',
],
'targets': [
{
'target_name': 'crashpad_snapshot',
'type': 'static_library',
'dependencies': [
'../client/client.gyp:crashpad_client',
'../compat/compat.gyp:crashpad_compat',
'../third_party/mini_chromium/mini_chromium.gyp:base',
'../util/util.gyp:crashpad_util',
],
'include_dirs': [
'..',
],
'sources': [
'annotation_snapshot.cc',
'annotation_snapshot.h',
'capture_memory.cc',
'capture_memory.h',
'cpu_architecture.h',
'cpu_context.cc',
'cpu_context.h',
'crashpad_info_client_options.cc',
'crashpad_info_client_options.h',
'crashpad_types/crashpad_info_reader.cc',
'crashpad_types/crashpad_info_reader.h',
'crashpad_types/image_annotation_reader.cc',
'crashpad_types/image_annotation_reader.h',
'elf/elf_dynamic_array_reader.cc',
'elf/elf_dynamic_array_reader.h',
'elf/elf_image_reader.cc',
'elf/elf_image_reader.h',
'elf/elf_symbol_table_reader.cc',
'elf/elf_symbol_table_reader.h',
'elf/module_snapshot_elf.cc',
'elf/module_snapshot_elf.h',
'exception_snapshot.h',
'handle_snapshot.cc',
'handle_snapshot.h',
'linux/cpu_context_linux.cc',
'linux/cpu_context_linux.h',
'linux/debug_rendezvous.cc',
'linux/debug_rendezvous.h',
'linux/exception_snapshot_linux.cc',
'linux/exception_snapshot_linux.h',
'linux/process_reader_linux.cc',
'linux/process_reader_linux.h',
'linux/process_snapshot_linux.cc',
'linux/process_snapshot_linux.h',
'linux/signal_context.h',
'linux/system_snapshot_linux.cc',
'linux/system_snapshot_linux.h',
'linux/thread_snapshot_linux.cc',
'linux/thread_snapshot_linux.h',
'mac/cpu_context_mac.cc',
'mac/cpu_context_mac.h',
'mac/exception_snapshot_mac.cc',
'mac/exception_snapshot_mac.h',
'mac/mach_o_image_annotations_reader.cc',
'mac/mach_o_image_annotations_reader.h',
'mac/mach_o_image_reader.cc',
'mac/mach_o_image_reader.h',
'mac/mach_o_image_segment_reader.cc',
'mac/mach_o_image_segment_reader.h',
'mac/mach_o_image_symbol_table_reader.cc',
'mac/mach_o_image_symbol_table_reader.h',
'mac/module_snapshot_mac.cc',
'mac/module_snapshot_mac.h',
'mac/process_reader_mac.cc',
'mac/process_reader_mac.h',
'mac/process_snapshot_mac.cc',
'mac/process_snapshot_mac.h',
'mac/process_types.cc',
'mac/process_types.h',
'mac/process_types/all.proctype',
'mac/process_types/annotation.proctype',
'mac/process_types/crashpad_info.proctype',
'mac/process_types/crashreporterclient.proctype',
'mac/process_types/custom.cc',
'mac/process_types/dyld_images.proctype',
'mac/process_types/flavors.h',
'mac/process_types/internal.h',
'mac/process_types/loader.proctype',
'mac/process_types/nlist.proctype',
'mac/process_types/traits.h',
'mac/system_snapshot_mac.cc',
'mac/system_snapshot_mac.h',
'mac/thread_snapshot_mac.cc',
'mac/thread_snapshot_mac.h',
'memory_snapshot.cc',
'memory_snapshot.h',
'memory_snapshot_generic.h',
'minidump/minidump_annotation_reader.cc',
'minidump/minidump_annotation_reader.h',
'minidump/minidump_context_converter.cc',
'minidump/minidump_context_converter.h',
'minidump/minidump_simple_string_dictionary_reader.cc',
'minidump/minidump_simple_string_dictionary_reader.h',
'minidump/minidump_stream.h',
'minidump/minidump_string_list_reader.cc',
'minidump/minidump_string_list_reader.h',
'minidump/minidump_string_reader.cc',
'minidump/minidump_string_reader.h',
'minidump/exception_snapshot_minidump.cc',
'minidump/exception_snapshot_minidump.h',
'minidump/memory_snapshot_minidump.cc',
'minidump/memory_snapshot_minidump.h',
'minidump/module_snapshot_minidump.cc',
'minidump/module_snapshot_minidump.h',
'minidump/process_snapshot_minidump.cc',
'minidump/process_snapshot_minidump.h',
'minidump/system_snapshot_minidump.cc',
'minidump/system_snapshot_minidump.h',
'minidump/thread_snapshot_minidump.cc',
'minidump/thread_snapshot_minidump.h',
'module_snapshot.h',
'posix/timezone.cc',
'posix/timezone.h',
'process_snapshot.h',
'sanitized/memory_snapshot_sanitized.cc',
'sanitized/memory_snapshot_sanitized.h',
'sanitized/module_snapshot_sanitized.cc',
'sanitized/module_snapshot_sanitized.h',
'sanitized/process_snapshot_sanitized.cc',
'sanitized/process_snapshot_sanitized.h',
'sanitized/sanitization_information.cc',
'sanitized/sanitization_information.h',
'sanitized/thread_snapshot_sanitized.cc',
'sanitized/thread_snapshot_sanitized.h',
'snapshot_constants.h',
'system_snapshot.h',
'thread_snapshot.h',
'unloaded_module_snapshot.cc',
'unloaded_module_snapshot.h',
'win/cpu_context_win.cc',
'win/cpu_context_win.h',
'win/exception_snapshot_win.cc',
'win/exception_snapshot_win.h',
'win/capture_memory_delegate_win.cc',
'win/capture_memory_delegate_win.h',
'win/memory_map_region_snapshot_win.cc',
'win/memory_map_region_snapshot_win.h',
'win/module_snapshot_win.cc',
'win/module_snapshot_win.h',
'win/pe_image_annotations_reader.cc',
'win/pe_image_annotations_reader.h',
'win/pe_image_reader.cc',
'win/pe_image_reader.h',
'win/pe_image_resource_reader.cc',
'win/pe_image_resource_reader.h',
'win/process_reader_win.cc',
'win/process_reader_win.h',
'win/process_snapshot_win.cc',
'win/process_snapshot_win.h',
'win/process_subrange_reader.cc',
'win/process_subrange_reader.h',
'win/system_snapshot_win.cc',
'win/system_snapshot_win.h',
'win/thread_snapshot_win.cc',
'win/thread_snapshot_win.h',
'x86/cpuid_reader.cc',
'x86/cpuid_reader.h',
],
'conditions': [
['OS=="win"', {
'link_settings': {
'libraries': [
'-lpowrprof.lib',
],
},
}],
['OS=="linux" or OS=="android"', {
'sources!': [
'capture_memory.cc',
'capture_memory.h',
],
}, { # else: OS!="linux" and OS!="android"
'sources/': [
['exclude', '^elf/'],
['exclude', '^crashpad_types/'],
['exclude', '^sanitized/'],
],
}],
['target_arch!="ia32" and target_arch!="x64"', {
'sources/': [
['exclude', '^x86/'],
],
}],
],
'target_conditions': [
['OS=="android"', {
'sources/': [
['include', '^linux/'],
],
}],
],
},
],
}
| 36.728111 | 74 | 0.62522 |
2b992ea2e9f7bff0a2cc0d51a2445922e3dbed5b
| 31,437 |
py
|
Python
|
lib/enum34/__init__.py
|
xbmc-catchuptv-au/script.module.pycaption
|
4f76781c38d50c138dca2344bcceb748764d76da
|
[
"Apache-2.0"
] | 1 |
2019-05-06T20:09:13.000Z
|
2019-05-06T20:09:13.000Z
|
lib/enum34/__init__.py
|
xbmc-catchuptv-au/script.module.pycaption
|
4f76781c38d50c138dca2344bcceb748764d76da
|
[
"Apache-2.0"
] | 2 |
2020-01-24T13:03:23.000Z
|
2020-02-06T02:21:01.000Z
|
lib/enum34/__init__.py
|
xbmc-catchuptv-au/script.module.pycaption
|
4f76781c38d50c138dca2344bcceb748764d76da
|
[
"Apache-2.0"
] | 2 |
2019-06-15T15:25:42.000Z
|
2019-09-11T02:36:48.000Z
|
"""Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
version = 1, 1, 6
pyver = float('%s.%s' % _sys.version_info[:2])
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
try:
basestring
except NameError:
# In Python 2 basestring is the ancestor of both str and unicode
# in Python 3 it's just str, but was missing in 3.1
basestring = str
try:
unicode
except NameError:
# In Python 3 unicode no longer exists (it's just str)
unicode = str
class _RouteClassAttributeToGetattr(object):
"""Route attribute access on a class to __getattr__.
This is a descriptor, used to define attributes that act differently when
accessed through an instance and through a class. Instance access remains
normal, but access to an attribute through a class will be routed to the
class's __getattr__ method; this is done by raising AttributeError.
"""
def __init__(self, fget=None):
self.fget = fget
def __get__(self, instance, ownerclass=None):
if instance is None:
raise AttributeError()
return self.fget(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
"""Track enum member order and ensure member names are not reused.
EnumMeta will use the names found in self._member_names as the
enumeration member names.
"""
def __init__(self):
super(_EnumDict, self).__init__()
self._member_names = []
def __setitem__(self, key, value):
"""Changes anything not dundered or not a descriptor.
If a descriptor is added with the same name as an enum member, the name
is removed from _member_names (this may leave a hole in the numerical
sequence of values).
If an enum member name is used twice, an error is raised; duplicate
values are not checked for.
Single underscore (sunder) names are reserved.
Note: in 3.x __order__ is simply discarded as a not necessary piece
leftover from 2.x
"""
if pyver >= 3.0 and key in ('_order_', '__order__'):
return
elif key == '__order__':
key = '_order_'
if _is_sunder(key):
if key != '_order_':
raise ValueError('_names_ are reserved for future Enum use')
elif _is_dunder(key):
pass
elif key in self._member_names:
# descriptor overwriting an enum?
raise TypeError('Attempted to reuse key: %r' % key)
elif not _is_descriptor(value):
if key in self:
# enum overwriting a descriptor?
raise TypeError('Key already defined as: %r' % self[key])
self._member_names.append(key)
super(_EnumDict, self).__setitem__(key, value)
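def _enum_dict_demo():
    # Illustrative sketch (not part of the original module, never called): demonstrates
    # the duplicate-name protection described in _EnumDict.__setitem__ above.
    d = _EnumDict()
    d['red'] = 1
    try:
        d['red'] = 2  # reusing a member name is rejected
    except TypeError:
        return True
    return False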
# Dummy value for Enum as EnumMeta explicity checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist. This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
"""Metaclass for Enum"""
@classmethod
def __prepare__(metacls, cls, bases):
return _EnumDict()
def __new__(metacls, cls, bases, classdict):
# an Enum class is final once enumeration items have been defined; it
# cannot be mixed with other types (int, float, etc.) if it has an
# inherited __new__ unless a new __new__ is defined (or the resulting
# class will fail).
if type(classdict) is dict:
original_dict = classdict
classdict = _EnumDict()
for k, v in original_dict.items():
classdict[k] = v
member_type, first_enum = metacls._get_mixins_(bases)
__new__, save_new, use_args = metacls._find_new_(classdict,
member_type,
first_enum)
# save enum items into separate mapping so they don't get baked into
# the new class
members = dict((k, classdict[k]) for k in classdict._member_names)
for name in classdict._member_names:
del classdict[name]
# py2 support for definition order
_order_ = classdict.get('_order_')
if _order_ is None:
if pyver < 3.0:
try:
_order_ = [name for (name, value) in
sorted(members.items(),
key=lambda item: item[1])]
except TypeError:
_order_ = [name for name in sorted(members.keys())]
else:
_order_ = classdict._member_names
else:
del classdict['_order_']
if pyver < 3.0:
_order_ = _order_.replace(',', ' ').split()
aliases = [name for name in members if name not in _order_]
_order_ += aliases
# check for illegal enum names (any others?)
invalid_names = set(members) & set(['mro'])
if invalid_names:
raise ValueError('Invalid enum member name(s): %s' % (
', '.join(invalid_names),))
# save attributes from super classes so we know if we can take
# the shortcut of storing members in the class dict
base_attributes = set([a for b in bases for a in b.__dict__])
# create our new Enum type
enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases,
classdict)
enum_class._member_names_ = [] # names in random order
if OrderedDict is not None:
enum_class._member_map_ = OrderedDict()
else:
enum_class._member_map_ = {} # name->value map
enum_class._member_type_ = member_type
# Reverse value->name map for hashable values.
enum_class._value2member_map_ = {}
# instantiate them, checking for duplicates as we go
# we instantiate first instead of checking for duplicates first in case
# a custom __new__ is doing something funky with the values -- such as
# auto-numbering ;)
if __new__ is None:
__new__ = enum_class.__new__
for member_name in _order_:
value = members[member_name]
if not isinstance(value, tuple):
args = (value,)
else:
args = value
if member_type is tuple: # special case for tuple enums
args = (args,) # wrap it one more time
if not use_args or not args:
enum_member = __new__(enum_class)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = value
else:
enum_member = __new__(enum_class, *args)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = member_type(*args)
value = enum_member._value_
enum_member._name_ = member_name
enum_member.__objclass__ = enum_class
enum_member.__init__(*args)
# If another member with the same value was already defined, the
# new member becomes an alias to the existing one.
for name, canonical_member in enum_class._member_map_.items():
if canonical_member.value == enum_member._value_:
enum_member = canonical_member
break
else:
# Aliases don't appear in member names (only in __members__).
enum_class._member_names_.append(member_name)
# performance boost for any member that would not shadow
# a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
if member_name not in base_attributes:
setattr(enum_class, member_name, enum_member)
# now add to _member_map_
enum_class._member_map_[member_name] = enum_member
try:
# This may fail if value is not hashable. We can't add the
# value to the map, and by-value lookups for this value will be
# linear.
enum_class._value2member_map_[value] = enum_member
except TypeError:
pass
# If a custom type is mixed into the Enum, and it does not know how
# to pickle itself, pickle.dumps will succeed but pickle.loads will
# fail. Rather than have the error show up later and possibly far
# from the source, sabotage the pickle protocol for this class so
# that pickle.dumps also fails.
#
# However, if the new class implements its own __reduce_ex__, do not
# sabotage -- it's on them to make sure it works correctly. We use
# __reduce_ex__ instead of any of the others as it is preferred by
# pickle over __reduce__, and it handles all pickle protocols.
unpicklable = False
if '__reduce_ex__' not in classdict:
if member_type is not object:
methods = ('__getnewargs_ex__', '__getnewargs__',
'__reduce_ex__', '__reduce__')
if not any(m in member_type.__dict__ for m in methods):
_make_class_unpicklable(enum_class)
unpicklable = True
# double check that repr and friends are not the mixin's or various
# things break (such as pickle)
for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
class_method = getattr(enum_class, name)
enum_method = getattr(first_enum, name, None)
if name not in classdict and class_method is not enum_method:
if name == '__reduce_ex__' and unpicklable:
continue
setattr(enum_class, name, enum_method)
# method resolution and int's are not playing nice
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
if issubclass(enum_class, int):
setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
elif pyver < 3.0:
if issubclass(enum_class, int):
for method in (
'__le__',
'__lt__',
'__gt__',
'__ge__',
'__eq__',
'__ne__',
'__hash__',
):
setattr(enum_class, method, getattr(int, method))
# replace any other __new__ with our own (as long as Enum is not None,
# anyway) -- again, this is to support pickle
if Enum is not None:
# if the user defined their own __new__, save it before it gets
# clobbered in case they subclass later
if save_new:
setattr(enum_class, '__member_new__',
enum_class.__dict__['__new__'])
setattr(enum_class, '__new__', Enum.__dict__['__new__'])
return enum_class
def __bool__(cls):
"""
classes/types should always be True.
"""
return True
def __call__(cls, value, names=None, module=None, type=None, start=1):
"""Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='red green blue')).
When used for the functional API: `module`, if set, will be stored in
the new class' __module__ attribute; `type`, if set, will be mixed in
as the first base class.
Note: if `module` is not set this routine will attempt to discover the
calling module by walking the frame stack; if this is unsuccessful
the resulting class will not be pickleable.
"""
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(value, names, module=module, type=type,
start=start)
def __contains__(cls, member):
return isinstance(member, cls) and member.name in cls._member_map_
def __delattr__(cls, attr):
# nicer error message when someone tries to delete an attribute
# (see issue19025).
if attr in cls._member_map_:
raise AttributeError(
"%s: cannot delete Enum member." % cls.__name__)
super(EnumMeta, cls).__delattr__(attr)
def __dir__(self):
return (['__class__', '__doc__', '__members__', '__module__'] +
self._member_names_)
@property
def __members__(cls):
"""Returns a mapping of member name->value.
This mapping lists all enum members, including aliases. Note that this
is a copy of the internal mapping.
"""
return cls._member_map_.copy()
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name)
def __getitem__(cls, name):
return cls._member_map_[name]
def __iter__(cls):
return (cls._member_map_[name] for name in cls._member_names_)
def __reversed__(cls):
return (cls._member_map_[name] for name in
reversed(cls._member_names_))
def __len__(cls):
return len(cls._member_names_)
__nonzero__ = __bool__
def __repr__(cls):
return "<enum %r>" % cls.__name__
def __setattr__(cls, name, value):
"""Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
"""
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('Cannot reassign members.')
super(EnumMeta, cls).__setattr__(name, value)
def _create_(cls, class_name, names=None, module=None, type=None, start=1):
"""Convenience method to create a new Enum class.
`names` can be:
* A string containing member names, separated either with spaces or
commas. Values are auto-numbered from 1.
* An iterable of member names. Values are auto-numbered from 1.
* An iterable of (member name, value) pairs.
* A mapping of member name -> value.
"""
if pyver < 3.0:
# if class_name is unicode, attempt a conversion to ASCII
if isinstance(class_name, unicode):
try:
class_name = class_name.encode('ascii')
except UnicodeEncodeError:
raise TypeError(
'%r is not representable in ASCII' % class_name)
metacls = cls.__class__
if type is None:
bases = (cls,)
else:
bases = (type, cls)
classdict = metacls.__prepare__(class_name, bases)
_order_ = []
# special processing needed for names?
if isinstance(names, basestring):
names = names.replace(',', ' ').split()
if isinstance(names, (tuple, list)) and isinstance(names[0],
basestring):
names = [(e, i + start) for (i, e) in enumerate(names)]
# Here, names is either an iterable of (name, value) or a mapping.
item = None # in case names is empty
for item in names:
if isinstance(item, basestring):
member_name, member_value = item, names[item]
else:
member_name, member_value = item
classdict[member_name] = member_value
_order_.append(member_name)
# only set _order_ in classdict if name/value was not from a mapping
if not isinstance(item, basestring):
classdict['_order_'] = ' '.join(_order_)
enum_class = metacls.__new__(metacls, class_name, bases, classdict)
# TODO: replace the frame hack if a blessed way to know the calling
# module is ever developed
if module is None:
try:
module = _sys._getframe(2).f_globals['__name__']
except (AttributeError, ValueError):
pass
if module is None:
_make_class_unpicklable(enum_class)
else:
enum_class.__module__ = module
return enum_class
@staticmethod
def _get_mixins_(bases):
"""Returns the type for creating enum members, and the first inherited
enum class.
bases: the tuple of bases that was given to __new__
"""
if not bases or Enum is None:
return object, Enum
# double check that we are not subclassing a class with existing
# enumeration members; while we're at it, see if any other data
# type has been mixed in so we can use the correct __new__
member_type = first_enum = None
for base in bases:
if (base is not Enum and
issubclass(base, Enum) and
base._member_names_):
raise TypeError("Cannot extend enumerations")
# base is now the last base in bases
if not issubclass(base, Enum):
raise TypeError("new enumerations must be created as "
"`ClassName([mixin_type,] enum_type)`")
# get correct mix-in type (either mix-in type of Enum subclass, or
# first base if last base is Enum)
if not issubclass(bases[0], Enum):
member_type = bases[0] # first data type
first_enum = bases[-1] # enum type
else:
for base in bases[0].__mro__:
# most common: (IntEnum, int, Enum, object)
# possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
# <class 'int'>, <Enum 'Enum'>,
# <class 'object'>)
if issubclass(base, Enum):
if first_enum is None:
first_enum = base
else:
if member_type is None:
member_type = base
return member_type, first_enum
if pyver < 3.0:
@staticmethod
def _find_new_(classdict, member_type, first_enum):
"""Returns the __new__ to be used for creating the enum members.
classdict: the class dictionary given to __new__
member_type: the data type whose __new__ will be used by default
first_enum: enumeration to check for an overriding __new__
"""
            # now find the correct __new__, checking to see if one was defined
            # by the user; also check earlier enum classes in case a __new__
            # was saved as __member_new__
__new__ = classdict.get('__new__', None)
if __new__:
return None, True, True # __new__, save_new, use_args
N__new__ = getattr(None, '__new__')
O__new__ = getattr(object, '__new__')
if Enum is None:
E__new__ = N__new__
else:
E__new__ = Enum.__dict__['__new__']
# check all possibles for __member_new__ before falling back to
# __new__
for method in ('__member_new__', '__new__'):
for possible in (member_type, first_enum):
try:
target = possible.__dict__[method]
except (AttributeError, KeyError):
target = getattr(possible, method, None)
if target not in [
None,
N__new__,
O__new__,
E__new__,
]:
if method == '__member_new__':
classdict['__new__'] = target
return None, False, True
if isinstance(target, staticmethod):
target = target.__get__(member_type)
__new__ = target
break
if __new__ is not None:
break
else:
__new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__
            # and to the new enum member's __init__
if __new__ is object.__new__:
use_args = False
else:
use_args = True
return __new__, False, use_args
else:
@staticmethod
def _find_new_(classdict, member_type, first_enum):
"""Returns the __new__ to be used for creating the enum members.
classdict: the class dictionary given to __new__
member_type: the data type whose __new__ will be used by default
first_enum: enumeration to check for an overriding __new__
"""
            # now find the correct __new__, checking to see if one was defined
            # by the user; also check earlier enum classes in case a __new__
            # was saved as __member_new__
__new__ = classdict.get('__new__', None)
# should __new__ be saved as __member_new__ later?
save_new = __new__ is not None
if __new__ is None:
# check all possibles for __member_new__ before falling back to
# __new__
for method in ('__member_new__', '__new__'):
for possible in (member_type, first_enum):
target = getattr(possible, method, None)
if target not in (
None,
None.__new__,
object.__new__,
Enum.__new__,
):
__new__ = target
break
if __new__ is not None:
break
else:
__new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__
            # and to the new enum member's __init__
if __new__ is object.__new__:
use_args = False
else:
use_args = True
return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict[
'__doc__'] = "Generic enumeration.\n\n Derive from this class to " \
"define new enumerations.\n\n"
def __new__(cls, value):
# all enum instances are actually created during class construction
# without calling this method; this method is called by the metaclass'
# __call__ (i.e. Color(3) ), and by pickle
if type(value) is cls:
# For lookups like Color(Color.red)
value = value.value
# return value
# by-value search for a matching enum member
# see if it's in the reverse mapping (for hashable values)
try:
if value in cls._value2member_map_:
return cls._value2member_map_[value]
except TypeError:
# not there, now do long search -- O(n) behavior
for member in cls._member_map_.values():
if member.value == value:
return member
raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
return "<%s.%s: %r>" % (
self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
if pyver >= 3.0:
def __dir__(self):
added_behavior = [
m
for cls in self.__class__.mro()
for m in cls.__dict__
if m[0] != '_' and m not in self._member_map_
]
return (['__class__', '__doc__', '__module__', ] + added_behavior)
temp_enum_dict['__dir__'] = __dir__
del __dir__
def __format__(self, format_spec):
# mixed-in Enums should use the mixed-in type's __format__, otherwise
# we can get strange results with the Enum name showing up instead of
# the value
# pure Enum branch
if self._member_type_ is object:
cls = str
val = str(self)
# mix-in branch
else:
cls = self._member_type_
val = self.value
return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
def __cmp__(self, other):
if type(other) is self.__class__:
if self is other:
return 0
return -1
return NotImplemented
raise TypeError("unorderable types: %s() and %s()" % (
self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__cmp__'] = __cmp__
del __cmp__
else:
def __le__(self, other):
raise TypeError("unorderable types: %s() <= %s()" % (
self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__le__'] = __le__
del __le__
def __lt__(self, other):
raise TypeError("unorderable types: %s() < %s()" % (
self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__lt__'] = __lt__
del __lt__
def __ge__(self, other):
raise TypeError("unorderable types: %s() >= %s()" % (
self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__ge__'] = __ge__
del __ge__
def __gt__(self, other):
raise TypeError("unorderable types: %s() > %s()" % (
self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__gt__'] = __gt__
del __gt__
def __eq__(self, other):
if type(other) is self.__class__:
return self is other
return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
if type(other) is self.__class__:
return self is not other
return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__
def __hash__(self):
return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__
def __reduce_ex__(self, proto):
return self.__class__, (self._value_,)
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`. This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
return self._value_
temp_enum_dict['value'] = value
del value
@classmethod
def _convert(cls, name, module, filter, source=None):
"""
Create a new Enum subclass that replaces a collection of global constants
"""
# convert all constants from source (or module) that pass filter() to
# a new Enum called name, and export the enum and its members back to
# module;
# also, replace the __reduce_ex__ method so unpickling works in
# previous Python versions
module_globals = vars(_sys.modules[module])
if source:
source = vars(source)
else:
source = module_globals
members = dict(
(name, value) for name, value in source.items() if filter(name))
cls = cls(name, members, module=module)
cls.__reduce_ex__ = _reduce_ex_by_name
module_globals.update(cls.__members__)
module_globals[name] = cls
return cls
temp_enum_dict['_convert'] = _convert
del _convert
Enum = EnumMeta('Enum', (object,), temp_enum_dict)
del temp_enum_dict
# Enum has now been created
###########################
class IntEnum(int, Enum):
"""Enum where members are also (and must be) ints"""
def _reduce_ex_by_name(self, proto):
return self.name
def unique(enumeration):
"""Class decorator that ensures only unique members exist in an
enumeration."""
duplicates = []
for name, member in enumeration.__members__.items():
if name != member.name:
duplicates.append((name, member.name))
if duplicates:
duplicate_names = ', '.join(
["%s -> %s" % (alias, name) for (alias, name) in duplicates]
)
raise ValueError('duplicate names found in %r: %s' %
(enumeration, duplicate_names)
)
return enumeration
| 34.93 | 79 | 0.582689 |
5d8387d3c8a8a5ad1875e4abdb6e104ee9304d43
| 1,868 |
py
|
Python
|
api/tests/unit/segments/test_conditions.py
|
SolidStateGroup/Bullet-Train-API
|
ea47ccbdadf665a806ae4e0eff6ad1a2f1b0ba19
|
[
"BSD-3-Clause"
] | 126 |
2019-12-13T18:41:43.000Z
|
2020-11-10T13:33:55.000Z
|
api/tests/unit/segments/test_conditions.py
|
SolidStateGroup/Bullet-Train-API
|
ea47ccbdadf665a806ae4e0eff6ad1a2f1b0ba19
|
[
"BSD-3-Clause"
] | 30 |
2019-12-12T16:52:01.000Z
|
2020-11-09T18:55:29.000Z
|
api/tests/unit/segments/test_conditions.py
|
SolidStateGroup/Bullet-Train-API
|
ea47ccbdadf665a806ae4e0eff6ad1a2f1b0ba19
|
[
"BSD-3-Clause"
] | 20 |
2020-02-14T21:55:36.000Z
|
2020-11-03T22:29:03.000Z
|
import pytest
from environments.identities.traits.models import Trait
from segments.models import (
EQUAL,
GREATER_THAN,
GREATER_THAN_INCLUSIVE,
LESS_THAN,
LESS_THAN_INCLUSIVE,
NOT_EQUAL,
Condition,
)
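# Each case checks a semantic-version comparison between a stored trait value and a condition value carrying the ':semver' suffix.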
@pytest.mark.parametrize(
"operator, trait_value, condition_value, result",
[
(EQUAL, "1.0.0", "1.0.0:semver", True),
(EQUAL, "1.0.0", "1.0.1:semver", False),
(NOT_EQUAL, "1.0.0", "1.0.0:semver", False),
(NOT_EQUAL, "1.0.0", "1.0.1:semver", True),
(GREATER_THAN, "1.0.1", "1.0.0:semver", True),
(GREATER_THAN, "1.0.0", "1.0.0-beta:semver", True),
(GREATER_THAN, "1.0.1", "1.2.0:semver", False),
(GREATER_THAN, "1.0.1", "1.0.1:semver", False),
(GREATER_THAN, "1.2.4", "1.2.3-pre.2+build.4:semver", True),
(LESS_THAN, "1.0.0", "1.0.1:semver", True),
(LESS_THAN, "1.0.0", "1.0.0:semver", False),
(LESS_THAN, "1.0.1", "1.0.0:semver", False),
(LESS_THAN, "1.0.0-rc.2", "1.0.0-rc.3:semver", True),
(GREATER_THAN_INCLUSIVE, "1.0.1", "1.0.0:semver", True),
(GREATER_THAN_INCLUSIVE, "1.0.1", "1.2.0:semver", False),
(GREATER_THAN_INCLUSIVE, "1.0.1", "1.0.1:semver", True),
(LESS_THAN_INCLUSIVE, "1.0.0", "1.0.1:semver", True),
(LESS_THAN_INCLUSIVE, "1.0.0", "1.0.0:semver", True),
(LESS_THAN_INCLUSIVE, "1.0.1", "1.0.0:semver", False),
],
)
def test_does_identity_match_for_semver_values(
identity, operator, trait_value, condition_value, result
):
# Given
condition = Condition(operator=operator, property="version", value=condition_value)
traits = [
Trait(
trait_key="version",
string_value=trait_value,
identity=identity,
)
]
# Then
assert condition.does_identity_match(identity, traits) is result
| 35.245283 | 87 | 0.590471 |
550d1c9604a9e68abc2d6dbb15cd0cb0b1868811
| 3,133 |
py
|
Python
|
30_psycopg2_for_pgsql_interfacing.py
|
nagasudhirpulla/python_wrldc_training
|
c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6
|
[
"MIT"
] | null | null | null |
30_psycopg2_for_pgsql_interfacing.py
|
nagasudhirpulla/python_wrldc_training
|
c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6
|
[
"MIT"
] | null | null | null |
30_psycopg2_for_pgsql_interfacing.py
|
nagasudhirpulla/python_wrldc_training
|
c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6
|
[
"MIT"
] | 2 |
2020-09-30T16:32:18.000Z
|
2020-10-23T01:13:51.000Z
|
'''
Using psycopg2 module to interface with a postgreSQL database
pip install psycopg2
'''
# %%
# import psycopg2 module
import pandas as pd
import datetime as dt
import psycopg2
# get these variables from some config mechanism so that connection credentials are not hardcoded or exposed
hostStr = 'x.x.x.x'
dbStr = 'hrd_training'
uNameStr = 'hrd_training'
dbPassStr = 'hrd#456'
# %%
# push data into a postgreSQL database
# create a connection object
conn = psycopg2.connect(host=hostStr, dbname=dbStr,
user=uNameStr, password=dbPassStr)
# get a cursor object from the connection
cur = conn.cursor()
# create sql for insertion
dataInsertionTuples = [
('util1', '2020-07-01 00:00:00.000', 25, 'urs', 102.5),
('util2', '2020-07-02 00:00:00.000', 26, 'sced', 50.7)
]
dataText = ','.join(cur.mogrify('(%s,%s,%s,%s,%s)', row).decode(
"utf-8") for row in dataInsertionTuples)
sqlTxt = 'INSERT INTO public.schedules(\
sch_utility, sch_date, sch_block, sch_type, sch_val)\
VALUES {0} on conflict (sch_utility, sch_date, sch_block, sch_type) \
do update set sch_val = excluded.sch_val'.format(dataText)
# execute the sql to perform insertion
cur.execute(sqlTxt)
# commit the changes
conn.commit()
# closing database connection and cursor
if(conn):
# close the cursor object to avoid memory leaks
cur.close()
# close the connection object also
conn.close()
# %%
# extract data from postgreSQL database
try:
# get the connection object
conn = psycopg2.connect(host=hostStr, dbname=dbStr,
user=uNameStr, password=dbPassStr)
# get the cursor from connection
cur = conn.cursor()
# create the query
postgreSQL_select_Query = "select sch_date + (15 * (sch_block-1) * interval '1 minute') as sch_time, \
sch_val from public.schedules where sch_utility=%s and sch_type=%s \
and sch_date between %s and %s order by sch_date, sch_block"
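    # each sch_block is a 15-minute slot, so block N maps to sch_date + 15*(N-1) minutes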
# execute the query
cur.execute(postgreSQL_select_Query,
('util1', 'urs', dt.datetime(2020, 7, 1), dt.datetime(2020, 7, 1)))
# fetch all the records from cursor
records = cur.fetchall()
# get the column names returned from the query
colNames = [row[0] for row in cur.description]
# create a dataframe from the fetched records
records = pd.DataFrame.from_records(records, columns=colNames)
# =============================================================================
# iterate through all the fetched records
# for rowIter in range(records.shape[0]):
# print("datetime = ", records['sch_time'].iloc[rowIter])
# print("value = ", records['sch_val'].iloc[rowIter])
# =============================================================================
except (Exception, psycopg2.Error) as error:
print("Error while fetching data from PostgreSQL", error)
records = 0
finally:
# closing database connection and cursor
if(conn):
# close the cursor object to avoid memory leaks
cur.close()
# close the connection object also
conn.close()
# %%
| 32.298969 | 106 | 0.638047 |
e97687de3427378489a91eaa65da3b8f5bee7b3b
| 15,952 |
py
|
Python
|
pandas/core/arrays/_mixins.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 |
2021-04-28T05:15:42.000Z
|
2021-04-28T05:15:42.000Z
|
pandas/core/arrays/_mixins.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/arrays/_mixins.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
from functools import wraps
from typing import (
Any,
Sequence,
TypeVar,
cast,
)
import numpy as np
from pandas._libs import lib
from pandas._typing import (
F,
PositionalIndexer2D,
Shape,
type_t,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
)
from pandas.core.dtypes.common import is_dtype_equal
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import array_equivalent
from pandas.core import missing
from pandas.core.algorithms import (
take,
unique,
value_counts,
)
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.sorting import nargminmax
NDArrayBackedExtensionArrayT = TypeVar(
"NDArrayBackedExtensionArrayT", bound="NDArrayBackedExtensionArray"
)
def ravel_compat(meth: F) -> F:
"""
Decorator to ravel a 2D array before passing it to a cython operation,
then reshape the result to our own shape.
"""
@wraps(meth)
def method(self, *args, **kwargs):
if self.ndim == 1:
return meth(self, *args, **kwargs)
flags = self._ndarray.flags
flat = self.ravel("K")
result = meth(flat, *args, **kwargs)
order = "F" if flags.f_contiguous else "C"
return result.reshape(self.shape, order=order)
return cast(F, method)
class NDArrayBackedExtensionArray(ExtensionArray):
"""
ExtensionArray that is backed by a single NumPy ndarray.
"""
_ndarray: np.ndarray
def _from_backing_data(
self: NDArrayBackedExtensionArrayT, arr: np.ndarray
) -> NDArrayBackedExtensionArrayT:
"""
Construct a new ExtensionArray `new_array` with `arr` as its _ndarray.
This should round-trip:
self == self._from_backing_data(self._ndarray)
"""
raise AbstractMethodError(self)
def _box_func(self, x):
"""
Wrap numpy type in our dtype.type if necessary.
"""
return x
def _validate_scalar(self, value):
# used by NDArrayBackedExtensionIndex.insert
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
def take(
self: NDArrayBackedExtensionArrayT,
indices: Sequence[int],
*,
allow_fill: bool = False,
fill_value: Any = None,
axis: int = 0,
) -> NDArrayBackedExtensionArrayT:
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_data = take(
self._ndarray,
# error: Argument 2 to "take" has incompatible type "Sequence[int]";
# expected "ndarray"
indices, # type: ignore[arg-type]
allow_fill=allow_fill,
fill_value=fill_value,
axis=axis,
)
return self._from_backing_data(new_data)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to a representation
suitable for self._ndarray, raising TypeError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : native representation
Raises
------
TypeError
"""
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# TODO: make this a cache_readonly; for that to work we need to remove
# the _index_data kludge in libreduction
@property
def shape(self) -> Shape:
return self._ndarray.shape
def __len__(self) -> int:
return self.shape[0]
@cache_readonly
def ndim(self) -> int:
return len(self.shape)
@cache_readonly
def size(self) -> int:
return self._ndarray.size
@cache_readonly
def nbytes(self) -> int:
return self._ndarray.nbytes
def reshape(
self: NDArrayBackedExtensionArrayT, *args, **kwargs
) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.reshape(*args, **kwargs)
return self._from_backing_data(new_data)
def ravel(
self: NDArrayBackedExtensionArrayT, *args, **kwargs
) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.ravel(*args, **kwargs)
return self._from_backing_data(new_data)
@property
def T(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.T
return self._from_backing_data(new_data)
# ------------------------------------------------------------------------
def equals(self, other) -> bool:
if type(self) is not type(other):
return False
if not is_dtype_equal(self.dtype, other.dtype):
return False
return bool(array_equivalent(self._ndarray, other._ndarray))
def _values_for_argsort(self) -> np.ndarray:
return self._ndarray
# Signature of "argmin" incompatible with supertype "ExtensionArray"
def argmin(self, axis: int = 0, skipna: bool = True): # type:ignore[override]
# override base class by adding axis keyword
validate_bool_kwarg(skipna, "skipna")
if not skipna and self.isna().any():
raise NotImplementedError
return nargminmax(self, "argmin", axis=axis)
# Signature of "argmax" incompatible with supertype "ExtensionArray"
def argmax(self, axis: int = 0, skipna: bool = True): # type:ignore[override]
# override base class by adding axis keyword
validate_bool_kwarg(skipna, "skipna")
if not skipna and self.isna().any():
raise NotImplementedError
return nargminmax(self, "argmax", axis=axis)
def copy(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.copy()
return self._from_backing_data(new_data)
def repeat(
self: NDArrayBackedExtensionArrayT, repeats, axis=None
) -> NDArrayBackedExtensionArrayT:
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat((), {"axis": axis})
new_data = self._ndarray.repeat(repeats, axis=axis)
return self._from_backing_data(new_data)
def unique(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = unique(self._ndarray)
return self._from_backing_data(new_data)
@classmethod
@doc(ExtensionArray._concat_same_type)
def _concat_same_type(
cls: type[NDArrayBackedExtensionArrayT],
to_concat: Sequence[NDArrayBackedExtensionArrayT],
axis: int = 0,
) -> NDArrayBackedExtensionArrayT:
dtypes = {str(x.dtype) for x in to_concat}
if len(dtypes) != 1:
raise ValueError("to_concat must have the same dtype (tz)", dtypes)
new_values = [x._ndarray for x in to_concat]
new_values = np.concatenate(new_values, axis=axis)
# error: Argument 1 to "_from_backing_data" of "NDArrayBackedExtensionArray" has
# incompatible type "List[ndarray]"; expected "ndarray"
return to_concat[0]._from_backing_data(new_values) # type: ignore[arg-type]
@doc(ExtensionArray.searchsorted)
def searchsorted(self, value, side="left", sorter=None):
value = self._validate_searchsorted_value(value)
return self._ndarray.searchsorted(value, side=side, sorter=sorter)
def _validate_searchsorted_value(self, value):
return value
@doc(ExtensionArray.shift)
def shift(self, periods=1, fill_value=None, axis=0):
fill_value = self._validate_shift_value(fill_value)
new_values = shift(self._ndarray, periods, axis, fill_value)
return self._from_backing_data(new_values)
def _validate_shift_value(self, fill_value):
# TODO: after deprecation in datetimelikearraymixin is enforced,
        # we can remove this and use validate_fill_value directly
return self._validate_fill_value(fill_value)
def __setitem__(self, key, value):
key = check_array_indexer(self, key)
value = self._validate_setitem_value(value)
self._ndarray[key] = value
def _validate_setitem_value(self, value):
return value
def __getitem__(
self: NDArrayBackedExtensionArrayT,
key: PositionalIndexer2D,
) -> NDArrayBackedExtensionArrayT | Any:
if lib.is_integer(key):
# fast-path
result = self._ndarray[key]
if self.ndim == 1:
return self._box_func(result)
return self._from_backing_data(result)
# error: Value of type variable "AnyArrayLike" of "extract_array" cannot be
# "Union[int, slice, ndarray]"
# error: Incompatible types in assignment (expression has type "ExtensionArray",
# variable has type "Union[int, slice, ndarray]")
key = extract_array( # type: ignore[type-var,assignment]
key, extract_numpy=True
)
key = check_array_indexer(self, key)
result = self._ndarray[key]
if lib.is_scalar(result):
return self._box_func(result)
result = self._from_backing_data(result)
return result
@doc(ExtensionArray.fillna)
def fillna(
self: NDArrayBackedExtensionArrayT, value=None, method=None, limit=None
) -> NDArrayBackedExtensionArrayT:
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
mask = self.isna()
# error: Argument 2 to "check_value_size" has incompatible type
# "ExtensionArray"; expected "ndarray"
value = missing.check_value_size(
value, mask, len(self) # type: ignore[arg-type]
)
if mask.any():
if method is not None:
# TODO: check value is None
# (for now) when self.ndim == 2, we assume axis=0
func = missing.get_fill_func(method, ndim=self.ndim)
new_values, _ = func(self._ndarray.T.copy(), limit=limit, mask=mask.T)
new_values = new_values.T
# TODO: PandasArray didn't used to copy, need tests for this
new_values = self._from_backing_data(new_values)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
# We validate the fill_value even if there is nothing to fill
if value is not None:
self._validate_setitem_value(value)
new_values = self.copy()
return new_values
# ------------------------------------------------------------------------
# Reductions
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
meth = getattr(self, name, None)
if meth:
return meth(skipna=skipna, **kwargs)
else:
msg = f"'{type(self).__name__}' does not implement reduction '{name}'"
raise TypeError(msg)
def _wrap_reduction_result(self, axis: int | None, result):
if axis is None or self.ndim == 1:
return self._box_func(result)
return self._from_backing_data(result)
# ------------------------------------------------------------------------
def __repr__(self) -> str:
if self.ndim == 1:
return super().__repr__()
from pandas.io.formats.printing import format_object_summary
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
lines = [
format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
", \n"
)
for x in self
]
data = ",\n".join(lines)
class_name = f"<{type(self).__name__}>"
return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}"
# ------------------------------------------------------------------------
# __array_function__ methods
def putmask(self: NDArrayBackedExtensionArrayT, mask: np.ndarray, value) -> None:
"""
Analogue to np.putmask(self, mask, value)
Parameters
----------
mask : np.ndarray[bool]
value : scalar or listlike
Raises
------
TypeError
If value cannot be cast to self.dtype.
"""
value = self._validate_setitem_value(value)
np.putmask(self._ndarray, mask, value)
def where(
self: NDArrayBackedExtensionArrayT, mask: np.ndarray, value
) -> NDArrayBackedExtensionArrayT:
"""
Analogue to np.where(mask, self, value)
Parameters
----------
mask : np.ndarray[bool]
value : scalar or listlike
Raises
------
TypeError
If value cannot be cast to self.dtype.
"""
value = self._validate_setitem_value(value)
res_values = np.where(mask, self._ndarray, value)
return self._from_backing_data(res_values)
def delete(
self: NDArrayBackedExtensionArrayT, loc, axis: int = 0
) -> NDArrayBackedExtensionArrayT:
res_values = np.delete(self._ndarray, loc, axis=axis)
return self._from_backing_data(res_values)
def swapaxes(
self: NDArrayBackedExtensionArrayT, axis1, axis2
) -> NDArrayBackedExtensionArrayT:
res_values = self._ndarray.swapaxes(axis1, axis2)
return self._from_backing_data(res_values)
# ------------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def value_counts(self, dropna: bool = True):
"""
Return a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NA values.
Returns
-------
Series
"""
if self.ndim != 1:
raise NotImplementedError
from pandas import (
Index,
Series,
)
if dropna:
# error: Unsupported operand type for ~ ("ExtensionArray")
values = self[~self.isna()]._ndarray # type: ignore[operator]
else:
values = self._ndarray
result = value_counts(values, sort=False, dropna=dropna)
index_arr = self._from_backing_data(np.asarray(result.index._data))
index = Index(index_arr, name=result.index.name)
return Series(result._values, index=index, name=result.name)
# ------------------------------------------------------------------------
# numpy-like methods
@classmethod
def _empty(
cls: type_t[NDArrayBackedExtensionArrayT], shape: Shape, dtype: ExtensionDtype
) -> NDArrayBackedExtensionArrayT:
"""
Analogous to np.empty(shape, dtype=dtype)
Parameters
----------
shape : tuple[int]
dtype : ExtensionDtype
"""
# The base implementation uses a naive approach to find the dtype
# for the backing ndarray
arr = cls._from_sequence([], dtype=dtype)
backing = np.empty(shape, dtype=arr._ndarray.dtype)
return arr._from_backing_data(backing)
| 32.488798 | 88 | 0.606131 |
417cfee5c46ffb45a3f15d6195be10b0722a8e2a
| 3,955 |
py
|
Python
|
flytekit/tools/fast_registration.py
|
v01dXYZ/flytekit
|
dfb89c619c209e6c88ae335bfb7a10892b50808a
|
[
"Apache-2.0"
] | null | null | null |
flytekit/tools/fast_registration.py
|
v01dXYZ/flytekit
|
dfb89c619c209e6c88ae335bfb7a10892b50808a
|
[
"Apache-2.0"
] | 1 |
2021-02-05T02:57:25.000Z
|
2021-02-05T02:57:25.000Z
|
flytekit/tools/fast_registration.py
|
v01dXYZ/flytekit
|
dfb89c619c209e6c88ae335bfb7a10892b50808a
|
[
"Apache-2.0"
] | null | null | null |
import os as _os
import subprocess as _subprocess
import tarfile as _tarfile
import tempfile as _tempfile
from pathlib import Path as _Path
import dirhash as _dirhash
from flytekit.interfaces.data import data_proxy as _data_proxy
from flytekit.interfaces.data.data_proxy import Data as _Data
_tmp_versions_dir = "tmp/versions"
def compute_digest(source_dir: _os.PathLike) -> str:
"""
Walks the entirety of the source dir to compute a deterministic hex digest of the dir contents.
:param _os.PathLike source_dir:
:return Text:
"""
return f"fast{_dirhash.dirhash(source_dir, 'md5', match=['*.py'])}"
def _write_marker(marker: _os.PathLike):
try:
open(marker, "x")
except FileExistsError:
pass
def filter_tar_file_fn(tarinfo: _tarfile.TarInfo) -> _tarfile.TarInfo:
"""
Excludes designated file types from tar archive
:param _tarfile.TarInfo tarinfo:
:return _tarfile.TarInfo:
"""
if tarinfo.name.endswith(".pyc"):
return None
if tarinfo.name.startswith(".cache"):
return None
if "__pycache__" in tarinfo.name:
return None
return tarinfo
def get_additional_distribution_loc(remote_location: str, identifier: str) -> str:
"""
:param Text remote_location:
:param Text identifier:
:return Text:
"""
return _os.path.join(remote_location, "{}.{}".format(identifier, "tar.gz"))
def upload_package(source_dir: _os.PathLike, identifier: str, remote_location: str, dry_run=False) -> str:
"""
Uploads the contents of the source dir as a tar package to a destination specified by the unique identifier and
remote_location.
:param _os.PathLike source_dir:
:param Text identifier:
:param Text remote_location:
:param bool dry_run:
:return Text:
"""
tmp_versions_dir = _os.path.join(_os.getcwd(), _tmp_versions_dir)
_os.makedirs(tmp_versions_dir, exist_ok=True)
marker = _Path(_os.path.join(tmp_versions_dir, identifier))
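    # a local marker file named after the digest lets later runs skip both the remote check and the upload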
full_remote_path = get_additional_distribution_loc(remote_location, identifier)
if _os.path.exists(marker):
print("Local marker for identifier {} already exists, skipping upload".format(identifier))
return full_remote_path
if _Data.data_exists(full_remote_path):
print("Remote file {} already exists, skipping upload".format(full_remote_path))
_write_marker(marker)
return full_remote_path
with _tempfile.NamedTemporaryFile() as fp:
# Write using gzip
with _tarfile.open(fp.name, "w:gz") as tar:
tar.add(source_dir, arcname="", filter=filter_tar_file_fn)
if dry_run:
print("Would upload {} to {}".format(fp.name, full_remote_path))
else:
_Data.put_data(fp.name, full_remote_path)
print("Uploaded {} to {}".format(fp.name, full_remote_path))
# Finally, touch the marker file so we have a flag in the future to avoid re-uploading the package dir as an
# optimization
_write_marker(marker)
return full_remote_path
def download_distribution(additional_distribution: str, destination: str):
"""
Downloads a remote code distribution and overwrites any local files.
:param Text additional_distribution:
:param _os.PathLike destination:
"""
_data_proxy.Data.get_data(additional_distribution, destination)
tarfile_name = _os.path.basename(additional_distribution)
file_suffix = _Path(tarfile_name).suffixes
if len(file_suffix) != 2 or file_suffix[0] != ".tar" or file_suffix[1] != ".gz":
raise ValueError("Unrecognized additional distribution format for {}".format(additional_distribution))
# This will overwrite the existing user flyte workflow code in the current working code dir.
result = _subprocess.run(
["tar", "-xvf", _os.path.join(destination, tarfile_name), "-C", destination], stdout=_subprocess.PIPE,
)
result.check_returncode()
| 35.630631 | 115 | 0.7067 |
210d095e7d12c1c350b77c93504cbdb471c60e16
| 250 |
py
|
Python
|
tests/test_decimals.py
|
ulamlabs/aioxrpy
|
89f420a4f225b698b853a1557dff7beedd7d32ea
|
[
"MIT"
] | 7 |
2020-04-22T06:34:50.000Z
|
2021-02-22T11:02:40.000Z
|
tests/test_decimals.py
|
ulamlabs/aioxrpy
|
89f420a4f225b698b853a1557dff7beedd7d32ea
|
[
"MIT"
] | 5 |
2020-04-06T07:38:28.000Z
|
2020-04-08T11:08:11.000Z
|
tests/test_decimals.py
|
ulamlabs/aioxrpy
|
89f420a4f225b698b853a1557dff7beedd7d32ea
|
[
"MIT"
] | 2 |
2020-11-29T13:24:56.000Z
|
2021-01-17T18:14:22.000Z
|
from decimal import Decimal
from aioxrpy import decimals
def test_xrp_to_drops():
assert decimals.xrp_to_drops(Decimal('123.456789')) == 123456789
def test_drops_to_xrp():
assert decimals.drops_to_xrp(123456789) == Decimal('123.456789')
| 20.833333 | 68 | 0.764 |
99fa375f5acfe97cf48995e46de3d62c24b2147d
| 6,769 |
py
|
Python
|
pplot/scatter.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 4 |
2016-12-17T20:06:10.000Z
|
2021-11-19T04:45:29.000Z
|
pplot/scatter.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 11 |
2021-01-06T05:35:11.000Z
|
2022-03-11T23:28:31.000Z
|
pplot/scatter.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 3 |
2015-06-12T10:44:16.000Z
|
2021-07-26T18:39:47.000Z
|
__author__ = 'thor'
import numpy as np
import pandas as pd
import matplotlib.pyplot as mpl_plt
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.manifold import TSNE
import prettyplotlib as ppl
# Get "Set2" colors from ColorBrewer (all colorbrewer scales: http://bl.ocks.org/mbostock/5577023)
default_colors = ['#e41a1c', '#377eb8', '#4eae4b', '#994fa1', '#ff8101', '#fdfc33', '#a8572c', '#f482be', '#999999']
default_color_blind_colors = ['#b84c7d', '#59b96f', '#7f62b8', '#adb342', '#b94c3f', '#43c9b0', '#c17e36', '#738a39']
MAX_PTS_FOR_TSNE = 1500
def plot_with_label_color(X, y, shuffle=False, decompose=None, y_to_color=default_color_blind_colors, **kwargs):
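    # decompose may be True (auto-select t-SNE or PCA based on data size), 'pca', 'tsne', or a callable mapping X to 2-D points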
if decompose is not None:
if decompose == True:
if len(X) < MAX_PTS_FOR_TSNE:
decompose = 'tsne'
else:
decompose = 'pca'
if isinstance(decompose, str):
if decompose == 'pca':
decompose = Pipeline(steps=[('scale', StandardScaler()),
('decompose', PCA(n_components=2, whiten=True))])
elif decompose == 'tsne':
if len(X) > MAX_PTS_FOR_TSNE:
print(("TSNE would be too slow with thatm much data: Taking a set of {} random pts...".format(
MAX_PTS_FOR_TSNE)))
idx = np.random.choice(len(X), size=MAX_PTS_FOR_TSNE, replace=False)
X = X[idx, :]
y = y[idx]
decompose = Pipeline(steps=[('scale', StandardScaler()),
('decompose', TSNE(n_components=2))])
X = decompose.fit_transform(X)
else:
X = decompose(X)
if isinstance(y[0], str):
y = LabelEncoder().fit_transform(y)
if len(np.unique(y)) <= len(y_to_color):
kwargs['alpha'] = kwargs.get('alpha', 0.5)
if shuffle:
permi = np.random.permutation(len(X))
X = X[permi, :]
y = y[permi]
for i in range(len(X)):
mpl_plt.plot(X[i, 0], X[i, 1], 'o', color=y_to_color[y[i]], **kwargs)
else:
for yy in np.unique(y):
lidx = y == yy
mpl_plt.plot(X[lidx, 0], X[lidx, 1], 'o', color=y_to_color[yy], **kwargs)
else:
kwargs['alpha'] = kwargs.get('alpha', 0.4)
mpl_plt.scatter(X[:, 0], X[:, 1], c=y, **kwargs)
return decompose
def df_scatter_plot(df=None, x=None, y=None, label=None, **kwargs):
if df is None:
if y is None and x is not None:
x, y = x[:, 0], x[:, 1]
assert x is not None and y is not None and label is not None, "you need to specify x, y, and label"
df = pd.DataFrame({'x': x, 'y': y, 'label': label})
label = 'label'
x = 'x'
y = 'y'
elif label is None:
if len(df.columns) != 3:
raise ValueError("I can't (or rather won't) guess the label if there's not exactly 3 columns. "
"You need to specify it")
else:
label = [t for t in df.columns if t not in [x, y]][0]
colors = kwargs.pop('colors', None)
label_list = kwargs.pop('label_list', np.array(df[label].unique()))
fig, ax = mpl_plt.subplots(1)
for i, this_label in enumerate(label_list):
d = df[df[label] == this_label]
xvals = np.array(d[x])
yvals = np.array(d[y])
        if colors:
            # prettyplotlib's wrappers take the target Axes as their first argument
            ppl.scatter(ax, xvals, yvals, label=this_label, facecolor=colors[i], **kwargs)
        else:
            ppl.scatter(ax, xvals, yvals, label=this_label, **kwargs)
    ppl.legend(ax)
def factor_scatter_matrix(df, factor, color_map=None, **kwargs):
'''Create a scatter matrix of the variables in df, with differently colored
points depending on the value of df[factor].
inputs:
df: pandas.DataFrame containing the columns to be plotted, as well
as factor.
factor: string or pandas.Series. The column indicating which group
each row belongs to.
palette: A list of hex codes, at least as long as the number of groups.
If omitted, a predefined palette will be used, but it only includes
9 groups.
'''
# import matplotlib.colors
from scipy.stats import gaussian_kde
# from pyqt_fit import kde
if isinstance(df, np.ndarray):
df = pd.DataFrame(df)
if isinstance(factor, np.ndarray):
factor = pd.Series(factor)
if isinstance(factor, str):
factor_name = factor # save off the name
factor = df[factor] # extract column
df = df.drop(factor_name, axis=1) # remove from df, so it doesn't get a row and col in the plot.
else:
df = df.copy()
classes = list(set(factor))
if color_map is None:
color_map = ['#e41a1c', '#377eb8', '#4eae4b', '#994fa1', '#ff8101', '#fdfc33', '#a8572c', '#f482be', '#999999']
if not isinstance(color_map, dict):
color_map = dict(list(zip(classes, color_map)))
if len(classes) > len(color_map):
raise ValueError('''Too many groups for the number of colors provided.
We only have {} colors in the palette, but you have {}
groups.'''.format(len(color_map), len(classes)))
colors = factor.apply(lambda gr: color_map[gr])
scatter_matrix_kwargs = dict({'figsize': (10, 10), 'marker': 'o', 'c': colors, 'diagonal': None},
**kwargs.get('scatter_matrix_kwargs', {}))
axarr = pd.tools.plotting.scatter_matrix(df, **scatter_matrix_kwargs)
columns = list(df.columns)
for rc in range(len(columns)):
for group in classes:
y = df[factor == group].iloc[:, rc].values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
# if columns[rc] in log_axes:
# est = kde.KDE1D(ind, method='linear_combination', lower=0)
# kde_ind = kde.TransformKDE(est, kde.LogTransform)
# else:
# kde_ind = gkde.evaluate(ind)
axarr[rc][rc].plot(ind, gkde.evaluate(ind), c=color_map[group])
# for r in xrange(len(columns)):
# for c in xrange(len(columns)):
#
# a = axarr[r][c]
# if columns[r] in log_axes:
# # print "%d,%d: %s" % columns[r]
# a.set_yscale('symlog')
# if columns[c] in log_axes:
# # print "%d,%d: %s" % columns[c]
# a.set_xscale('symlog')
return axarr, color_map
| 40.291667 | 119 | 0.560792 |
1ad2e96d4a806e8d1104d88e59152369da5ebafb
| 4,554 |
py
|
Python
|
docker-images/taigav2/taiga-back/taiga/projects/tasks/apps.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | 1 |
2017-05-29T19:01:06.000Z
|
2017-05-29T19:01:06.000Z
|
docker-images/taigav2/taiga-back/taiga/projects/tasks/apps.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
docker-images/taigav2/taiga-back/taiga/projects/tasks/apps.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
from django.apps import apps
from django.db.models import signals
def connect_tasks_signals():
from taiga.projects.tagging import signals as tagging_handlers
from . import signals as handlers
# Finished date
signals.pre_save.connect(handlers.set_finished_date_when_edit_task,
sender=apps.get_model("tasks", "Task"),
dispatch_uid="set_finished_date_when_edit_task")
# Tags
signals.pre_save.connect(tagging_handlers.tags_normalization,
sender=apps.get_model("tasks", "Task"),
dispatch_uid="tags_normalization_task")
def connect_tasks_close_or_open_us_and_milestone_signals():
from . import signals as handlers
# Cached prev object version
signals.pre_save.connect(handlers.cached_prev_task,
sender=apps.get_model("tasks", "Task"),
dispatch_uid="cached_prev_task")
# Open/Close US and Milestone
signals.post_save.connect(handlers.try_to_close_or_open_us_and_milestone_when_create_or_edit_task,
sender=apps.get_model("tasks", "Task"),
dispatch_uid="try_to_close_or_open_us_and_milestone_when_create_or_edit_task")
signals.post_delete.connect(handlers.try_to_close_or_open_us_and_milestone_when_delete_task,
sender=apps.get_model("tasks", "Task"),
dispatch_uid="try_to_close_or_open_us_and_milestone_when_delete_task")
def connect_tasks_custom_attributes_signals():
from taiga.projects.custom_attributes import signals as custom_attributes_handlers
signals.post_save.connect(custom_attributes_handlers.create_custom_attribute_value_when_create_task,
sender=apps.get_model("tasks", "Task"),
dispatch_uid="create_custom_attribute_value_when_create_task")
def connect_all_tasks_signals():
connect_tasks_signals()
connect_tasks_close_or_open_us_and_milestone_signals()
connect_tasks_custom_attributes_signals()
def disconnect_tasks_signals():
signals.pre_save.disconnect(sender=apps.get_model("tasks", "Task"),
dispatch_uid="set_finished_date_when_edit_task")
signals.pre_save.disconnect(sender=apps.get_model("tasks", "Task"),
dispatch_uid="tags_normalization")
def disconnect_tasks_close_or_open_us_and_milestone_signals():
signals.pre_save.disconnect(sender=apps.get_model("tasks", "Task"),
dispatch_uid="cached_prev_task")
signals.post_save.disconnect(sender=apps.get_model("tasks", "Task"),
dispatch_uid="try_to_close_or_open_us_and_milestone_when_create_or_edit_task")
signals.post_delete.disconnect(sender=apps.get_model("tasks", "Task"),
dispatch_uid="try_to_close_or_open_us_and_milestone_when_delete_task")
def disconnect_tasks_custom_attributes_signals():
signals.post_save.disconnect(sender=apps.get_model("tasks", "Task"),
dispatch_uid="create_custom_attribute_value_when_create_task")
def disconnect_all_tasks_signals():
disconnect_tasks_signals()
disconnect_tasks_close_or_open_us_and_milestone_signals()
disconnect_tasks_custom_attributes_signals()
class TasksAppConfig(AppConfig):
name = "taiga.projects.tasks"
verbose_name = "Tasks"
def ready(self):
connect_all_tasks_signals()
| 46 | 111 | 0.706851 |
cb1aa6cceafdf894251c54a9b468c7f91850ca32
| 1,305 |
py
|
Python
|
characters/migrations/0001_initial.py
|
mpirnat/django-tutorial-v2
|
3d128301357e687542c6627f9d8eca026e04faaa
|
[
"MIT"
] | 6 |
2015-01-06T20:53:15.000Z
|
2021-09-09T12:12:03.000Z
|
characters/migrations/0001_initial.py
|
mpirnat/django-tutorial-v2
|
3d128301357e687542c6627f9d8eca026e04faaa
|
[
"MIT"
] | null | null | null |
characters/migrations/0001_initial.py
|
mpirnat/django-tutorial-v2
|
3d128301357e687542c6627f9d8eca026e04faaa
|
[
"MIT"
] | 2 |
2015-01-06T20:53:20.000Z
|
2021-09-09T12:12:04.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Character',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('background', models.TextField()),
('level', models.IntegerField()),
('experience_points', models.IntegerField()),
('max_hit_points', models.IntegerField()),
('current_hit_points', models.IntegerField()),
('strength', models.IntegerField()),
('dexterity', models.IntegerField()),
('constitution', models.IntegerField()),
('intelligence', models.IntegerField()),
('wisdom', models.IntegerField()),
('charisma', models.IntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
]
| 35.27027 | 114 | 0.537165 |
17eda8e58efed3c3276d19797363f932ca1b8d4b
| 249 |
py
|
Python
|
opendeep/optimization/__init__.py
|
vitruvianscience/OpenDeep
|
e96efc449101094354b615cf15afe6d03644fc36
|
[
"Apache-2.0"
] | 252 |
2015-03-13T21:55:22.000Z
|
2021-09-06T21:37:38.000Z
|
opendeep/optimization/__init__.py
|
afcarl/OpenDeep
|
e96efc449101094354b615cf15afe6d03644fc36
|
[
"Apache-2.0"
] | 16 |
2015-03-14T06:47:04.000Z
|
2016-09-23T19:13:35.000Z
|
opendeep/optimization/__init__.py
|
afcarl/OpenDeep
|
e96efc449101094354b615cf15afe6d03644fc36
|
[
"Apache-2.0"
] | 68 |
2015-03-14T00:05:53.000Z
|
2020-06-04T13:36:13.000Z
|
from __future__ import division, absolute_import, print_function
from .adadelta import AdaDelta
from .adasecant import AdaSecant
from .optimizer import *
from .rmsprop import RMSProp
from .stochastic_gradient_descent import SGD
from . import loss
| 24.9 | 64 | 0.835341 |
3cfc2349f803e6d930a415680185f7b662ff8814
| 9,580 |
py
|
Python
|
doc/conf.py
|
tomasojea/seaborn
|
9b03f8138949402a351fa06e2598144812aae586
|
[
"BSD-3-Clause"
] | null | null | null |
doc/conf.py
|
tomasojea/seaborn
|
9b03f8138949402a351fa06e2598144812aae586
|
[
"BSD-3-Clause"
] | 1 |
2020-10-31T23:31:41.000Z
|
2020-10-31T23:31:41.000Z
|
doc/conf.py
|
tomasojea/seaborn
|
9b03f8138949402a351fa06e2598144812aae586
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# seaborn documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 29 23:25:46 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'matplotlib.sphinxext.plot_directive',
'gallery_generator',
'numpydoc',
'sphinx_issues',
]
# Sphinx-issues configuration
issues_github_path = 'mwaskom/seaborn'
# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
# Include the example source for plots in API docs
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'seaborn'
import time
copyright = u'2012-{}'.format(time.strftime("%Y"))
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
sys.path.insert(0, os.path.abspath(os.path.pardir))
import seaborn
version = seaborn.__version__
# The full version, including alpha/beta/rc tags.
release = seaborn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'source_link_position': "footer",
'bootswatch_theme': "paper",
'navbar_sidebarrel': False,
'bootstrap_version': "3",
'navbar_links': [
("Gallery", "examples/index"),
("Tutorial", "tutorial"),
("API", "api"),
],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', 'example_thumbs']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'seaborndoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'seaborn.tex', u'seaborn Documentation',
u'Michael Waskom', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'seaborn', u'seaborn Documentation',
[u'Michael Waskom'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'seaborn', u'seaborn Documentation',
u'Michael Waskom', 'seaborn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Add the 'copybutton' javascript, to hide/show the prompt in code
# examples, originally taken from scikit-learn's doc/conf.py
def setup(app):
app.add_javascript('copybutton.js')
app.add_stylesheet('style.css')
# -- Intersphinx ------------------------------------------------
intersphinx_mapping = {
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'statsmodels': ('https://www.statsmodels.org/stable/', None)
}
| 31.933333 | 80 | 0.704697 |
6fa787aec5174e63cf4e0740b510dbddc3da5d91
| 1,993 |
py
|
Python
|
bluefoglite/common/collective_comm/broadcast.py
|
Bluefog-Lib/Bluefog-Lite
|
7175a1731f1a401ee55ce8ad94855f0c03ad5604
|
[
"Apache-2.0"
] | null | null | null |
bluefoglite/common/collective_comm/broadcast.py
|
Bluefog-Lib/Bluefog-Lite
|
7175a1731f1a401ee55ce8ad94855f0c03ad5604
|
[
"Apache-2.0"
] | null | null | null |
bluefoglite/common/collective_comm/broadcast.py
|
Bluefog-Lib/Bluefog-Lite
|
7175a1731f1a401ee55ce8ad94855f0c03ad5604
|
[
"Apache-2.0"
] | null | null | null |
import math
from typing import List
from bluefoglite.common.tcp.agent import AgentContext
from bluefoglite.common.tcp.buffer import SpecifiedBuffer
def broadcast_one_to_all(
buf: SpecifiedBuffer, root_rank: int, context: AgentContext, *, tag=0
):
# assume the input are all well-defined and behaved.
if context.rank != root_rank:
buf.recv(root_rank)
return
handles: List[int] = []
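    # the root posts a non-blocking send to every other rank, then waits for all of them to complete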
for i in range(context.size):
if i == context.rank:
continue
handles.append(buf.isend(i))
for h in handles:
buf.waitCompletion(h)
def broadcast_ring(
buf: SpecifiedBuffer, root_rank: int, context: AgentContext, *, tag=0
):
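    # ring broadcast: in round r, virtual rank r sends the buffer to its successor and virtual rank r+1 receives it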
virtual_rank = (context.rank - root_rank) % context.size
next_rank = (context.rank + 1) % context.size
prev_rank = (context.rank - 1) % context.size
for r in range(context.size - 1):
if virtual_rank == r:
buf.send(next_rank)
elif virtual_rank == r + 1:
buf.recv(prev_rank)
else:
pass
def broadcast_spreading(
buf: SpecifiedBuffer, root_rank: int, context: AgentContext, *, tag=0
):
    # Using the 0->1 | 0->2, 1->3 | 0->4, 1->5, 2->6, 3->7 doubling style.
virtual_rank = (context.rank - root_rank) % context.size
rounds = math.ceil(math.log2(context.size))
for r in range(rounds):
rank_diff = 1 << r
if virtual_rank < rank_diff:
virutal_send_to = virtual_rank + rank_diff
            if virutal_send_to >= context.size:  # virtual ranks run from 0 to size - 1
continue
real_send_to = (virutal_send_to + root_rank) % context.size
buf.send(real_send_to)
elif rank_diff <= virtual_rank < 2 * rank_diff:
virutal_recv_from = virtual_rank - rank_diff
if virutal_recv_from < 0: # impossible.
continue
real_recv_from = (virutal_recv_from + root_rank) % context.size
buf.recv(real_recv_from)
else:
pass
| 30.661538 | 75 | 0.617662 |
d3e19254657066386277d00ca1ce44a5504fa530
| 706 |
py
|
Python
|
024.py
|
xianlinfeng/project_euler_python3
|
77eca44eb2b1d13bc70d6dc0258b737449d43a23
|
[
"MIT"
] | null | null | null |
024.py
|
xianlinfeng/project_euler_python3
|
77eca44eb2b1d13bc70d6dc0258b737449d43a23
|
[
"MIT"
] | null | null | null |
024.py
|
xianlinfeng/project_euler_python3
|
77eca44eb2b1d13bc70d6dc0258b737449d43a23
|
[
"MIT"
] | null | null | null |
import itertools
def next(nums):
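    # next lexicographic permutation: find the rightmost ascent, swap its left element
    # with the smallest larger value to its right, then sort the remaining suffix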
for i in range(len(nums)-1, 0, -1):
if nums[i-1] < nums[i]:
_, j = min((v, j)
for (j, v) in enumerate(nums[i:]) if v > nums[i-1])
nums[i-1], nums[i+j] = nums[i+j], nums[i-1]
nums[i:] = sorted(nums[i:])
return nums
else:
pass
return None
def compute():
""" method 2 """
arr = list(range(10))
temp = itertools.islice(itertools.permutations(arr), 999999, None)
return "".join(str(x) for x in next(temp))
if __name__ == "__main__":
List = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
for i in range(1, 1000000):
List = next(List)
print(List)
| 24.344828 | 74 | 0.494334 |
574a832e9184b5c4a7275d6e754cd3e5b4958e75
| 7,078 |
py
|
Python
|
rllab/sampler/base.py
|
Bobeye/rllab
|
53c0afb73f93c4a78ff21507914d7f7735c21ea9
|
[
"MIT"
] | 1,838 |
2017-08-10T04:19:28.000Z
|
2022-03-29T07:41:19.000Z
|
rllab/sampler/base.py
|
Bobeye/rllab
|
53c0afb73f93c4a78ff21507914d7f7735c21ea9
|
[
"MIT"
] | 120 |
2016-10-05T09:16:16.000Z
|
2017-07-27T22:57:31.000Z
|
rllab/sampler/base.py
|
Bobeye/rllab
|
53c0afb73f93c4a78ff21507914d7f7735c21ea9
|
[
"MIT"
] | 498 |
2017-08-16T03:34:28.000Z
|
2022-03-31T04:41:32.000Z
|
import numpy as np
from rllab.misc import special
from rllab.misc import tensor_utils
from rllab.algos import util
import rllab.misc.logger as logger
class Sampler(object):
def start_worker(self):
"""
Initialize the sampler, e.g. launching parallel workers if necessary.
"""
raise NotImplementedError
def obtain_samples(self, itr):
"""
Collect samples for the given iteration number.
:param itr: Iteration number.
:return: A list of paths.
"""
raise NotImplementedError
def process_samples(self, itr, paths):
"""
Return processed sample data (typically a dictionary of concatenated tensors) based on the collected paths.
:param itr: Iteration number.
:param paths: A list of collected paths.
:return: Processed sample data.
"""
raise NotImplementedError
def shutdown_worker(self):
"""
Terminate workers if necessary.
"""
raise NotImplementedError
class BaseSampler(Sampler):
def __init__(self, algo):
"""
:type algo: BatchPolopt
"""
self.algo = algo
def process_samples(self, itr, paths):
baselines = []
returns = []
if hasattr(self.algo.baseline, "predict_n"):
all_path_baselines = self.algo.baseline.predict_n(paths)
else:
all_path_baselines = [self.algo.baseline.predict(path) for path in paths]
for idx, path in enumerate(paths):
path_baselines = np.append(all_path_baselines[idx], 0)
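            # TD residuals: delta_t = r_t + discount * V(s_{t+1}) - V(s_t); their (discount * lambda)-discounted sum is the GAE advantage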
deltas = path["rewards"] + \
self.algo.discount * path_baselines[1:] - \
path_baselines[:-1]
path["advantages"] = special.discount_cumsum(
deltas, self.algo.discount * self.algo.gae_lambda)
path["returns"] = special.discount_cumsum(path["rewards"], self.algo.discount)
baselines.append(path_baselines[:-1])
returns.append(path["returns"])
ev = special.explained_variance_1d(
np.concatenate(baselines),
np.concatenate(returns)
)
if not self.algo.policy.recurrent:
observations = tensor_utils.concat_tensor_list([path["observations"] for path in paths])
actions = tensor_utils.concat_tensor_list([path["actions"] for path in paths])
rewards = tensor_utils.concat_tensor_list([path["rewards"] for path in paths])
returns = tensor_utils.concat_tensor_list([path["returns"] for path in paths])
advantages = tensor_utils.concat_tensor_list([path["advantages"] for path in paths])
env_infos = tensor_utils.concat_tensor_dict_list([path["env_infos"] for path in paths])
agent_infos = tensor_utils.concat_tensor_dict_list([path["agent_infos"] for path in paths])
if self.algo.center_adv:
advantages = util.center_advantages(advantages)
if self.algo.positive_adv:
advantages = util.shift_advantages_to_positive(advantages)
average_discounted_return = \
np.mean([path["returns"][0] for path in paths])
undiscounted_returns = [sum(path["rewards"]) for path in paths]
ent = np.mean(self.algo.policy.distribution.entropy(agent_infos))
samples_data = dict(
observations=observations,
actions=actions,
rewards=rewards,
returns=returns,
advantages=advantages,
env_infos=env_infos,
agent_infos=agent_infos,
paths=paths,
)
else:
max_path_length = max([len(path["advantages"]) for path in paths])
# make all paths the same length (pad extra advantages with 0)
obs = [path["observations"] for path in paths]
obs = tensor_utils.pad_tensor_n(obs, max_path_length)
if self.algo.center_adv:
raw_adv = np.concatenate([path["advantages"] for path in paths])
adv_mean = np.mean(raw_adv)
adv_std = np.std(raw_adv) + 1e-8
adv = [(path["advantages"] - adv_mean) / adv_std for path in paths]
else:
adv = [path["advantages"] for path in paths]
adv = np.asarray([tensor_utils.pad_tensor(a, max_path_length) for a in adv])
actions = [path["actions"] for path in paths]
actions = tensor_utils.pad_tensor_n(actions, max_path_length)
rewards = [path["rewards"] for path in paths]
rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
returns = [path["returns"] for path in paths]
returns = tensor_utils.pad_tensor_n(returns, max_path_length)
agent_infos = [path["agent_infos"] for path in paths]
agent_infos = tensor_utils.stack_tensor_dict_list(
[tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos]
)
env_infos = [path["env_infos"] for path in paths]
env_infos = tensor_utils.stack_tensor_dict_list(
[tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos]
)
valids = [np.ones_like(path["returns"]) for path in paths]
valids = tensor_utils.pad_tensor_n(valids, max_path_length)
average_discounted_return = \
np.mean([path["returns"][0] for path in paths])
undiscounted_returns = [sum(path["rewards"]) for path in paths]
ent = np.sum(self.algo.policy.distribution.entropy(agent_infos) * valids) / np.sum(valids)
samples_data = dict(
observations=obs,
actions=actions,
advantages=adv,
rewards=rewards,
returns=returns,
valids=valids,
agent_infos=agent_infos,
env_infos=env_infos,
paths=paths,
)
logger.log("fitting baseline...")
if hasattr(self.algo.baseline, 'fit_with_samples'):
self.algo.baseline.fit_with_samples(paths, samples_data)
else:
self.algo.baseline.fit(paths)
logger.log("fitted")
logger.record_tabular('Iteration', itr)
logger.record_tabular('AverageDiscountedReturn',
average_discounted_return)
logger.record_tabular('AverageReturn', np.mean(undiscounted_returns))
logger.record_tabular('ExplainedVariance', ev)
logger.record_tabular('NumTrajs', len(paths))
logger.record_tabular('Entropy', ent)
logger.record_tabular('Perplexity', np.exp(ent))
logger.record_tabular('StdReturn', np.std(undiscounted_returns))
logger.record_tabular('MaxReturn', np.max(undiscounted_returns))
logger.record_tabular('MinReturn', np.min(undiscounted_returns))
return samples_data
| 38.677596 | 115 | 0.606527 |
47bde410c420828145f25d848ddb4876fcf582ac
| 3,845 |
py
|
Python
|
helper.py
|
ctangarife/nsearch
|
7de1534db51d9441ec28db4c7c2f7256630a3db0
|
[
"Apache-2.0"
] | null | null | null |
helper.py
|
ctangarife/nsearch
|
7de1534db51d9441ec28db4c7c2f7256630a3db0
|
[
"Apache-2.0"
] | null | null | null |
helper.py
|
ctangarife/nsearch
|
7de1534db51d9441ec28db4c7c2f7256630a3db0
|
[
"Apache-2.0"
] | 1 |
2020-10-30T19:57:16.000Z
|
2020-10-30T19:57:16.000Z
|
import dbmodule
from dbmodule import *
class Helper:
def __init__(self,args="",commnad=""):
self.args = args
self.commnad = commnad
# process the commands
def process(self):
if self.commnad == "search":
if not self.args:
dbmodule.lastresults = dbmodule.searchAll()
self.printlastResult()
else:
dbmodule.lastresults = dbmodule.searchByCriterial(**self.__searchparams())
self.printlastResult()
elif self.commnad == "addfav":
dbmodule.createFavorite(**self.__addfavparams())
elif self.commnad == "modfav":
dbmodule.updateFavorite(**self.__modfavparams())
elif self.commnad == "delfav":
dbmodule.deleteFavorite(**self.__delfavparams())
elif self.commnad == "showfav":
if not self.args:
dbmodule.lastresults = dbmodule.getFavorites()
self.printlastResult(True)
else:
dbmodule.lastresults = dbmodule.getFavorites(**self.__showfavparams())
self.printlastResult(True)
else:
print "Error"
# Display the last results
def printlastResult(self,fav=False):
if fav:
print("\033[1;32m*** {0:40} {1:40}\033[0m".format(*["Name","Ranking"]))
for key,value in dbmodule.lastresults.items():
print("\033[1;32m[+] {0:40} {1:35}\033[0m".format(*[value["name"],value["ranking"]]))
else:
print("\033[1;32m*** {0:40} {1:40}\033[0m".format(*["Name","Author"]))
for key,value in dbmodule.lastresults.items():
print("\033[1;32m[+] {0:40} {1:35}\033[0m".format(*[value["name"],value["author"]]))
# Display the documentation per script
def displayDoc(self):
scriptFile = open(dbmodule.scriptsPath+self.args,'r')
lines = scriptFile.read().splitlines()
for line in lines:
if line.startswith("license"):
break
print('\033[1;96m'+line+'\033[0m')
scriptFile.close()
# used for the autocomplete
def resultitems(self):
i = 0
items = []
for k,v in dbmodule.lastresults.items():
items.insert(i,v["name"])
i = i + 1
return items
# private function to set params for search command
def __searchparams(self):
if self.args.find('name:') != -1 or self.args.find('category:') != -1 or self.args.find('author:') != -1:
return self.__setParams()
# private function to set params for addfav command
def __addfavparams(self):
if self.args.find('name:') != -1 or self.args.find('ranking:') != -1:
return self.__setParams()
# private function to set params for delfav command
def __delfavparams(self):
if self.args.find('name:') != -1:
return self.__setParams()
# private function to set params for modfav command
def __modfavparams(self):
if self.args.find('name:') != -1 or self.args.find('newname:') != -1 or self.args.find('newranking:') != -1:
return self.__setParams()
# private function to set params for showfav command
def __showfavparams(self):
if self.args.find('name:') != -1 or self.args.find('ranking:') != -1:
return self.__setParams()
# Set Params validations
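# The argument string is expected in the form "key:value [key:value ...]",
# e.g. "name:http author:foo"; the splits on ':' and spaces below turn it
# into the keyword dict passed to the dbmodule query functions.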
def __setParams(self):
argsdic = {}
if len(self.args.split(":")) >= 4:
argsdic.update({
self.args.split(":")[0]:self.args.split(":")[1].split(" ")[0],
self.args.split(":")[1].split(" ")[1]:self.args.split(":")[2].split(" ")[0],
self.args.split(":")[2].split(" ")[1]:self.args.split(":")[3].split(" ")[0]})
elif len(self.args.split(":")) == 3:
argsdic.update({
self.args.split(":")[0]:self.args.split(":")[1].split(" ")[0],
self.args.split(":")[1].split(" ")[1]:self.args.split(":")[2].split(" ")[0]})
elif len(self.args.split(":")) == 2:
argsdic.update({self.args.split(":")[0]:self.args.split(":")[1].split(" ")[0]})
else:
print "Plase enter a correct commands"
return argsdic
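# Minimal usage sketch (illustrative only, not part of the original nsearch
# code; it assumes dbmodule has been initialised elsewhere):
#
#   Helper("name:http author:foo", "search").process()   # query and print scripts
#   Helper("", "showfav").process()                       # list stored favorites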
| 35.601852 | 112 | 0.615605 |
dfebf930087efed7d62463d4094004bd0ed755fd
| 70,669 |
py
|
Python
|
src/sage/graphs/generators/random.py
|
tashakim/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | 4 |
2020-07-17T04:49:44.000Z
|
2020-07-29T06:33:51.000Z
|
src/sage/graphs/generators/random.py
|
Ivo-Maffei/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | null | null | null |
src/sage/graphs/generators/random.py
|
Ivo-Maffei/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
r"""
Random Graphs
The methods defined here appear in :mod:`sage.graphs.graph_generators`.
"""
###########################################################################
#
# Copyright (C) 2006 Robert L. Miller <[email protected]>
# and Emily A. Kirkman
# Copyright (C) 2009 Michael C. Yurko <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
###########################################################################
from __future__ import print_function, division
import sys
# import from Sage library
from sage.graphs.graph import Graph
from sage.misc.randstate import current_randstate
from sage.misc.prandom import randint
def RandomGNP(n, p, seed=None, fast=True, algorithm='Sage'):
r"""
Returns a random graph on `n` nodes. Each edge is inserted independently
with probability `p`.
INPUT:
- ``n`` -- number of nodes of the graph
- ``p`` -- probability of an edge
- ``seed`` - a ``random.Random`` seed or a Python ``int`` for the random
number generator (default: ``None``).
- ``fast`` -- boolean set to True (default) to use the algorithm with
time complexity in `O(n+m)` proposed in [BB2005a]_. It is designed
for generating large sparse graphs. It is faster than other algorithms for
*LARGE* instances (try it to know whether it is useful for you).
- ``algorithm`` -- By default (``algorithm='Sage'``), this function uses the
algorithm implemented in ``sage.graphs.graph_generators_pyx.pyx``. When
``algorithm='networkx'``, this function calls the NetworkX function
``fast_gnp_random_graph``, unless ``fast=False``, then
``gnp_random_graph``. Try them to know which algorithm is the best for
you. The ``fast`` parameter is not taken into account by the 'Sage'
algorithm so far.
REFERENCES:
- [ER1959]_
- [Gil1959]_
PLOTTING: When plotting, this graph will use the default spring-layout
algorithm, unless a position dictionary is specified.
EXAMPLES: We show the edge list of a random graph on 6 nodes with
probability `p = .4`::
sage: set_random_seed(0)
sage: graphs.RandomGNP(6, .4).edges(labels=False)
[(0, 1), (0, 5), (1, 2), (2, 4), (3, 4), (3, 5), (4, 5)]
We plot a random graph on 12 nodes with probability `p = .71`::
sage: gnp = graphs.RandomGNP(12,.71)
sage: gnp.show() # long time
We view many random graphs using a graphics array::
sage: g = []
sage: j = []
sage: for i in range(9):
....: k = graphs.RandomGNP(i+3,.43)
....: g.append(k)
sage: for i in range(3):
....: n = []
....: for m in range(3):
....: n.append(g[3*i + m].plot(vertex_size=50, vertex_labels=False))
....: j.append(n)
sage: G = graphics_array(j)
sage: G.show() # long time
sage: graphs.RandomGNP(4,1)
Complete graph: Graph on 4 vertices
TESTS::
sage: graphs.RandomGNP(50,.2,algorithm=50)
Traceback (most recent call last):
...
ValueError: 'algorithm' must be equal to 'networkx' or to 'Sage'.
sage: set_random_seed(0)
sage: graphs.RandomGNP(50,.2, algorithm="Sage").size()
243
sage: graphs.RandomGNP(50,.2, algorithm="networkx").size()
260 # 32-bit
245 # 64-bit
"""
if n < 0:
raise ValueError("The number of nodes must be positive or null.")
if 0.0 > p or 1.0 < p:
raise ValueError("The probability p must be in [0..1].")
if seed is None:
seed = int(current_randstate().long_seed() % sys.maxsize)
if p == 1:
from sage.graphs.generators.basic import CompleteGraph
return CompleteGraph(n)
if algorithm == 'networkx':
import networkx
if fast:
G = networkx.fast_gnp_random_graph(n, p, seed=seed)
else:
G = networkx.gnp_random_graph(n, p, seed=seed)
return Graph(G)
elif algorithm in ['Sage', 'sage']:
# We use the Sage generator
from sage.graphs.graph_generators_pyx import RandomGNP as sageGNP
return sageGNP(n, p)
else:
raise ValueError("'algorithm' must be equal to 'networkx' or to 'Sage'.")
def RandomBarabasiAlbert(n, m, seed=None):
r"""
Return a random graph created using the Barabasi-Albert preferential
attachment model.
A graph with `m` vertices and no edges is initialized, and a graph of `n`
vertices is grown by attaching new vertices each with `m` edges that are
attached to existing vertices, preferentially with high degree.
INPUT:
- ``n`` -- number of vertices in the graph
- ``m`` -- number of edges to attach from each new node
- ``seed`` -- a ``random.Random`` seed or a Python ``int`` for the random
number generator (default: ``None``)
EXAMPLES:
We show the edge list of a random graph on 6 nodes with `m = 2`::
sage: G = graphs.RandomBarabasiAlbert(6,2)
sage: G.order(), G.size()
(6, 8)
sage: G.degree_sequence() # random
[4, 3, 3, 2, 2, 2]
We plot a random graph on 12 nodes with `m = 3`::
sage: ba = graphs.RandomBarabasiAlbert(12,3)
sage: ba.show() # long time
We view many random graphs using a graphics array::
sage: g = []
sage: j = []
sage: for i in range(1,10):
....: k = graphs.RandomBarabasiAlbert(i+3, 3)
....: g.append(k)
sage: for i in range(3):
....: n = []
....: for m in range(3):
....: n.append(g[3*i + m].plot(vertex_size=50, vertex_labels=False))
....: j.append(n)
sage: G = graphics_array(j)
sage: G.show() # long time
When `m = 1`, the generated graph is a tree::
sage: graphs.RandomBarabasiAlbert(6, 1).is_tree()
True
"""
if seed is None:
seed = int(current_randstate().long_seed() % sys.maxsize)
import networkx
return Graph(networkx.barabasi_albert_graph(n, m, seed=seed))
def RandomBipartite(n1, n2, p, set_position=False):
r"""
Returns a bipartite graph with `n1+n2` vertices such that any edge
from `[n1]` to `[n2]` exists with probability `p`.
INPUT:
- ``n1, n2`` -- Cardinalities of the two sets
- ``p`` -- Probability for an edge to exist
- ``set_position`` -- boolean (default ``False``); if set to ``True``, we
assign positions to the vertices so that the set of cardinality `n1` is
on the line `y=1` and the set of cardinality `n2` is on the line `y=0`.
EXAMPLES::
sage: g = graphs.RandomBipartite(5, 2, 0.5)
sage: g.vertices()
[(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1)]
TESTS::
sage: g = graphs.RandomBipartite(5, -3, 0.5)
Traceback (most recent call last):
...
ValueError: n1 and n2 should be integers strictly greater than 0
sage: g = graphs.RandomBipartite(5, 3, 1.5)
Traceback (most recent call last):
...
ValueError: parameter p is a probability, and so should be a real value between 0 and 1
:trac:`12155`::
sage: graphs.RandomBipartite(5, 6, .2).complement()
complement(Random bipartite graph of order 5+6 with edge probability 0.200000000000000): Graph on 11 vertices
Test assigned positions::
sage: graphs.RandomBipartite(1, 2, .1, set_position=True).get_pos()
{(0, 0): (1, 1.0), (1, 0): (0, 0), (1, 1): (2.0, 0.0)}
sage: graphs.RandomBipartite(2, 1, .1, set_position=True).get_pos()
{(0, 0): (0, 1), (0, 1): (2.0, 1.0), (1, 0): (1, 0.0)}
sage: graphs.RandomBipartite(2, 2, .1, set_position=True).get_pos()
{(0, 0): (0, 1), (0, 1): (2.0, 1.0), (1, 0): (0, 0), (1, 1): (2.0, 0.0)}
sage: graphs.RandomBipartite(2, 2, .1, set_position=False).get_pos()
"""
if not (p >= 0 and p <= 1):
raise ValueError("parameter p is a probability, and so should be a real value between 0 and 1")
if not (n1 > 0 and n2 > 0):
raise ValueError("n1 and n2 should be integers strictly greater than 0")
from numpy.random import uniform
g=Graph(name="Random bipartite graph of order "+str(n1) +"+"+str(n2)+" with edge probability "+str(p))
S1 = [(0,i) for i in range(n1)]
S2 = [(1,i) for i in range(n2)]
g.add_vertices(S1)
g.add_vertices(S2)
for w in range(n2):
for v in range(n1):
if uniform() <= p :
g.add_edge((0, v), (1, w))
# We now assign positions to vertices:
# - vertices in S1 are placed on the line from (0, 1) to (max(n1, n2), 1)
# - vertices in S2 are placed on the line from (0, 0) to (max(n1, n2), 0)
# If S1 or S2 has a single vertex, it is centered in the line.
if set_position:
nmax = max(n1, n2)
g._line_embedding(S1, first=(0, 1), last=(nmax, 1))
g._line_embedding(S2, first=(0, 0), last=(nmax, 0))
return g
def RandomRegularBipartite(n1, n2, d1, set_position=False):
r"""
Return a random regular bipartite graph on `n1 + n2` vertices.
The bipartite graph has `n1 * d1` edges. Hence, `n2` must divide `n1 * d1`.
Each vertex of the set of cardinality `n1` has degree `d1` (which can be at
most `n2`) and each vertex in the set of cardinality `n2` has degree
`(n1 * d1) / n2`. The bipartite graph has no multiple edges.
This generator implements an algorithm inspired by that of [MW1990]_ for
the uniform generation of random regular bipartite graphs. It performs well
when `d1 = o(n2^{1/3})` or `n2 - d1 = o(n2^{1/3})`. In other cases, the
running time can be huge. Note that the currently implemented algorithm
does not generate uniformly random graphs.
INPUT:
- ``n1, n2`` -- number of vertices in each side
- ``d1`` -- degree of the vertices in the set of cardinality `n1`.
- ``set_position`` -- boolean (default ``False``); if set to ``True``, we
assign positions to the vertices so that the set of cardinality `n1` is
on the line `y=1` and the set of cardinality `n2` is on the line `y=0`.
EXAMPLES::
sage: g = graphs.RandomRegularBipartite(4, 6, 3)
sage: g.order(), g.size()
(10, 12)
sage: set(g.degree())
{2, 3}
sage: graphs.RandomRegularBipartite(1, 2, 2, set_position=True).get_pos()
{0: (1, 1.0), 1: (0, 0), 2: (2.0, 0.0)}
sage: graphs.RandomRegularBipartite(2, 1, 1, set_position=True).get_pos()
{0: (0, 1), 1: (2.0, 1.0), 2: (1, 0.0)}
sage: graphs.RandomRegularBipartite(2, 3, 3, set_position=True).get_pos()
{0: (0, 1), 1: (3.0, 1.0), 2: (0, 0), 3: (1.5, 0.0), 4: (3.0, 0.0)}
sage: graphs.RandomRegularBipartite(2, 3, 3, set_position=False).get_pos()
TESTS:
Giving invalid parameters::
sage: graphs.RandomRegularBipartite(0, 2, 1)
Traceback (most recent call last):
...
ValueError: n1 and n2 must be integers greater than 0
sage: graphs.RandomRegularBipartite(2, 3, 2)
Traceback (most recent call last):
...
ValueError: the product n1 * d1 must be a multiple of n2
sage: graphs.RandomRegularBipartite(1, 1, 2)
Traceback (most recent call last):
...
ValueError: d1 must be less than or equal to n2
"""
if n1 < 1 or n2 < 1:
raise ValueError("n1 and n2 must be integers greater than 0")
if d1 > n2:
raise ValueError("d1 must be less than or equal to n2")
d2 = (n1 * d1) // n2
if n1 * d1 != n2 * d2:
raise ValueError("the product n1 * d1 must be a multiple of n2")
complement = False
if d1 > n2/2 or d2 > n1/2:
# We build the complement graph instead
complement = True
d1 = n2 - d1
d2 = n1 - d2
E = set()
F = set()
if d1:
from sage.misc.prandom import shuffle, choice
M1 = n1 * d1 * (d1 - 1)
M2 = n2 * d2 * (d2 - 1)
M = n1 * d1 + n2 * d2
UB_parallel = (M1 * M2) / M**2
# We create a set of n1 * d1 random edges with possible repetitions. We
# require that the number of repeated edges is bounded and that an edge
# can be repeated only once.
L = [u for u in range(n1) for i in range(d1)]
R = [u for u in range(n1, n1 + n2) for i in range(d2)]
restart = True
while restart:
restart = False
shuffle(R)
E = set()
F = set()
for e in zip(L, R):
if e in E:
if e in F:
# We have more than 2 times e => restart
restart = True
break
else:
F.add(e)
if len(F) >= UB_parallel:
# We have too many parallel edges
restart = True
break
else:
E.add(e)
# We remove multiple edges by applying random forward d-switching. That is,
# given edge e that is repeated twice, we select single edges f and g with
# no common end points, and then create 4 new edges. We forbid creating new
# multiple edges.
while F:
# random forward d-switching
e = F.pop()
E.discard(e)
TE = tuple(E.difference(F))
# We select 2 vertex disjoint edges
while True:
f = choice(TE)
if e[0] == f[0] or e[1] == f[1]:
continue
g = choice(TE)
if e[0] != g[0] and e[1] != g[1] and f[0] != g[0] and f[1] != g[1]:
new_edges = [(f[0], e[1]), (e[0], f[1]), (e[0], g[1]), (g[0], e[1])]
if not E.intersection(new_edges):
# We are not creating new parallel edges.
# To generate uniformly random graphs we would have to
# implement a probabilistic restart of the whole algorithm
# here, see [MW1990].
break
E.discard(f)
E.discard(g)
E.update(new_edges)
if complement:
from sage.graphs.generators.basic import CompleteBipartiteGraph
E = E.symmetric_difference(CompleteBipartiteGraph(n1, n2).edges(labels=False))
d1, d2 = n2 - d1, n1 - d2
name = "Random regular bipartite graph of order {}+{} and degrees {} and {}".format(n1, n2, d1, d2)
G = Graph(list(E), name=name)
# We now assign positions to vertices:
# - vertices 0,..,n1-1 are placed on the line (0, 1) to (max(n1, n2), 1)
# - vertices n1,..,n1+n2-1 are placed on the line (0, 0) to (max(n1, n2), 0)
# If n1 (or n2) is 1, the vertex is centered in the line.
if set_position:
nmax = max(n1, n2)
G._line_embedding(list(range(n1)), first=(0, 1), last=(nmax, 1))
G._line_embedding(list(range(n1, n1+n2)), first=(0, 0), last=(nmax, 0))
return G
def RandomBlockGraph(m, k, kmax=None, incidence_structure=False):
r"""
Return a Random Block Graph.
A block graph is a connected graph in which every biconnected component
(block) is a clique.
.. SEEALSO::
- :wikipedia:`Block_graph` for more details on these graphs
- :meth:`~sage.graphs.graph.Graph.is_block_graph` -- test if a graph is a block graph
- :meth:`~sage.graphs.generic_graph.GenericGraph.blocks_and_cut_vertices`
- :meth:`~sage.graphs.generic_graph.GenericGraph.blocks_and_cuts_tree`
- :meth:`~sage.combinat.designs.incidence_structures.IncidenceStructure`
INPUT:
- ``m`` -- integer; number of blocks (at least one).
- ``k`` -- integer; minimum number of vertices of a block (at least two).
- ``kmax`` -- integer (default: ``None``) By default, each block has `k`
vertices. When the parameter `kmax` is specified (with `kmax \geq k`), the
number of vertices of each block is randomly chosen between `k` and
`kmax`.
- ``incidence_structure`` -- boolean (default: ``False``) when set to
``True``, the incidence structure of the graphs is returned instead of the
graph itself, that is the list of the lists of vertices in each
block. This is useful for the creation of some hypergraphs.
OUTPUT:
A Graph when ``incidence_structure==False`` (default), and otherwise an
incidence structure.
EXAMPLES:
A block graph with a single block is a clique::
sage: B = graphs.RandomBlockGraph(1, 4)
sage: B.is_clique()
True
A block graph with blocks of order 2 is a tree::
sage: B = graphs.RandomBlockGraph(10, 2)
sage: B.is_tree()
True
Every biconnected component of a block graph is a clique::
sage: B = graphs.RandomBlockGraph(5, 3, kmax=6)
sage: blocks,cuts = B.blocks_and_cut_vertices()
sage: all(B.is_clique(block) for block in blocks)
True
A block graph with blocks of order `k` has `m*(k-1)+1` vertices::
sage: m, k = 6, 4
sage: B = graphs.RandomBlockGraph(m, k)
sage: B.order() == m*(k-1)+1
True
Test recognition methods::
sage: B = graphs.RandomBlockGraph(6, 2, kmax=6)
sage: B.is_block_graph()
True
sage: B in graph_classes.Block
True
Asking for the incidence structure::
sage: m, k = 6, 4
sage: IS = graphs.RandomBlockGraph(m, k, incidence_structure=True)
sage: from sage.combinat.designs.incidence_structures import IncidenceStructure
sage: IncidenceStructure(IS)
Incidence structure with 19 points and 6 blocks
sage: m*(k-1)+1
19
TESTS:
A block graph has at least one block, so `m\geq 1`::
sage: B = graphs.RandomBlockGraph(0, 1)
Traceback (most recent call last):
...
ValueError: the number `m` of blocks must be >= 1
A block has at least 2 vertices, so `k\geq 2`::
sage: B = graphs.RandomBlockGraph(1, 1)
Traceback (most recent call last):
...
ValueError: the minimum number `k` of vertices in a block must be >= 2
The maximum size of a block is at least its minimum size, so `k\leq kmax`::
sage: B = graphs.RandomBlockGraph(1, 3, kmax=2)
Traceback (most recent call last):
...
ValueError: the maximum number `kmax` of vertices in a block must be >= `k`
"""
from sage.misc.prandom import choice
from sage.sets.disjoint_set import DisjointSet
if m < 1:
raise ValueError("the number `m` of blocks must be >= 1")
if k < 2:
raise ValueError("the minimum number `k` of vertices in a block must be >= 2")
if kmax is None:
kmax = k
elif kmax < k:
raise ValueError("the maximum number `kmax` of vertices in a block must be >= `k`")
if m == 1:
# A block graph with a single block is a clique
IS = [ list(range(randint(k, kmax))) ]
elif kmax == 2:
# A block graph with blocks of order 2 is a tree
IS = [ list(e) for e in RandomTree(m+1).edges(labels=False) ]
else:
# We start with a random tree of order m
T = RandomTree(m)
# We create a block of order in range [k,kmax] per vertex of the tree
B = {u:[(u,i) for i in range(randint(k, kmax))] for u in T}
# For each edge of the tree, we choose 1 vertex in each of the
# corresponding blocks and we merge them. We use a disjoint set data
# structure to keep a unique identifier per merged vertices
DS = DisjointSet([i for u in B for i in B[u]])
for u,v in T.edges(labels=0):
DS.union(choice(B[u]), choice(B[v]))
# We relabel vertices in the range [0, m*(k-1)] and build the incidence
# structure
new_label = {root:i for i,root in enumerate(DS.root_to_elements_dict())}
IS = [ [new_label[DS.find(v)] for v in B[u]] for u in B ]
if incidence_structure:
return IS
# We finally build the block graph
if k == kmax:
BG = Graph(name = "Random Block Graph with {} blocks of order {}".format(m, k))
else:
BG = Graph(name = "Random Block Graph with {} blocks of order {} to {}".format(m, k, kmax))
for block in IS:
BG.add_clique( block )
return BG
def RandomBoundedToleranceGraph(n):
r"""
Returns a random bounded tolerance graph.
The random tolerance graph is built from a random bounded
tolerance representation by using the function
`ToleranceGraph`. This representation is a list
`((l_0,r_0,t_0), (l_1,r_1,t_1), ..., (l_k,r_k,t_k))` where
`k = n-1` and `I_i = (l_i,r_i)` denotes a random interval and
`t_i` a random positive value less than or equal to the length
of the interval `I_i`. The width of the representation is
limited to n**2 * 2**n.
.. NOTE::
The tolerance representation used to create the graph can
be recovered using ``get_vertex()`` or ``get_vertices()``.
INPUT:
- ``n`` -- number of vertices of the random graph.
EXAMPLES:
Every (bounded) tolerance graph is perfect. Hence, the
chromatic number is equal to the clique number ::
sage: g = graphs.RandomBoundedToleranceGraph(8)
sage: g.clique_number() == g.chromatic_number()
True
"""
from sage.misc.prandom import randint
from sage.graphs.generators.intersection import ToleranceGraph
W = n ** 2 * 2 ** n
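# Each interval is obtained by sorting two random endpoints drawn from
# [0, W]; the tolerance is then drawn from [0, r - l], so it never exceeds
# the interval length, which is what makes the representation bounded.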
tolrep = [(l_r[0], l_r[1], randint(0, l_r[1] - l_r[0])) for l_r in [sorted((randint(0, W), randint(0, W))) for i in range(n)]]
return ToleranceGraph(tolrep)
def RandomGNM(n, m, dense=False, seed=None):
"""
Returns a graph randomly picked out of all graphs on n vertices
with m edges.
INPUT:
- ``n`` - number of vertices.
- ``m`` - number of edges.
- ``dense`` - whether to use NetworkX's
dense_gnm_random_graph or gnm_random_graph
- ``seed`` - a ``random.Random`` seed or a Python ``int`` for the random
number generator (default: ``None``).
EXAMPLES: We show the edge list of a random graph on 5 nodes with
10 edges.
::
sage: graphs.RandomGNM(5, 10).edges(labels=False)
[(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
We plot a random graph on 12 nodes with m = 12.
::
sage: gnm = graphs.RandomGNM(12, 12)
sage: gnm.show() # long time
We view many random graphs using a graphics array::
sage: g = []
sage: j = []
sage: for i in range(9):
....: k = graphs.RandomGNM(i+3, i^2-i)
....: g.append(k)
sage: for i in range(3):
....: n = []
....: for m in range(3):
....: n.append(g[3*i + m].plot(vertex_size=50, vertex_labels=False))
....: j.append(n)
sage: G = graphics_array(j)
sage: G.show() # long time
"""
if seed is None:
seed = int(current_randstate().long_seed() % sys.maxsize)
import networkx
if dense:
return Graph(networkx.dense_gnm_random_graph(n, m, seed=seed))
else:
return Graph(networkx.gnm_random_graph(n, m, seed=seed))
def RandomNewmanWattsStrogatz(n, k, p, seed=None):
r"""
Return a Newman-Watts-Strogatz small world random graph on `n` vertices.
From the NetworkX documentation: first create a ring over `n` nodes. Then
each node in the ring is connected with its `k` nearest neighbors. Then
shortcuts are created by adding new edges as follows: for each edge `u-v` in
the underlying "`n`-ring with `k` nearest neighbors"; with probability `p`
add a new edge `u-w` with randomly-chosen existing node `w`. In contrast
with ``networkx.watts_strogatz_graph()``, no edges are removed.
INPUT:
- ``n`` -- number of vertices
- ``k`` -- each vertex is connected to its `k` nearest neighbors
- ``p`` -- the probability of adding a new edge for each edge
- ``seed`` -- a ``random.Random`` seed or a Python ``int`` for the random
number generator (default: ``None``)
EXAMPLES:
We check that the generated graph contains a cycle of order `n`::
sage: G = graphs.RandomNewmanWattsStrogatz(7, 2, 0.2)
sage: G.order(), G.size()
(7, 9) # 64-bit
(7, 10) # 32-bit
sage: C7 = graphs.CycleGraph(7)
sage: G.subgraph_search(C7)
Subgraph of (): Graph on 7 vertices
sage: G.diameter() <= C7.diameter()
True
::
sage: G = graphs.RandomNewmanWattsStrogatz(12, 2, .3)
sage: G.show() # long time
TESTS:
We check that when `k = 2` and `p = 0`, the generated graph is a cycle::
sage: G = graphs.RandomNewmanWattsStrogatz(7, 2, 0)
sage: G.is_cycle()
True
We check that when `k = 4` and `p = 0`, the generated graph is a circulant
graph of parameters ``[1, 2]``::
sage: G = graphs.RandomNewmanWattsStrogatz(7, 4, 0)
sage: G.is_isomorphic(graphs.CirculantGraph(7, [1, 2]))
True
REFERENCE:
[NWS2002]_
"""
if seed is None:
seed = int(current_randstate().long_seed() % sys.maxsize)
import networkx
return Graph(networkx.newman_watts_strogatz_graph(n, k, p, seed=seed))
def RandomHolmeKim(n, m, p, seed=None):
r"""
Return a random graph generated by the Holme and Kim algorithm for
graphs with power law degree distribution and approximate average
clustering.
INPUT:
- ``n`` -- number of vertices
- ``m`` -- number of random edges to add for each new node
- ``p`` -- probability of adding a triangle after adding a random edge
- ``seed`` -- a ``random.Random`` seed or a Python ``int`` for the random
number generator (default: ``None``)
From the NetworkX documentation: the average clustering has a hard time
getting above a certain cutoff that depends on `m`. This cutoff is often
quite low. Note that the transitivity (fraction of triangles to possible
triangles) seems to go down with network size. It is essentially the
Barabasi-Albert growth model with an extra step that each random edge is
followed by a chance of making an edge to one of its neighbors too (and thus
a triangle). This algorithm improves on B-A in the sense that it enables a
higher average clustering to be attained if desired. It seems possible to
have a disconnected graph with this algorithm since the initial `m` nodes
may not be all linked to a new node on the first iteration like the BA
model.
EXAMPLES:
We check that a random graph on 8 nodes with 2 random edges per node and a
probability `p = 0.5` of forming triangles contains a triangle::
sage: G = graphs.RandomHolmeKim(8, 2, 0.5)
sage: G.order(), G.size()
(8, 12)
sage: C3 = graphs.CycleGraph(3)
sage: G.subgraph_search(C3)
Subgraph of (): Graph on 3 vertices
::
sage: G = graphs.RandomHolmeKim(12, 3, .3)
sage: G.show() # long time
REFERENCE:
[HK2002a]_
"""
if seed is None:
seed = int(current_randstate().long_seed() % sys.maxsize)
import networkx
return Graph(networkx.powerlaw_cluster_graph(n, m, p, seed=seed))
def RandomIntervalGraph(n):
r"""
Returns a random interval graph.
An interval graph is built from a list `(a_i,b_i)_{1\leq i \leq n}`
of intervals : to each interval of the list is associated one
vertex, two vertices being adjacent if the two corresponding
intervals intersect.
A random interval graph of order `n` is generated by picking
random values for the `(a_i,b_j)`, each of the two coordinates
being generated from the uniform distribution on the interval
`[0,1]`.
This definition follows [BF2001]_.
.. NOTE::
The vertices are named 0, 1, 2, and so on. The intervals
used to create the graph are saved with the graph and can
be recovered using ``get_vertex()`` or ``get_vertices()``.
INPUT:
- ``n`` (integer) -- the number of vertices in the random
graph.
EXAMPLES:
As for any interval graph, the chromatic number is equal to
the clique number ::
sage: g = graphs.RandomIntervalGraph(8)
sage: g.clique_number() == g.chromatic_number()
True
"""
from sage.misc.prandom import random
from sage.graphs.generators.intersection import IntervalGraph
intervals = [tuple(sorted((random(), random()))) for i in range(n)]
return IntervalGraph(intervals,True)
# Random Chordal Graphs
def growing_subtrees(T, k):
r"""
Return a list of the vertex sets of ``n`` randomly chosen subtrees of ``T``.
For a tree of order `n`, the collection contains `n` subtrees with maximum
order `k` and average order `\frac{k + 1}{2}`.
This method is part of
:meth:`~sage.graphs.generators.random.RandomChordalGraph`.
ALGORITHM:
For each subtree `T_i`, the algorithm picks a size `k_i` randomly from
`[1,k]`. Then a random node of `T` is chosen as the first node of `T_i`. In
each of the subsequent `k_i - 1` iterations, it picks a random node in the
neighborhood of `T_i` and adds it to `T_i`.
See [SHET2018]_ for more details.
INPUT:
- ``T`` -- a tree
- ``k`` -- a strictly positive integer; maximum size of a subtree
EXAMPLES::
sage: from sage.graphs.generators.random import growing_subtrees
sage: T = graphs.RandomTree(10)
sage: S = growing_subtrees(T, 5)
sage: len(S)
10
"""
from sage.misc.prandom import sample
n = T.order()
S = []
for _ in range(n):
ki = randint(1, k)
if ki == n:
Vi = frozenset(T)
else:
x = T.random_vertex()
Ti = set([x])
neighbors = set(T.neighbor_iterator(x))
for j in range(ki - 1):
# Select a random neighbor z outside of Ti and add it to Ti
z = sample(neighbors, 1)[0]
Ti.add(z)
neighbors.update(y for y in T.neighbor_iterator(z) if y not in Ti)
Vi = frozenset(Ti)
S.append(Vi)
return S
def connecting_nodes(T, l):
r"""
Return a list of the vertex sets of ``n`` randomly chosen subtrees of ``T``.
This method is part of
:meth:`~sage.graphs.generators.random.RandomChordalGraph`.
ALGORITHM:
For each subtree `T_i`, we first select `k_i` nodes of `T`, where `k_i` is a
random integer from a Poisson distribution with mean `l`. `T_i` is then
generated to be the minimal subtree that contains the selected `k_i`
nodes. This implies that a subtree will most likely have many more nodes
than those selected initially, and this must be taken into consideration
when choosing `l`.
See [SHET2018]_ for more details.
INPUT:
- ``T`` -- a tree
- ``l`` -- a strictly positive real number; mean of a Poisson distribution
EXAMPLES::
sage: from sage.graphs.generators.random import connecting_nodes
sage: T = graphs.RandomTree(10)
sage: S = connecting_nodes(T, 5)
sage: len(S)
10
"""
from sage.combinat.permutation import Permutations
from sage.data_structures.bitset import Bitset
from numpy.random import poisson
n = T.order()
V = list(T)
P = Permutations(V)
active = Bitset(capacity=n)
# Choose a root
root = T.random_vertex()
# Perform BFS from root and identify parent in root to leaf orientation
parent = {root: root}
dist = {root: 0}
bfs = [root]
i = 0
while i < n:
u = bfs[i]
d = dist[u]
for v in T.neighbor_iterator(u):
if v not in parent:
parent[v] = u
dist[v] = d + 1
bfs.append(v)
i += 1
S = []
for _ in range(n):
ki = poisson(l)
if not ki:
ki = 1
elif ki >= n:
Ti = frozenset(V)
if ki < n:
# Select ki vertices at random
Vi = set(P.random_element()[:ki])
# Arrange them by distance to root and mark them as active
d = max(dist[u] for u in Vi)
Li = [set() for _ in range(d + 1)]
active.clear()
for u in Vi:
Li[dist[u]].add(u)
active.add(u)
# Add to Vi the vertices of a minimal subtree containing Vi.
# To do so, add the parents of the vertices at distance d to Vi,
# mark them as active and add them to the set of vertices at
# distance d - 1. Then mark the vertices at distance d as
# inactive. Repeat the same procedure for the vertices at distance
# d - 1, d - 2, etc. This procedure ends when at most one active
# vertex remains.
while len(active) > 1:
for u in Li[d]:
p = parent[u]
Vi.add(p)
Li[d - 1].add(p)
active.add(p)
active.discard(u)
d -= 1
Ti = frozenset(Vi)
S.append(Ti)
return S
def pruned_tree(T, f, s):
r"""
Return a list of the vertex sets of ``n`` randomly chosen subtrees of ``T``.
This method is part of
:meth:`~sage.graphs.generators.random.RandomChordalGraph`.
ALGORITHM:
For each subtree `T_i`, it randomly selects a fraction `f` of the edges on
the tree and removes them. The number of edges to delete, say `l`, is
calculated as `\lfloor (n - 1) f \rfloor`, which will leave `l + 1` subtrees
in total. Then, it determines the sizes of the `l + 1` subtrees and stores
the distinct values. Finally, it picks a random size `k_i` from the set of
largest `100(1-s)\%` of distinct values, and randomly chooses a subtree with
size `k_i`.
See [SHET2018]_ for more details.
INPUT:
- ``T`` -- a tree
- ``f`` -- a rational number; the edge deletion fraction. This value must be
chosen in `[0..1]`.
- ``s`` -- a real number between 0 and 1; selection barrier for the size of
trees
EXAMPLES::
sage: from sage.graphs.generators.random import pruned_tree
sage: T = graphs.RandomTree(11)
sage: S = pruned_tree(T, 1/10, 0.5)
sage: len(S)
11
"""
n = T.order()
ke = int((n - 1) * f)
if not ke:
# No removed edge. Only one possible subtree
return [tuple(T)] * n
elif ke == n - 1:
# All edges are removed. Only n possible subtrees
return [(u,) for u in T]
random_edge_iterator = T.random_edge_iterator(labels=False)
TT = T.copy()
S = []
for _ in range(n):
# Choose ke = (n - 1) * f edges and remove them from TT
E = set()
while len(E) < ke:
E.add(next(random_edge_iterator))
TT.delete_edges(E)
# Compute the connected components of TT and arrange them by sizes
CC = {}
for c in TT.connected_components(sort=False):
l = len(c)
if l in CC:
CC[l].append(c)
else:
CC[l] = [c]
# Randomly select a subtree size ki from the highest 100(1 - s) %
# subtree sizes
sizes = sorted(set(CC.keys()), reverse=True)
ki = sizes[randint(0, int(len(sizes) * (1 - s)))]
# Randomly select a subtree of size ki
Ti = frozenset(CC[ki][randint(0, len(CC[ki]) - 1)])
S.append(Ti)
TT.add_edges(E)
return S
def RandomChordalGraph(n, algorithm="growing", k=None, l=None, f=None, s=None):
r"""
Return a random chordal graph of order ``n``.
A Graph `G` is said to be chordal if it contains no induced hole (a cycle of
length at least 4). Equivalently, `G` is chordal if it has a perfect
elimination orderings, if each minimal separator is a clique, or if it is
the intersection graphs of subtrees of a tree. See the
:wikipedia:`Chordal_graph`.
This generator implements the algorithms proposed in [SHET2018]_ for
generating random chordal graphs as the intersection graph of `n` subtrees
of a tree of order `n`.
The returned graph is not necessarily connected.
INPUT:
- ``n`` -- integer; the number of nodes of the graph
- ``algorithm`` -- string (default: ``"growing"``); the choice of the
algorithm for randomly selecting `n` subtrees of a random tree of order
`n`. Possible choices are:
- ``"growing"`` -- for each subtree `T_i`, the algorithm picks a size
`k_i` randomly from `[1,k]`. Then a random node of `T` is chosen as the
first node of `T_i`. In each of the subsequent `k_i - 1` iterations, it
picks a random node in the neighborhood of `T_i` and adds it to `T_i`.
- ``"connecting"`` -- for each subtree `T_i`, it first selects `k_i` nodes
of `T`, where `k_i` is a random integer from a Poisson distribution with
mean `l`. `T_i` is then generated to be the minimal subtree containing
the selected `k_i` nodes. This implies that a subtree will most likely
have many more nodes than those selected initially, and this must be
taken into consideration when choosing `l`.
- ``"pruned"`` -- for each subtree `T_i`, it randomly selects a fraction
`f` of the edges on the tree and removes them. The number of edges to
delete, say `l`, is calculated as `\lfloor (n - 1) f \rfloor`, which will
leave `l + 1` subtrees in total. Then, it determines the sizes of the `l
+ 1` subtrees and stores the distinct values. Finally, it picks a random
size `k_i` from the set of largest `100(1-s)\%` of distinct values, and
randomly chooses a subtree with size `k_i`.
- ``k`` -- integer (default: ``None``); maximum size of a subtree. If not
specified (``None``), the maximum size is set to `\sqrt{n}`.
This parameter is used only when ``algorithm="growing"``. See
:meth:`~sage.graphs.generators.random.growing_subtrees` for more details.
- ``l`` -- a strictly positive real number (default: ``None``); mean of a
Poisson distribution. If not specified, the mean is set to `\log_2{n}`.
This parameter is used only when ``algorithm="connecting"``. See
:meth:`~sage.graphs.generators.random.connecting_nodes` for more details.
- ``f`` -- a rational number (default: ``None``); the edge deletion
fraction. This value must be chosen in `[0..1]`. If not specified, this
parameter is set to `\frac{1}{n-1}`.
This parameter is used only when ``algorithm="pruned"``.
See :meth:`~sage.graphs.generators.random.pruned_tree` for more details.
- ``s`` -- a real number between 0 and 1 (default: ``None``); selection
barrier for the size of trees. If not specified, this parameter is set to
`0.5`. This parameter is used only when ``algorithm="pruned"``.
See :meth:`~sage.graphs.generators.random.pruned_tree` for more details.
EXAMPLES::
sage: from sage.graphs.generators.random import RandomChordalGraph
sage: T = RandomChordalGraph(20, algorithm="growing", k=5)
sage: T.is_chordal()
True
sage: T = RandomChordalGraph(20, algorithm="connecting", l=3)
sage: T.is_chordal()
True
sage: T = RandomChordalGraph(20, algorithm="pruned", f=1/3, s=.5)
sage: T.is_chordal()
True
TESTS::
sage: from sage.graphs.generators.random import RandomChordalGraph
sage: all(RandomChordalGraph(i).is_chordal() for i in range(4))
True
sage: RandomChordalGraph(3, algorithm="Carmen Cru")
Traceback (most recent call last):
...
NotImplementedError: unknown algorithm 'Carmen Cru'
sage: RandomChordalGraph(3, algorithm="growing", k=0)
Traceback (most recent call last):
...
ValueError: parameter k must be >= 1
sage: RandomChordalGraph(3, algorithm="connecting", l=0)
Traceback (most recent call last):
...
ValueError: parameter l must be > 0
sage: RandomChordalGraph(3, algorithm="pruned", f=2)
Traceback (most recent call last):
...
ValueError: parameter f must be 0 <= f <= 1
sage: RandomChordalGraph(3, algorithm="pruned", s=1)
Traceback (most recent call last):
...
ValueError: parameter s must be 0 < s < 1
.. SEEALSO::
- :meth:`~sage.graphs.graph_generators.growing_subtrees`
- :meth:`~sage.graphs.graph_generators.connecting_nodes`
- :meth:`~sage.graphs.graph_generators.pruned_tree`
- :wikipedia:`Chordal_graph`
- :meth:`~sage.graphs.generic_graph.GenericGraph.is_chordal`
- :meth:`~sage.graphs.graph_generators.GraphGenerators.IntersectionGraph`
"""
if n < 2:
return Graph(n, name="Random Chordal Graph")
# 1. Generate a random tree of order n
T = RandomTree(n)
# 2. Generate n non-empty subtrees of T: {T1,...,Tn}
if algorithm == "growing":
if k is None:
from sage.rings.integer import Integer
k = int(Integer(n).sqrt())
elif k < 1:
raise ValueError("parameter k must be >= 1")
S = growing_subtrees(T, k)
elif algorithm == "connecting":
if l is None:
from sage.rings.integer import Integer
l = Integer(n).log(2)
elif l <= 0:
raise ValueError("parameter l must be > 0")
S = connecting_nodes(T, l)
elif algorithm == "pruned":
if f is None:
from sage.rings.rational import Rational
f = 1 / Rational(n - 1)
elif f < 0 or f > 1:
raise ValueError("parameter f must be 0 <= f <= 1")
if s is None:
s = .5
elif s <= 0 or s >= 1:
raise ValueError("parameter s must be 0 < s < 1")
S = pruned_tree(T, f, s)
else:
raise NotImplementedError("unknown algorithm '{}'".format(algorithm))
# 3. Build the intersection graph of {V(T1),...,V(Tn)}
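# Two subtrees of a tree intersect exactly when they share a vertex of T,
# so turning the set of subtrees containing each vertex x into a clique
# yields precisely the intersection graph of {T1,...,Tn}.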
vertex_to_subtrees = [[] for _ in range(n)]
for i,s in enumerate(S):
for x in s:
vertex_to_subtrees[x].append(i)
G = Graph(n, name="Random Chordal Graph")
for X in vertex_to_subtrees:
G.add_clique(X)
return G
def RandomLobster(n, p, q, seed=None):
"""
Returns a random lobster.
A lobster is a tree that reduces to a caterpillar when pruning all
leaf vertices. A caterpillar is a tree that reduces to a path when
pruning all leaf vertices (q=0).
INPUT:
- ``n`` - expected number of vertices in the backbone
- ``p`` - probability of adding an edge to the
backbone
- ``q`` - probability of adding an edge (claw) to the
arms
- ``seed`` - a ``random.Random`` seed or a Python ``int`` for the random
number generator (default: ``None``).
EXAMPLES: We show the edge list of a random graph with 3 backbone
nodes and probabilities `p = 0.7` and `q = 0.3`::
sage: graphs.RandomLobster(3, 0.7, 0.3).edges(labels=False)
[] # 32-bit
[(0, 1), (0, 5), (1, 2), (1, 6), (2, 3), (2, 7), (3, 4), (3, 8)] # 64-bit
::
sage: G = graphs.RandomLobster(9, .6, .3)
sage: G.show() # long time
"""
if seed is None:
seed = int(current_randstate().long_seed() % sys.maxsize)
import networkx
return Graph(networkx.random_lobster(n, p, q, seed=seed))
def RandomTree(n):
r"""
Returns a random tree on `n` nodes numbered `0` through `n-1`.
By Cayley's theorem, there are `n^{n-2}` trees with vertex
set `\{0,1,...,n-1\}`. This constructor chooses one of these uniformly
at random.
ALGORITHM:
The algorithm works by generating an `(n-2)`-long
random sequence of numbers chosen independently and uniformly
from `\{0,1,\ldots,n-1\}` and then applies an inverse
Prufer transformation.
INPUT:
- ``n`` - number of vertices in the tree
EXAMPLES::
sage: G = graphs.RandomTree(10)
sage: G.is_tree()
True
sage: G.show() # long time
TESTS:
Ensuring that we encounter no unexpected surprise ::
sage: all( graphs.RandomTree(10).is_tree()
....: for i in range(100) )
True
"""
from sage.misc.prandom import randint
g = Graph()
# create random Prufer code
code = [ randint(0,n-1) for i in range(n-2) ]
# We count the number of symbols of each type.
# count[k] is the no. of times k appears in code
#
# (count[k] is set to -1 when the corresponding vertex is not
# available anymore)
count = [0] * n
for k in code:
count[k] += 1
g.add_vertices(range(n))
for s in code:
for x in range(n):
if count[x] == 0:
break
count[x] = -1
g.add_edge(x,s)
count[s] -= 1
# Adding as an edge the last two available vertices
last_edge = [ v for v in range(n) if count[v] != -1 ]
g.add_edge(last_edge)
return g
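# Illustration (not part of the original source): decoding the Prufer code
# [3, 3, 1] for n = 5 with the loop above gives counts [0, 1, 0, 2, 0];
# vertex 0 is the smallest with count 0, yielding edge (0, 3); vertex 2 then
# yields (2, 3); vertex 3 becomes available and yields (3, 1); the two
# remaining available vertices give the last edge (1, 4).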
def RandomTreePowerlaw(n, gamma=3, tries=1000, seed=None):
"""
Return a tree with a power law degree distribution, or ``False`` on failure.
From the NetworkX documentation: a trial power law degree sequence is chosen
and then elements are swapped with new elements from a power law
distribution until the sequence makes a tree (size = order - 1).
INPUT:
- ``n`` -- number of vertices
- ``gamma`` -- exponent of power law distribution
- ``tries`` -- number of attempts to adjust sequence to make a tree
- ``seed`` -- a ``random.Random`` seed or a Python ``int`` for the random
number generator (default: ``None``)
EXAMPLES:
We check that the generated graph is a tree::
sage: G = graphs.RandomTreePowerlaw(10, 3)
sage: G.is_tree()
True
sage: G.order(), G.size()
(10, 9)
::
sage: G = graphs.RandomTreePowerlaw(15, 2)
sage: if G:
....: G.show() # random output, long time
"""
if seed is None:
seed = int(current_randstate().long_seed() % sys.maxsize)
import networkx
try:
return Graph(networkx.random_powerlaw_tree(n, gamma, seed=seed, tries=tries))
except networkx.NetworkXError:
return False
def RandomRegular(d, n, seed=None):
r"""
Return a random `d`-regular graph on `n` vertices, or ``False`` on failure.
Since every edge is incident to two vertices, `n\times d` must be even.
INPUT:
- ``d`` -- degree
- ``n`` -- number of vertices
- ``seed`` -- a ``random.Random`` seed or a Python ``int`` for the random
number generator (default: ``None``)
EXAMPLES:
We check that a random graph with 8 nodes each of degree 3 is 3-regular::
sage: G = graphs.RandomRegular(3, 8)
sage: G.is_regular(k=3)
True
sage: G.degree_histogram()
[0, 0, 0, 8]
::
sage: G = graphs.RandomRegular(3, 20)
sage: if G:
....: G.show() # random output, long time
REFERENCES:
- [KV2003]_
- [SW1999]_
"""
if seed is None:
seed = int(current_randstate().long_seed() % sys.maxsize)
import networkx
try:
N = networkx.random_regular_graph(d, n, seed=seed)
if N is False:
return False
return Graph(N, sparse=True)
except Exception:
return False
def RandomShell(constructor, seed=None):
"""
Return a random shell graph for the constructor given.
INPUT:
- ``constructor`` -- a list of 3-tuples `(n, m, d)`, each representing a
shell, where:
- ``n`` -- the number of vertices in the shell
- ``m`` -- the number of edges in the shell
- ``d`` -- the ratio of inter (next) shell edges to intra shell edges
- ``seed`` -- a ``random.Random`` seed or a Python ``int`` for the random
number generator (default: ``None``)
EXAMPLES::
sage: G = graphs.RandomShell([(10,20,0.8),(20,40,0.8)])
sage: G.order(), G.size()
(30, 52)
sage: G.show() # long time
"""
if seed is None:
seed = int(current_randstate().long_seed() % sys.maxsize)
import networkx
return Graph(networkx.random_shell_graph(constructor, seed=seed))
def RandomToleranceGraph(n):
r"""
Returns a random tolerance graph.
The random tolerance graph is built from a random tolerance representation
by using the function `ToleranceGraph`. This representation is a list
`((l_0,r_0,t_0), (l_1,r_1,t_1), ..., (l_k,r_k,t_k))` where `k = n-1` and
`I_i = (l_i,r_i)` denotes a random interval and `t_i` a random positive
value. The width of the representation is limited to n**2 * 2**n.
.. NOTE::
The vertices are named 0, 1, ..., n-1. The tolerance representation used
to create the graph is saved with the graph and can be recovered using
``get_vertex()`` or ``get_vertices()``.
INPUT:
- ``n`` -- number of vertices of the random graph.
EXAMPLES:
Every tolerance graph is perfect. Hence, the chromatic number is equal to
the clique number ::
sage: g = graphs.RandomToleranceGraph(8)
sage: g.clique_number() == g.chromatic_number()
True
TESTS::
sage: g = graphs.RandomToleranceGraph(-2)
Traceback (most recent call last):
...
ValueError: The number `n` of vertices must be >= 0.
"""
from sage.misc.prandom import randint
from sage.graphs.generators.intersection import ToleranceGraph
if n<0:
raise ValueError('The number `n` of vertices must be >= 0.')
W = n**2 * 2**n
tolrep = [tuple(sorted((randint(0,W), randint(0,W)))) + (randint(0,W),) for i in range(n)]
return ToleranceGraph(tolrep)
# uniform random triangulation using Schaeffer-Poulalhon algorithm
def _auxiliary_random_forest_word(n, k):
r"""
Return a random word used to generate random triangulations.
INPUT:
- ``n`` -- an integer
- ``k`` -- an integer
OUTPUT:
A binary sequence `w` of length `4n+2k-4` with `n` ones, such that any
proper prefix `u` of `w` satisfies `3|u|_1 - |u|_0 \geq -2k+4` (where
`|u|_1` and `|u|_0` are respectively the number of 1s and 0s in `u`). Those
words are the expected input of :func:`_contour_and_graph_from_words`.
ALGORITHM:
A random word with these numbers of `0` and `1` plus one additional `0` is
chosen. This word is then rotated such that the prefix property is fulfilled for
each proper prefix and only violated by the final `0` (which is deleted
afterwards). There is exactly one such rotation (compare Section 4.3 in
[PS2006]_).
Let us consider a word `w` satisfying the expected conditions. By
drawing a step `(1,3)` for each `1` and a step `(1,-1)` for each `0` in
`w`, one gets a path starting at height `0`, ending at height `-2k+3`
(before removing the final `0`) and staying above (or on) the horizontal
line of height `-2k+4` except at the end point.
Now consider an arbitrary word `w` with `n` ones and `3n+2k-3` zeros. By
cutting the word at the first position of minimum height, let us write
`w=uv`. One can then see that the word `vu` touches the line of height
`-2k+3` only after the last step. Further one can see that this is the only
rotation of the word `w` with this property.
EXAMPLES::
sage: from sage.graphs.generators.random import _auxiliary_random_forest_word
sage: with(seed(94364165)):
....: _auxiliary_random_forest_word(4, 3)
....: _auxiliary_random_forest_word(3, 5)
[1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
[1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
TESTS::
sage: def partial_sums(w):
....: steps = {1: 3, 0: -1}
....: curr_sum = 0
....: for x in w:
....: curr_sum += steps[x]
....: yield curr_sum
sage: for k in range(3,6):
....: for n in range(k, 10):
....: w = _auxiliary_random_forest_word(n, k)
....: assert len(w) == 4*n + 2*k - 4
....: assert w.count(1) == n
....: for partial_sum in partial_sums(w):
....: assert partial_sum >= -2*k + 4
"""
from sage.misc.prandom import shuffle
w = [0] * (3*n + 2*k - 3) + [1] * n
shuffle(w)
# Finding the admissible shift
partial_sum = 0
min_value = 0
min_pos = 0
for i, x in enumerate(w):
if x:
partial_sum += 3
else:
partial_sum -= 1
if partial_sum < min_value:
min_value = partial_sum
min_pos = i
return w[min_pos+1:] + w[:min_pos]
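# Illustration (not part of the original source): for n = 1, k = 3 the
# shuffled word has one 1 and six 0s, e.g. w = [0, 0, 1, 0, 0, 0, 0]; the
# running sums (+3 per 1, -1 per 0) are -1, -2, 1, 0, -1, -2, -3, so the
# minimum is first reached at the last position and the rotation returned is
# [0, 0, 1, 0, 0, 0], all of whose proper prefixes satisfy 3|u|_1 - |u|_0 >= -2.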
def _contour_and_graph_from_words(pendant_word, forest_word):
r"""
Return the contour word and the graph of inner vertices of the `k`-gonal
forest associated with the words ``pendant_word`` and ``forest_word``.
INPUT:
- ``pendant_word`` -- a word with `k-1` zeros and `k-3` ones
- ``forest_word`` -- a word in `0` and `1` as given by
:func:`_auxiliary_random_forest_word` with the parameter ``k`` set to the number
of zeros in ``pendant_word`` plus `1`
``forest_word`` must satisfy the conditions hinted in Proposition 5.4 of
[PS2006]_ (see :func:`_auxiliary_random_forest_word`).
OUTPUT:
a pair ``(seq, G)`` where:
- ``seq`` is a sequence of pairs (label, integer) representing the
contour walk along the `k`-gonal forest associated with the words
``pendant_word`` and ``forest_word``.
- ``G`` is the `k`-gonal forest associated with the words ``pendant_word``
and ``forest_word``.
The underlying bijection from words to `k`-gonal forests is described in
Section 5.1 of [PS2006]_. The ``pendant_word`` corresponds to the factor
`\binom{2k-4}{k-3}` in the counting formula of Proposition 5.4 and the
``forest_word`` corresponds to the factor `\frac{2k-3}{3m+2k-3}
\binom{4m+2k-4}{m}`.
In the ``forest_word``, the letter `1` means going away from the root ("up")
from an inner vertex to another inner vertex. The letter `0` denotes all
other steps of the discovery, i.e. either discovering a leaf vertex or going
toward the root ("down").
Inner vertices are tagged with 'in' and leaves are tagged with
'lf'. Inner vertices are moreover labelled by integers, and leaves
by the label of the neighbor inner vertex.
EXAMPLES::
sage: from sage.graphs.generators.random import _contour_and_graph_from_words
sage: seq, G = _contour_and_graph_from_words([0, 0], [1, 0, 0, 0, 0, 0])
sage: seq
[('in', 0),
('in', 3),
('lf', 3),
('in', 3),
('lf', 3),
('in', 3),
('in', 0),
('in', 1),
('in', 2)]
sage: G
Graph on 4 vertices
sage: from sage.graphs.generators.random import _auxiliary_random_forest_word
sage: _, G = _contour_and_graph_from_words([0, 1, 0, 0, 1, 0], _auxiliary_random_forest_word(20, 5)) # random
sage: len(G.faces())
2
sage: longw = [1,1,0,1,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
sage: _, G = _contour_and_graph_from_words([0, 0], longw)
sage: G.get_embedding()
{0: [1, 2, 3],
1: [2, 0],
2: [0, 1],
3: [0, 4],
4: [3, 5, 6],
5: [4],
6: [4, 7, 8],
7: [6],
8: [6]}
"""
k = (len(pendant_word)+4) // 2
index = 0 # numbering of inner vertices
word = [('in',0)] # the word representing the contour walk
# start with the outer face, a cycle of length k
edges = [[i, (i+1) % k] for i in range(k)]
embedding = {i: [(i+1) % k, (i-1+k) % k] for i in range(k)}
# add the pendant edges
for x in pendant_word:
if x:
word.extend([('lf', index), ('in', index)])
else:
index += 1
word.append(('in', index))
# add trees
curr_word_pos = 0
curr_forest_word_pos = 0
while curr_forest_word_pos < len(forest_word):
x = forest_word[curr_forest_word_pos]
# insert a tree at current position
if x:
index += 1
embedding[index] = [word[curr_word_pos][1]]
embedding[word[curr_word_pos][1]].append(index)
edges.append([word[curr_word_pos][1], index])
# stack of leaves still to be created
leaf_stack = [index, index]
# stack of active inner nodes
inner_stack = [word[curr_word_pos][1], index]
word.insert(curr_word_pos+1, ('in', index))
curr_word_pos += 1
while len(inner_stack) > 1:
curr_forest_word_pos += 1
x = forest_word[curr_forest_word_pos]
if x:
index += 1
embedding[index] = inner_stack[-1:]
embedding[inner_stack[-1]].append(index)
leaf_stack.extend([index, index])
inner_stack.append(index)
edges.append(inner_stack[-2:])
word.insert(curr_word_pos+1, ('in', index))
curr_word_pos += 1
else:
# up and down to a new leaf
if leaf_stack and inner_stack[-1] == leaf_stack[-1]:
leaf_stack.pop()
word.insert(curr_word_pos+1, ('lf', inner_stack[-1]))
word.insert(curr_word_pos+2, ('in', inner_stack[-1]))
curr_word_pos += 2
# going down to a known inner vertex
else:
inner_stack.pop()
word.insert(curr_word_pos+1, ('in', inner_stack[-1]))
curr_word_pos += 1
# go to next insertion position
else:
curr_word_pos += 1
if word[curr_word_pos][0] == 'lf':
curr_word_pos += 1
curr_forest_word_pos += 1
G = Graph(edges, format='list_of_edges')
G.set_embedding(embedding)
return word, G
def RandomTriangulation(n, set_position=False, k=3):
r"""
Return a random inner triangulation of an outer face of degree ``k`` with
``n`` vertices in total.
An inner triangulation is a plane graph all of whose faces (except the
outer/unbounded face) are triangles (3-cycles).
INPUT:
- ``n`` -- the number of vertices of the graph
- ``k`` -- the size of the outer face
- ``set_position`` -- boolean (default ``False``); if set to ``True``, this
will compute coordinates for a planar drawing of the graph.
OUTPUT:
A random graph chosen uniformly among the inner triangulations of a *rooted*
`k`-gon with `n` vertices (including the `k` vertices from the outer face).
This is a planar graph and comes with a combinatorial embedding. The
vertices of the root edge are labelled ``-1`` and ``-2`` and the outer face
is the face returned by :meth:`Graph.faces` in which ``-1`` and ``-2`` are
consecutive vertices in this order.
Because some triangulations have nontrivial automorphism
groups, this may not be equal to the uniform distribution among inner
triangulations of unrooted `k`-gons.
ALGORITHM:
The algorithm is taken from [PS2006]_, Section 5.
Starting from a planar `k`-gonal forest (represented by its contour as a
    sequence of vertices), one performs local closures until none is
    possible. A local closure amounts to replacing, in the cyclic
    contour word, a sequence ``in1, in2, in3, lf, in3`` by
    ``in1, in3``.
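    For instance (an illustrative sketch only, using the word conventions of
    this file), a portion ``('in', 0), ('in', 3), ('in', 4), ('lf', 4), ('in', 4)``
    of the contour is rewritten as ``('in', 0), ('in', 4)``, and the chord
    between the inner vertices ``0`` and ``4`` is recorded as a new edge.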
At every step of the algorithm, newly created edges are recorded
in a graph, which will be returned at the end.
The combinatorial embedding is also computed and recorded in the
output graph.
.. SEEALSO::
:meth:`~sage.graphs.graph_generators.GraphGenerators.triangulations`,
:func:`~sage.homology.examples.RandomTwoSphere`.
EXAMPLES::
sage: G = graphs.RandomTriangulation(6, True); G
Graph on 6 vertices
sage: G.is_planar()
True
sage: G.girth()
3
sage: G.plot(vertex_size=0, vertex_labels=False)
Graphics object consisting of 13 graphics primitives
sage: H = graphs.RandomTriangulation(7, k=5)
sage: sorted(len(f) for f in H.faces())
[3, 3, 3, 3, 3, 3, 3, 5]
TESTS::
sage: G.get_embedding() is not None
True
sage: graphs.RandomTriangulation(3, k=4)
Traceback (most recent call last):
...
ValueError: The number 'n' of vertices must be at least the size 'k' of the outer face.
sage: graphs.RandomTriangulation(3, k=2)
Traceback (most recent call last):
...
ValueError: The size 'k' of the outer face must be at least 3.
sage: for i in range(10):
....: g = graphs.RandomTriangulation(30) # random
....: assert g.is_planar()
sage: for k in range(3, 10):
....: g = graphs.RandomTriangulation(10, k=k) # random
....: assert g.is_planar(on_embedding=g.get_embedding())
"""
if k < 3:
raise ValueError("The size 'k' of the outer face must be at least 3.")
if n < k:
raise ValueError("The number 'n' of vertices must be at least the size "
"'k' of the outer face.")
from sage.misc.prandom import shuffle
pendant_word = [0] * (k-1) + [1] * (k-3)
shuffle(pendant_word)
forest_word = _auxiliary_random_forest_word(n-k, k)
word, graph = _contour_and_graph_from_words(pendant_word, forest_word)
edges = []
embedding = graph.get_embedding()
pattern = ['in', 'in', 'in', 'lf', 'in'] # 'partial closures'
def rotate_word_to_next_occurrence(word):
"""
Rotate ``word`` so that the given pattern occurs at the beginning.
If the given pattern is not found, return the empty list.
"""
N = len(word)
for i in range(N):
if all(word[(i + j) % N][0] == pattern[j] for j in range(5)):
return word[i:] + word[:i]
return []
# We greedily perform the replacements 'in1,in2,in3,lf,in3'->'in1,in3'.
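    # Each replacement adds one chord (in1, in3) to ``edges`` and shortens the
    # contour word by three letters, so the loop below always terminates; it
    # stops as soon as the pattern no longer occurs in the cyclic word.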
while True:
word2 = rotate_word_to_next_occurrence(word)
if len(word2) >= 5:
word = [word2[0]] + word2[4:]
in1, in2, in3 = [u[1] for u in word2[:3]]
edges.append([in1, in3]) # edge 'in1,in3'
idx = embedding[in1].index(in2)
embedding[in1].insert(idx, in3)
idx = embedding[in3].index(in2)
embedding[in3].insert(idx + 1, in1)
else:
break
graph.add_edges(edges)
graph.set_embedding(embedding)
graph.relabel({0: -2, 1: -1})
assert graph.num_edges() == 3*n - 3 - k
assert graph.num_verts() == n
if set_position:
graph.layout(layout="planar", save_pos=True)
return graph
def blossoming_contour(t, shift=0):
"""
Return a random blossoming of a binary tree `t`, as a contour word.
    This does several things simultaneously:
- complete the binary tree, by adding leaves labelled ``xb``,
- add a vertex labelled ``n`` at the middle of every inner
edge, with a leaf labelled ``x`` either on the left or on the
right (at random),
- number all vertices (but not leaves) by integers starting from `shift`,
- compute the counter-clockwise contour word of the result.
Initial vertices receive the label ``i``.
This is an auxiliary function, used for the generation of random
planar bicubic maps.
INPUT:
- `t` -- a binary tree (non-empty)
- ``shift`` -- an integer (default `0`), used as a starting index
OUTPUT:
contour word of a random blossoming of `t`
EXAMPLES::
sage: from sage.graphs.generators.random import blossoming_contour
sage: print(blossoming_contour(BinaryTrees(1).an_element()))
[('i', 0), ('xb',), ('i', 0), ('xb',), ('i', 0)]
sage: t = BinaryTrees(2).random_element()
sage: print(blossoming_contour(t)) # random
[('i', 0), ('xb',), ('i', 0), ('n', 2), ('i', 1), ('xb',), ('i', 1),
('xb',), ('i', 1), ('n', 2), ('x',), ('n', 2), ('i', 0)]
sage: w = blossoming_contour(BinaryTrees(3).random_element()); len(w)
21
sage: w.count(('xb',))
4
sage: w.count(('x',))
2
TESTS::
sage: from sage.graphs.generators.random import blossoming_contour
sage: blossoming_contour(BinaryTrees(0).an_element())
Traceback (most recent call last):
...
ValueError: tree must be non-empty
"""
if not t:
raise ValueError('tree must be non-empty')
t1, t2 = t
leaf_xb = ('xb',)
leaf_x = ('x',)
n1 = t1.node_number()
n = t.node_number()
# adding buds on edges in t1
if not t1:
tt1 = [leaf_xb]
elif randint(0, 1):
label1 = ('n', shift)
tt1 = [label1, leaf_x, label1] + blossoming_contour(t1, shift + 1)
tt1 += [label1]
else:
label1 = ('n', shift + 2 * n1 - 1)
tt1 = [label1] + blossoming_contour(t1, shift)
tt1 += [label1, leaf_x, label1]
# adding buds on edges in t2
if not t2:
tt2 = [leaf_xb]
elif randint(0, 1):
label2 = ('n', shift + 2 * n1 + 1)
tt2 = [label2, leaf_x, label2]
tt2 += blossoming_contour(t2, shift + 2 * n1 + 2) + [label2]
else:
label2 = ('n', shift + 2 * n - 2)
tt2 = [label2] + blossoming_contour(t2, shift + 2 * n1 + 1)
tt2 += [label2, leaf_x, label2]
label = [('i', shift + 2 * n1)]
return label + tt1 + label + tt2 + label
def RandomBicubicPlanar(n):
"""
Return the graph of a random bipartite cubic map with `3 n` edges.
INPUT:
    - `n` -- an integer (at least `1`)
OUTPUT:
a graph with multiple edges (no embedding is provided)
    The algorithm used is described in [Sch1999]_. This samples
    a rooted bipartite cubic map, chosen uniformly at random.
First one creates a random binary tree with `n` vertices. Next one
turns this into a blossoming tree (at random) and reads the
contour word of this blossoming tree.
Then one performs a rotation on this word so that this becomes a
    balanced word. There are three ways to do that; one is picked at
    random. Then a graph is built from the balanced word by iterated
    closure (adding edges).
In the returned graph, the three edges incident to any given
vertex are colored by the integers 0, 1 and 2.
.. SEEALSO:: the auxiliary method :func:`blossoming_contour`
EXAMPLES::
sage: n = randint(200, 300)
sage: G = graphs.RandomBicubicPlanar(n)
sage: G.order() == 2*n
True
sage: G.size() == 3*n
True
sage: G.is_bipartite() and G.is_planar() and G.is_regular(3)
True
sage: dic = {'red':[v for v in G.vertices() if v[0] == 'n'],
....: 'blue': [v for v in G.vertices() if v[0] != 'n']}
sage: G.plot(vertex_labels=False,vertex_size=20,vertex_colors=dic)
Graphics object consisting of ... graphics primitives
.. PLOT::
:width: 300 px
G = graphs.RandomBicubicPlanar(200)
V0 = [v for v in G.vertices() if v[0] == 'n']
V1 = [v for v in G.vertices() if v[0] != 'n']
dic = {'red': V0, 'blue': V1}
sphinx_plot(G.plot(vertex_labels=False,vertex_colors=dic))
"""
from sage.combinat.binary_tree import BinaryTrees
from sage.rings.finite_rings.integer_mod_ring import Zmod
if not n:
raise ValueError("n must be at least 1")
# first pick a random binary tree
t = BinaryTrees(n).random_element()
# next pick a random blossoming of this tree, compute its contour
contour = blossoming_contour(t) + [('xb',)] # adding the final xb
    # first step: rotate the contour word to one of the 3 balanced words
N = len(contour)
double_contour = contour + contour
pile = []
not_touched = [i for i in range(N) if contour[i][0] in ['x', 'xb']]
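    # Note: the 'x' buds and 'xb' leaves are matched like opening and closing
    # brackets while walking the doubled contour: every 'x' is pushed on
    # ``pile`` and paired with the next available 'xb'. Since the blossoming
    # contour contains three more 'xb' leaves than 'x' buds, exactly three
    # 'xb' remain unmatched; each of them yields one admissible rotation of
    # the contour into a balanced word, hence the ``randint(0, 2)`` choice below.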
for i, w in enumerate(double_contour):
if w[0] == 'x' and i < N:
pile.append(i)
elif w[0] == 'xb' and (i % N) in not_touched:
if pile:
j = pile.pop()
not_touched.remove(i % N)
not_touched.remove(j)
# random choice among 3 possibilities for a balanced word
idx = not_touched[randint(0, 2)]
w = contour[idx + 1:] + contour[:idx + 1]
    # second step: create the graph by closure from the balanced word
G = Graph(multiedges=True)
pile = []
Z3 = Zmod(3)
colour = Z3.zero()
not_touched = [i for i, v in enumerate(w) if v[0] in ['x', 'xb']]
for i, v in enumerate(w):
# internal edges
if v[0] == 'i':
colour += 1
if w[i + 1][0] == 'n':
G.add_edge((w[i], w[i + 1], colour))
elif v[0] == 'n':
colour += 2
elif v[0] == 'x':
pile.append(i)
elif v[0] == 'xb' and i in not_touched:
if pile:
j = pile.pop()
G.add_edge((w[i + 1], w[j - 1], colour))
not_touched.remove(i)
not_touched.remove(j)
# there remains to add three edges to elements of "not_touched"
# from a new vertex labelled "n"
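    # (each of the three remaining leaves contributes one edge to this extra
    # vertex, which therefore also has degree 3 and keeps the map cubic)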
for i in not_touched:
taken_colours = [edge[2] for edge in G.edges_incident(w[i - 1])]
colour = [u for u in Z3 if u not in taken_colours][0]
G.add_edge((('n', -1), w[i - 1], colour))
return G
| 33.845307 | 130 | 0.587627 |
c619c8bc8cc5f33a81302b10d642eb086f495e30
| 8,508 |
py
|
Python
|
tests/test_identifier.py
|
xli/client-sdk-python
|
1d0ec7f7b395bd827b778f1903001088e799fb05
|
[
"Apache-2.0"
] | 1 |
2021-02-15T14:41:34.000Z
|
2021-02-15T14:41:34.000Z
|
tests/test_identifier.py
|
xli/client-sdk-python
|
1d0ec7f7b395bd827b778f1903001088e799fb05
|
[
"Apache-2.0"
] | null | null | null |
tests/test_identifier.py
|
xli/client-sdk-python
|
1d0ec7f7b395bd827b778f1903001088e799fb05
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
import pytest
from diem import identifier, utils, InvalidSubAddressError, InvalidAccountAddressError
test_onchain_address = "f72589b71ff4f8d139674a3f7369c69b"
test_sub_address = "cf64428bdeb62af2"
none_sub_address = None
zero_sub_address = "00" * 8
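# Each fixture parameter below is a triple: the bech32-style human-readable
# part (hrp), the account identifier encoded with no sub-address, and the same
# account encoded with ``test_sub_address``; the tests unpack it in that order.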
@pytest.fixture(
scope="module",
params=[
(
"dm",
"dm1p7ujcndcl7nudzwt8fglhx6wxnvqqqqqqqqqqqqqd8p9cq",
"dm1p7ujcndcl7nudzwt8fglhx6wxn08kgs5tm6mz4us2vfufk",
),
(
"tdm",
"tdm1p7ujcndcl7nudzwt8fglhx6wxnvqqqqqqqqqqqqqv88j4s",
"tdm1p7ujcndcl7nudzwt8fglhx6wxn08kgs5tm6mz4ustv0tyx",
),
],
)
def hrp_addresses(request):
return request.param
def test_identifier_hrps():
assert identifier.HRPS == {1: "dm", 2: "tdm", 3: "tdm", 4: "tdm"}
def test_encode_addr_success(hrp_addresses):
hrp, enocded_addr_with_none_subaddr, enocded_addr_with_subaddr = hrp_addresses
# test with none sub_address
enocded_addr = identifier.encode_account(test_onchain_address, None, hrp)
assert enocded_addr == enocded_addr_with_none_subaddr
# even with zero sub_address, expected should not change from above
enocded_addr = identifier.encode_account(test_onchain_address, zero_sub_address, hrp)
assert enocded_addr == enocded_addr_with_none_subaddr
# test with some subaddress
enocded_addr = identifier.encode_account(test_onchain_address, test_sub_address, hrp)
assert enocded_addr == enocded_addr_with_subaddr
# accept AccountAddress and bytes sub-address as params too
enocded_addr = identifier.encode_account(
utils.account_address(test_onchain_address), utils.sub_address(test_sub_address), hrp
)
assert enocded_addr == enocded_addr_with_subaddr
def test_encode_addr_fail(hrp_addresses):
hrp = hrp_addresses[0]
    # wrong sub-address (length should be 8 bytes)
with pytest.raises(InvalidSubAddressError):
identifier.encode_account(test_onchain_address, test_sub_address[:-2], hrp)
# wrong address (length should be 16 bytes)
with pytest.raises(InvalidAccountAddressError):
identifier.encode_account(test_onchain_address + "ff", test_sub_address[:-2], hrp)
def test_decode_addr_success(hrp_addresses):
hrp, enocded_addr_with_none_subaddr, enocded_addr_with_subaddr = hrp_addresses
# test enocded_addr_with_none_subaddr
addr, subaddr = identifier.decode_account(enocded_addr_with_none_subaddr, hrp)
assert addr.to_hex() == test_onchain_address
assert subaddr is None
# test enocded_addr_with_subaddr
addr, subaddr = identifier.decode_account(enocded_addr_with_subaddr, hrp)
assert addr.to_hex() == test_onchain_address
assert subaddr.hex() == test_sub_address
def test_encode_decode_with_random_hrp():
# test with none sub_address
id = identifier.encode_account(test_onchain_address, None, "abc")
addr, sub = identifier.decode_account(id, "abc")
assert addr.to_hex() == test_onchain_address
assert sub is None
def test_decode_addr_fail(hrp_addresses):
hrp, enocded_addr_with_none_subaddr, enocded_addr_with_subaddr = hrp_addresses
# fail to decode invalid hrp
invalid_hrp_encoded_address = "btc1p7ujcndcl7nudzwt8fglhx6wxn08kgs5tm6mz4usw5p72t"
with pytest.raises(ValueError):
identifier.decode_account(invalid_hrp_encoded_address, hrp)
# fail to decode invalid "expected" hrp
with pytest.raises(ValueError):
identifier.decode_account(enocded_addr_with_none_subaddr, "xdm")
# fail to decode invalid version
invalid_version_encoded_address = enocded_addr_with_none_subaddr.replace("1p7", "1q7") # p (1) -> q (2)
with pytest.raises(ValueError):
identifier.decode_account(invalid_version_encoded_address, hrp)
# fail to decode due to checksum error
invalid_checksum_encoded_address = enocded_addr_with_none_subaddr.replace("d8p9cq", "d8p9c7").replace(
"v88j4s", "v88j4q"
)
with pytest.raises(ValueError):
identifier.decode_account(invalid_checksum_encoded_address, hrp)
# fail to decode mixed case per BIP 173
mixedcase_encoded_address = enocded_addr_with_none_subaddr.replace("qqqqqqqqqqqqq", "qqQqqqqqqqqqq")
with pytest.raises(ValueError):
identifier.decode_account(mixedcase_encoded_address, hrp)
# fail to decode shorter payload
short_encoded_address = enocded_addr_with_none_subaddr.replace("qqqqqqqqqqqqq", "qqqqqqqqqqq")
with pytest.raises(ValueError):
identifier.decode_account(short_encoded_address, hrp)
# fail to decode larger payload
large_encoded_address = enocded_addr_with_none_subaddr.replace("qqqqqqqqqqqqq", "qqqqqqqqqqqqqq")
with pytest.raises(ValueError):
identifier.decode_account(large_encoded_address, hrp)
# fail to decode invalid separator
invalid_separator_encoded_address = enocded_addr_with_none_subaddr.replace("1p7", "0p7")
with pytest.raises(ValueError):
identifier.decode_account(invalid_separator_encoded_address, hrp)
# fail to decode invalid character
invalid_char_encoded_address = enocded_addr_with_none_subaddr.replace("1p7", "1pb")
with pytest.raises(ValueError):
identifier.decode_account(invalid_char_encoded_address, hrp)
def test_intent_identifier(hrp_addresses):
hrp, enocded_addr_with_none_subaddr, enocded_addr_with_subaddr = hrp_addresses
account_id = identifier.encode_account(test_onchain_address, None, hrp)
intent_id = identifier.encode_intent(account_id, "XUS", 123)
assert intent_id == "diem://%s?c=%s&am=%d" % (enocded_addr_with_none_subaddr, "XUS", 123)
intent = identifier.decode_intent(intent_id, hrp)
assert intent.account_address == utils.account_address(test_onchain_address)
assert intent.account_address_bytes.hex() == test_onchain_address
assert intent.sub_address is None
assert intent.currency_code == "XUS"
assert intent.amount == 123
assert account_id == intent.account_id
def test_intent_identifier_with_sub_address(hrp_addresses):
hrp, enocded_addr_with_none_subaddr, enocded_addr_with_subaddr = hrp_addresses
account_id = identifier.encode_account(test_onchain_address, test_sub_address, hrp)
intent_id = identifier.encode_intent(account_id, "XUS", 123)
assert intent_id == "diem://%s?c=%s&am=%d" % (enocded_addr_with_subaddr, "XUS", 123)
intent = identifier.decode_intent(intent_id, hrp)
assert intent.account_address_bytes.hex() == test_onchain_address
assert intent.sub_address == bytes.fromhex(test_sub_address)
assert intent.currency_code == "XUS"
assert intent.amount == 123
def test_intent_identifier_decode_errors(hrp_addresses):
hrp, enocded_addr_with_none_subaddr, enocded_addr_with_subaddr = hrp_addresses
# amount is not int
with pytest.raises(identifier.InvalidIntentIdentifierError):
identifier.decode_intent("diem://%s?c=XUS&am=str" % (enocded_addr_with_none_subaddr), hrp)
    # amount parameter is missing
with pytest.raises(identifier.InvalidIntentIdentifierError):
identifier.decode_intent("diem://%s?c=XUS" % (enocded_addr_with_none_subaddr), hrp)
    # too many amount parameters
with pytest.raises(identifier.InvalidIntentIdentifierError):
identifier.decode_intent("diem://%s?c=XUS&am=2&am=3" % (enocded_addr_with_none_subaddr), hrp)
    # amount value is empty
with pytest.raises(identifier.InvalidIntentIdentifierError):
identifier.decode_intent("diem://%s?c=XUS&am=" % (enocded_addr_with_none_subaddr), hrp)
    # currency code is missing
with pytest.raises(identifier.InvalidIntentIdentifierError):
identifier.decode_intent("diem://%s?am=2" % (enocded_addr_with_none_subaddr), hrp)
    # scheme does not match
with pytest.raises(identifier.InvalidIntentIdentifierError):
identifier.decode_intent("hello://%s?am=2&c=XUS" % (enocded_addr_with_none_subaddr), hrp)
    # hrp does not match
with pytest.raises(identifier.InvalidIntentIdentifierError):
identifier.decode_intent("diem://%s?am=2&c=XUS" % (enocded_addr_with_none_subaddr), "xdm")
def test_decode_hrp(hrp_addresses):
hrp, enocded_addr_with_none_subaddr, enocded_addr_with_subaddr = hrp_addresses
assert identifier.decode_hrp(enocded_addr_with_none_subaddr) == hrp
assert identifier.decode_hrp(enocded_addr_with_subaddr) == hrp
with pytest.raises(ValueError):
identifier.decode_hrp("")
| 40.903846 | 108 | 0.758463 |
0713c2a792bbc06aa2d8a28412fa9d9603d8b18a
| 47,126 |
py
|
Python
|
dev/services/wms/mwfc/wms_cfg.py
|
zxh547/dea-config
|
41c07261921c667454074ce1c8c8f162baa04d0b
|
[
"Apache-2.0"
] | null | null | null |
dev/services/wms/mwfc/wms_cfg.py
|
zxh547/dea-config
|
41c07261921c667454074ce1c8c8f162baa04d0b
|
[
"Apache-2.0"
] | null | null | null |
dev/services/wms/mwfc/wms_cfg.py
|
zxh547/dea-config
|
41c07261921c667454074ce1c8c8f162baa04d0b
|
[
"Apache-2.0"
] | null | null | null |
response_cfg = {
"Access-Control-Allow-Origin": "*", # CORS header
"Cache-Control": "public, max-age=3600"
}
service_cfg = {
# Required config
"title": "Combined WMS Server",
"url": "https://mwfc.datakube.gadevs.ga/wms/",
"published_CRSs": {
"EPSG:3857": { # Web Mercator
"geographic": False,
"horizontal_coord": "x",
"vertical_coord": "y",
},
"EPSG:4326": { # WGS-84
"geographic": True,
"vertical_coord_first": True
},
"EPSG:3577": { # GDA-94, internal representation
"geographic": False,
"horizontal_coord": "easting",
"vertical_coord": "northing",
},
},
"layer_limit": 1,
"max_width": 512,
"max_height": 512,
"abstract": """Historic Landsat imagery for Australia.""",
"keywords": [
"Geomedian",
"australia",
"time-series",
],
"contact_info": {
"person": "Digital Earth Australia",
"organisation": "Geoscience Australia",
"position": "Technical Lead",
"address": {
"type": "postal",
"address": "GPO Box 378",
"city": "Canberra",
"state": "ACT",
"postcode": "2906",
"country": "Australia",
},
"telephone": "",
"fax": "",
"email": "",
},
"fees": "",
"access_constraints": "",
"wcs_formats": {
"GeoTIFF": {
"renderer": "datacube_wms.wcs_utils.get_tiff",
"mime": "image/geotiff",
"extension": "tif",
"multi-time": False
},
"netCDF": {
"renderer": "datacube_wms.wcs_utils.get_netcdf",
"mime": "application/x-netcdf",
"extension": "nc",
"multi-time": True,
}
},
"native_wcs_format": "GeoTIFF",
"default_geographic_CRS": "EPSG:4326",
"wcs": True,
"wms": True,
"use_default_extent": True
}
layer_cfg = [
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "Geomedian_AU_NBART",
"title": "Geomedian_au_nbart_surface_reflectance",
"abstract": "Images from the Geomedian Surface Reflectance on Level2 Products",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "LANDSAT_8",
# Included as a keyword for the layer
"type": "SR",
# Included as a keyword for the layer
"variant": "Level 2",
# The WMS name for the layer
"name": "ls8_nbart_geomedian_annual",
# The Datacube name for the associated data product
"product_name": "ls8_nbart_geomedian_annual",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
                # Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
                # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
                # Determines which portions of the dataset are potentially meaningful data.
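                # Here this means: a pixel is treated as data only if its value differs
                # from the band's declared 'nodata' attribute.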
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [ ],
"apply_solar_corrections": False,
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infra_red",
"title": "False colour multi-band infra-red",
"abstract": "Simple false-colour image, using the near and short-wave infra-red bands",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour SWIR, NIR and green",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Spectral band 2 - Blue",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Spectral band 3 - Green",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Spectral band 4 - Red",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Spectral band 5 - Near infra-red",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Spectral band 6 - Short wave infra-red 1",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Spectral band 7 - Short wave infra-red 2",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
},
{
"name": "ndwi",
"title": "NDWI",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"heat_mapped": True,
"index_function": lambda data: (data["green"] - data["nir"]) / (data["nir"] + data["green"]),
"needed_bands": ["green", "nir"],
"range": [0.0, 1.0],
},
{
"name": "ndbi",
"title": "NDBI",
"abstract": "Normalised Difference Buildup Index - a derived index that correlates with the existence of urbanisation",
"heat_mapped": True,
"index_function": lambda data: (data["swir2"] - data["nir"]) / (data["swir2"] + data["nir"]),
"needed_bands": ["swir2", "nir"],
"range": [0.0, 1.0],
},
# Mask layers - examples of how to display raw pixel quality data.
# This works by creatively mis-using the Heatmap style class.
# {
# "name": "cloud_mask",
# "title": "Cloud Mask",
# "abstract": "Highlight pixels with cloud.",
# "heat_mapped": True,
# "index_function": lambda data: data["red"] * 0.0 + 0.1,
# "needed_bands": ["red"],
# "range": [0.0, 1.0],
# # Mask flags normally describe which areas SHOULD be shown.
# (i.e. pixels for which any of the declared flags are true)
# pq_mask_invert is intended to invert this logic.
# (i.e. pixels for which none of the declared flags are true)
#
# i.e. Specifying like this shows pixels which are not clouds in either metric.
# Specifying "cloud" and setting the "pq_mask_invert" to False would
# show pixels which are not clouds in both metrics.
# "pq_masks": [
# {
# "flags": {
# "cloud": False,
# }
# }
# ],
# },
# {
# "name": "cloud_acca",
# "title": "Cloud acca Mask",
# "abstract": "Highlight pixels with cloud.",
# "heat_mapped": True,
# "index_function": lambda data: data["red"] * 0.0 + 0.4,
# "needed_bands": ["red"],
# "range": [0.0, 1.0],
# "pq_masks": [
# {
# "flags": {
# "cloud": True,
# }
# }
# ],
# },
# {
# "name": "cloud_fmask",
# "title": "Cloud fmask Mask",
# "abstract": "Highlight pixels with cloud.",
# "heat_mapped": True,
# "index_function": lambda data: data["red"] * 0.0 + 0.8,
# "needed_bands": ["red"],
# "range": [0.0, 1.0],
# "pq_masks": [
# {
# "flags": {
# "cloud_fmask": "cloud",
# },
# },
# ],
# },
# {
# "name": "contiguous_mask",
# "title": "Contiguous Data Mask",
# "abstract": "Highlight pixels with non-contiguous data",
# "heat_mapped": True,
# "index_function": lambda data: data["red"] * 0.0 + 0.3,
# "needed_bands": ["red"],
# "range": [0.0, 1.0],
# "pq_masks": [
# {
# "flags": {
# "contiguous": False
# },
# },
# ],
# },
# Hybrid style - mixes a linear mapping and a heat mapped index
{
"name": "rgb_ndvi",
"title": "NDVI plus RGB",
"abstract": "Normalised Difference Vegetation Index (blended with RGB) - a derived index that correlates well with the existence of vegetation",
"component_ratio": 0.6,
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "LANDSAT_7",
# Included as a keyword for the layer
"type": "SR",
# Included as a keyword for the layer
"variant": "Level 2",
# The WMS name for the layer
"name": "ls7_nbart_geomedian_annual",
# The Datacube name for the associated data product
"product_name": "ls7_nbart_geomedian_annual",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
                # Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
                # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
                # Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infra_red",
"title": "False colour multi-band infra-red",
"abstract": "Simple false-colour image, using the near and short-wave infra-red bands",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour SWIR, NIR and green",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Spectral band 2 - Blue",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Spectral band 3 - Green",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Spectral band 4 - Red",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Spectral band 5 - Near infra-red",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Spectral band 6 - Short wave infra-red 1",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Spectral band 7 - Short wave infra-red 2",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
#
# Examples of non-linear heat-mapped styles.
{
"name": "ndvi",
"title": "NDVI",
"abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
},
{
"name": "ndwi",
"title": "NDWI",
"abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
"heat_mapped": True,
"index_function": lambda data: (data["green"] - data["nir"]) / (data["nir"] + data["green"]),
"needed_bands": ["green", "nir"],
"range": [0.0, 1.0],
},
{
"name": "ndbi",
"title": "NDBI",
"abstract": "Normalised Difference Buildup Index - a derived index that correlates with the existence of urbanisation",
"heat_mapped": True,
"index_function": lambda data: (data["swir2"] - data["nir"]) / (data["swir2"] + data["nir"]),
"needed_bands": ["swir2", "nir"],
"range": [0.0, 1.0],
},
{
"name": "rgb_ndvi",
"title": "NDVI plus RGB",
"abstract": "Normalised Difference Vegetation Index (blended with RGB) - a derived index that correlates well with the existence of vegetation",
"component_ratio": 0.6,
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
},
{
# Included as a keyword for the layer
"label": "LANDSAT_5",
# Included as a keyword for the layer
"type": "SR",
# Included as a keyword for the layer
"variant": "Level 2",
# The WMS name for the layer
"name": "ls5_nbart_geomedian_annual",
# The Datacube name for the associated data product
"product_name": "ls5_nbart_geomedian_annual",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "ls8_level1_usgs",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_manual_data_merge": True,
# "data_manual_merge": True,
# "pq_band": "quality",
# "always_fetch_bands": [ "quality" ],
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 500.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
                # Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
                # 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
                # Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: data[band] != data[band].attrs['nodata'],
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"data_manual_merge": True,
"always_fetch_bands": [],
"apply_solar_corrections": False,
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
# The raw band value range to be compressed to an 8 bit range for the output image tiles.
# Band values outside this range are clipped to 0 or 255 as appropriate.
"scale_range": [0.0, 3000.0]
},
{
"name": "infra_red",
"title": "False colour multi-band infra-red",
"abstract": "Simple false-colour image, using the near and short-wave infra-red bands",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "infrared_green",
"title": "False colour SWIR, NIR and green",
"abstract": "False Colour image with SWIR1->Red, NIR->Green, and Green->Blue",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "blue",
"title": "Spectral band 2 - Blue",
"abstract": "Blue band, approximately 453nm to 511nm",
"components": {
"red": {
"blue": 1.0
},
"green": {
"blue": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "green",
"title": "Spectral band 3 - Green",
"abstract": "Green band, approximately 534nm to 588nm",
"components": {
"red": {
"green": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"green": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "red",
"title": "Spectral band 4 - Red",
"abstract": "Red band, roughly 637nm to 672nm",
"components": {
"red": {
"red": 1.0
},
"green": {
"red": 1.0
},
"blue": {
"red": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "nir",
"title": "Spectral band 5 - Near infra-red",
"abstract": "Near infra-red band, roughly 853nm to 876nm",
"components": {
"red": {
"nir": 1.0
},
"green": {
"nir": 1.0
},
"blue": {
"nir": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir1",
"title": "Spectral band 6 - Short wave infra-red 1",
"abstract": "Short wave infra-red band 1, roughly 1575nm to 1647nm",
"components": {
"red": {
"swir1": 1.0
},
"green": {
"swir1": 1.0
},
"blue": {
"swir1": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "swir2",
"title": "Spectral band 7 - Short wave infra-red 2",
"abstract": "Short wave infra-red band 2, roughly 2117nm to 2285nm",
"components": {
"red": {
"swir2": 1.0
},
"green": {
"swir2": 1.0
},
"blue": {
"swir2": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
{
"name": "rgb_ndvi",
"title": "NDVI plus RGB",
"abstract": "Normalised Difference Vegetation Index (blended with RGB) - a derived index that correlates well with the existence of vegetation",
"component_ratio": 0.6,
"heat_mapped": True,
"index_function": lambda data: (data["nir"] - data["red"]) / (data["nir"] + data["red"]),
"needed_bands": ["red", "nir"],
# Areas where the index_function returns outside the range are masked.
"range": [0.0, 1.0],
"components": {
"red": {
"red": 1.0
},
"green": {
"green": 1.0
},
"blue": {
"blue": 1.0
}
},
"scale_range": [0.0, 3000.0]
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
}
]
},
{
"name": "mangrove_cover",
"title": "Mangrove Cover",
"abstract": "Mangrove Cover",
"products": [
{
"label": "Mangrove Cover",
"type": "Level3",
"variant": "Level 3",
"name": "mangrove_cover",
"product_name": "mangrove_cover",
"min_zoom_factor": 500.0,
"zoomed_out_fill_colour": [150, 180, 200, 160],
"time_zone": 9,
"extent_mask_func": lambda data, band: data["extent"] == 1,
"ignore_info_flags": [],
"data_manual_merge": False,
"always_fetch_bands": ["extent"],
"apply_solar_corrections": False,
"styles": [
{
"name": "mangrove",
"title": "Mangrove Cover",
"abstract": "Mangrove Cover",
"value_map": {
"canopy_cover_class": [
{
"flags": {
"woodland": True
},
"values": {
"red": 159,
"green": 255,
"blue": 76
}
},
{
"flags": {
"open_forest": True
},
"values": {
"red": 94,
"green": 204,
"blue": 0
}
},
{
"flags": {
"closed_forest": True
},
"values": {
"red": 59,
"green": 127,
"blue": 0
}
},
]
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "mangrove",
},
]
},
]
| 45.576402 | 168 | 0.342338 |
964bb890edaa67f281e1b4301a9f5a4bb12f8e99
| 14,774 |
py
|
Python
|
raiden/blockchain_events_handler.py
|
anmolshl/raiden
|
f1cecb68cb43a2c00b2f719eadbe83137611a92a
|
[
"MIT"
] | null | null | null |
raiden/blockchain_events_handler.py
|
anmolshl/raiden
|
f1cecb68cb43a2c00b2f719eadbe83137611a92a
|
[
"MIT"
] | null | null | null |
raiden/blockchain_events_handler.py
|
anmolshl/raiden
|
f1cecb68cb43a2c00b2f719eadbe83137611a92a
|
[
"MIT"
] | null | null | null |
import gevent
import structlog
from eth_utils import to_canonical_address
from raiden.blockchain.events import get_channel_proxies, decode_event_to_internal
from raiden.blockchain.state import get_channel_state
from raiden.connection_manager import ConnectionManager
from raiden.transfer import views
from raiden.utils import pex, data_decoder
from raiden.transfer.state import (
TransactionChannelNewBalance,
TokenNetworkState,
)
from raiden.transfer.state_change import (
ContractReceiveChannelClosed,
ContractReceiveChannelNew,
ContractReceiveChannelNewBalance,
ContractReceiveChannelSettled,
ContractReceiveChannelUnlock,
ContractReceiveNewTokenNetwork,
ContractReceiveSecretReveal,
ContractReceiveRouteNew,
)
from raiden.blockchain.abi import (
EVENT_TOKEN_ADDED,
EVENT_TOKEN_ADDED2,
EVENT_CHANNEL_NEW,
EVENT_CHANNEL_NEW2,
EVENT_CHANNEL_NEW_BALANCE,
EVENT_CHANNEL_NEW_BALANCE2,
EVENT_CHANNEL_WITHDRAW,
EVENT_CHANNEL_UNLOCK,
EVENT_BALANCE_PROOF_UPDATED,
EVENT_TRANSFER_UPDATED,
EVENT_CHANNEL_CLOSED,
EVENT_CHANNEL_SETTLED,
EVENT_CHANNEL_SECRET_REVEALED,
EVENT_CHANNEL_SECRET_REVEALED2,
)
log = structlog.get_logger(__name__) # pylint: disable=invalid-name
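# Overview note: each handler below translates one decoded on-chain event into
# the corresponding ``ContractReceive*`` state change and feeds it to
# ``raiden.handle_state_change`` together with the block number at which the
# event was observed.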
def handle_tokennetwork_new(raiden, event, current_block_number):
data = event.event_data
manager_address = data['channel_manager_address']
registry_address = data['registry_address']
registry = raiden.chain.registry(registry_address)
manager_proxy = registry.manager(manager_address)
netting_channel_proxies = get_channel_proxies(raiden.chain, raiden.address, manager_proxy)
    # Install the filters first to avoid missing changes; as a consequence,
    # some events might be applied twice.
raiden.blockchain_events.add_channel_manager_listener(manager_proxy)
for channel_proxy in netting_channel_proxies:
raiden.blockchain_events.add_netting_channel_listener(channel_proxy)
token_address = data_decoder(event.event_data['args']['token_address'])
token_network_state = TokenNetworkState(
manager_address,
token_address,
)
new_payment_network = ContractReceiveNewTokenNetwork(
event.originating_contract,
token_network_state,
)
raiden.handle_state_change(new_payment_network, current_block_number)
def handle_tokennetwork_new2(raiden, event, current_block_number):
""" Handles a `TokenNetworkCreated` event. """
data = event.event_data
token_network_address = data['token_network_address']
token_network_registry_address = event.originating_contract
token_network_registry_proxy = raiden.chain.token_network_registry(
token_network_registry_address,
)
token_network_proxy = token_network_registry_proxy.token_network(token_network_address)
raiden.blockchain_events.add_token_network_listener(token_network_proxy)
token_address = data_decoder(event.event_data['args']['token_address'])
token_network_state = TokenNetworkState(
token_network_address,
token_address,
)
new_token_network = ContractReceiveNewTokenNetwork(
event.originating_contract,
token_network_state,
)
raiden.handle_state_change(new_token_network, current_block_number)
def handle_channel_new(raiden, event, current_block_number):
data = event.event_data
registry_address = data['registry_address']
token_network_address = event.originating_contract
participant1 = data['participant1']
participant2 = data['participant2']
is_participant = raiden.address in (participant1, participant2)
if is_participant:
channel_proxy = raiden.chain.netting_channel(data['netting_channel'])
token_address = channel_proxy.token_address()
channel_state = get_channel_state(
token_address,
token_network_address,
raiden.config['reveal_timeout'],
channel_proxy,
)
new_channel = ContractReceiveChannelNew(
token_network_address,
channel_state,
)
raiden.handle_state_change(new_channel, current_block_number)
partner_address = channel_state.partner_state.address
connection_manager = raiden.connection_manager_for_token(
registry_address, token_address,
)
if ConnectionManager.BOOTSTRAP_ADDR != partner_address:
raiden.start_health_check_for(partner_address)
gevent.spawn(connection_manager.retry_connect, registry_address)
# Start the listener *after* the channel is registered, to avoid None
# exceptions (and not applying the event state change).
#
# TODO: install the filter on the same block or previous block in which
# the channel state was queried
raiden.blockchain_events.add_netting_channel_listener(channel_proxy)
else:
new_route = ContractReceiveRouteNew(
token_network_address,
participant1,
participant2,
)
raiden.handle_state_change(new_route, current_block_number)
def handle_channel_new_balance(raiden, event, current_block_number):
data = event.event_data
registry_address = data['registry_address']
channel_identifier = event.originating_contract
token_address = data['token_address']
participant_address = data['participant']
new_balance = data['balance']
deposit_block_number = data['block_number']
previous_channel_state = views.get_channelstate_by_tokenaddress(
views.state_from_raiden(raiden),
registry_address,
token_address,
channel_identifier,
)
# Channels will only be registered if this node is a participant
is_participant = previous_channel_state is not None
if is_participant:
previous_balance = previous_channel_state.our_state.contract_balance
balance_was_zero = previous_balance == 0
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_raiden(raiden),
registry_address,
token_address,
)
deposit_transaction = TransactionChannelNewBalance(
participant_address,
new_balance,
deposit_block_number,
)
newbalance_statechange = ContractReceiveChannelNewBalance(
token_network_identifier,
channel_identifier,
deposit_transaction,
)
raiden.handle_state_change(newbalance_statechange, current_block_number)
if balance_was_zero:
connection_manager = raiden.connection_manager_for_token(
registry_address, token_address,
)
gevent.spawn(
connection_manager.join_channel,
registry_address,
participant_address,
new_balance,
)
def handle_channel_closed(raiden, event, current_block_number):
registry_address = event.event_data['registry_address']
channel_identifier = event.originating_contract
data = event.event_data
channel_state = views.search_for_channel(
views.state_from_raiden(raiden),
registry_address,
channel_identifier,
)
if channel_state:
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_raiden(raiden),
registry_address,
channel_state.token_address,
)
channel_closed = ContractReceiveChannelClosed(
token_network_identifier,
channel_identifier,
data['closing_address'],
data['block_number'],
)
raiden.handle_state_change(channel_closed, current_block_number)
def handle_channel_settled(raiden, event, current_block_number):
registry_address = event.event_data['registry_address']
data = event.event_data
channel_identifier = event.originating_contract
channel_state = views.search_for_channel(
views.state_from_raiden(raiden),
registry_address,
channel_identifier,
)
if channel_state:
token_network_identifier = views.get_token_network_identifier_by_token_address(
views.state_from_raiden(raiden),
registry_address,
channel_state.token_address,
)
channel_settled = ContractReceiveChannelSettled(
token_network_identifier,
channel_identifier,
data['block_number'],
)
raiden.handle_state_change(channel_settled, current_block_number)
def handle_channel_unlock(raiden, event, current_block_number):
channel_identifier = event.originating_contract
data = event.event_data
registry_address = data['registry_address']
channel_state = views.search_for_channel(
views.state_from_raiden(raiden),
registry_address,
channel_identifier,
)
if channel_state:
unlock_state_change = ContractReceiveChannelUnlock(
registry_address,
channel_state.token_address,
channel_identifier,
data['secret'],
data['receiver_address'],
)
raiden.handle_state_change(unlock_state_change, current_block_number)
def handle_secret_revealed(raiden, event, current_block_number):
secret_registry_address = event.originating_contract
data = event.event_data
registeredsecret_state_change = ContractReceiveSecretReveal(
secret_registry_address,
data['secrethash'],
data['secret'],
)
raiden.handle_state_change(registeredsecret_state_change, current_block_number)
def on_blockchain_event(raiden, event, current_block_number):
log.debug(
'EVENT',
node=pex(raiden.address),
chain_event=event,
block_number=current_block_number,
)
data = event.event_data
if data['event'] == EVENT_TOKEN_ADDED:
data['registry_address'] = to_canonical_address(data['args']['registry_address'])
data['channel_manager_address'] = to_canonical_address(
data['args']['channel_manager_address'],
)
handle_tokennetwork_new(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_NEW:
data['registry_address'] = to_canonical_address(data['args']['registry_address'])
data['participant1'] = to_canonical_address(data['args']['participant1'])
data['participant2'] = to_canonical_address(data['args']['participant2'])
handle_channel_new(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_NEW_BALANCE:
data['registry_address'] = to_canonical_address(data['args']['registry_address'])
data['token_address'] = to_canonical_address(data['args']['token_address'])
data['participant'] = to_canonical_address(data['args']['participant'])
data['balance'] = data['args']['balance']
handle_channel_new_balance(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_CLOSED:
data['registry_address'] = to_canonical_address(data['args']['registry_address'])
data['closing_address'] = to_canonical_address(data['args']['closing_address'])
handle_channel_closed(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_SETTLED:
data['registry_address'] = to_canonical_address(data['args']['registry_address'])
handle_channel_settled(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_SECRET_REVEALED:
data['registry_address'] = to_canonical_address(data['args']['registry_address'])
data['receiver_address'] = to_canonical_address(data['args']['receiver_address'])
data['secret'] = data['args']['secret']
handle_channel_unlock(raiden, event, current_block_number)
# fix for https://github.com/raiden-network/raiden/issues/1508
# balance proof updates are handled in the linked code, so no action is needed here
# https://github.com/raiden-network/raiden/blob/da54ef4b20fb006c126fcb091b18269314c2003b/raiden/transfer/channel.py#L1337-L1344 # noqa
elif data['event'] == EVENT_TRANSFER_UPDATED:
pass
else:
log.error('Unknown event type', event_name=data['event'], raiden_event=event)
def on_blockchain_event2(raiden, event, current_block_number):
log.debug('EVENT', node=pex(raiden.address), chain_event=event)
event = decode_event_to_internal(event)
data = event.event_data
if data['args'].get('channel_identifier'):
data['channel_identifier'] = data['args'].get('channel_identifier')
if data['event'] == EVENT_TOKEN_ADDED2:
handle_tokennetwork_new2(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_NEW2:
data['settle_timeout'] = data['args']['settle_timeout']
handle_channel_new(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_NEW_BALANCE2:
data['deposit'] = data['args']['deposit']
handle_channel_new_balance(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_WITHDRAW:
data['withdrawn_amount'] = data['args']['withdrawn_amount']
# handle_channel_withdraw(raiden, event)
raise NotImplementedError('handle_channel_withdraw not implemented yet')
elif data['event'] == EVENT_CHANNEL_UNLOCK:
data['unlocked_amount'] = data['args']['unlocked_amount']
data['returned_tokens'] = data['args']['returned_tokens']
# handle_channel_unlock(raiden, event)
raise NotImplementedError('handle_channel_unlock not implemented yet')
elif data['event'] == EVENT_BALANCE_PROOF_UPDATED:
# balance proof updates are handled in the linked code, so no action is needed here
# https://github.com/raiden-network/raiden/blob/da54ef4b20fb006c126fcb091b18269314c2003b/raiden/transfer/channel.py#L1337-L1344 # noqa
pass
elif data['event'] == EVENT_CHANNEL_CLOSED:
handle_channel_closed(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_SETTLED:
data['participant1_amount'] = data['args']['participant1_amount']
data['participant2_amount'] = data['args']['participant2_amount']
handle_channel_settled(raiden, event, current_block_number)
elif data['event'] == EVENT_CHANNEL_SECRET_REVEALED2:
data['secrethash'] = data['args']['secrethash']
data['secret'] = data['args']['secret']
handle_secret_revealed(raiden, event, current_block_number)
else:
log.error('Unknown event type', event_name=data['event'], raiden_event=event)
| 37.027569 | 143 | 0.715784 |
70b31fefbd2906d37a52237764a8ef1103f55500
| 1,255 |
py
|
Python
|
creepycraig_v2/pipelines.py
|
frankolson/creepycraig_v2
|
c6a14a6a28928b70ddd93cb5e0a3885b763082aa
|
[
"MIT"
] | null | null | null |
creepycraig_v2/pipelines.py
|
frankolson/creepycraig_v2
|
c6a14a6a28928b70ddd93cb5e0a3885b763082aa
|
[
"MIT"
] | null | null | null |
creepycraig_v2/pipelines.py
|
frankolson/creepycraig_v2
|
c6a14a6a28928b70ddd93cb5e0a3885b763082aa
|
[
"MIT"
] | null | null | null |
import pymongo
from scrapy.conf import settings
from creepycraig_v2.items import ApartmentItem, CarItem
class ApartmentPipeline(object):
def __init__(self):
connection = pymongo.MongoClient(
settings['MONGODB_SERVER'],
settings['MONGODB_PORT']
)
db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_APA_COLLECTION']]  # use the apartment collection
def process_item(self, item, spider):
if not isinstance(item, ApartmentItem):
            return item  # return the item to let another pipeline handle it
        self.collection.insert(dict(item))
        return item  # return the stored item so that any later pipelines still receive it
class CarPipeline(object):
def __init__(self):
connection = pymongo.MongoClient(
settings['MONGODB_SERVER'],
settings['MONGODB_PORT']
)
db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_CTO_COLLECTION']]  # use the car collection
    def process_item(self, item, spider):
        if not isinstance(item, CarItem):
            return item  # return the item to let another pipeline handle it
        # upsert keyed on the Craigslist id; a separate insert afterwards would create duplicates
        self.collection.update({'cl_id': item['cl_id']}, dict(item), upsert=True)
        return item
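# --- Usage sketch, not part of the original file ---
# A minimal example of the Scrapy settings these pipelines expect; in a real
# project this would live in settings.py, not here. The dotted pipeline paths
# follow the repository layout ('creepycraig_v2.pipelines'); the host, database
# name, collection names, and priority numbers are illustrative assumptions.
ITEM_PIPELINES = {
    'creepycraig_v2.pipelines.ApartmentPipeline': 300,
    'creepycraig_v2.pipelines.CarPipeline': 400,
}
MONGODB_SERVER = 'localhost'
MONGODB_PORT = 27017
MONGODB_DB = 'creepycraig'
MONGODB_APA_COLLECTION = 'apartments'
MONGODB_CTO_COLLECTION = 'cars'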
| 36.911765 | 92 | 0.664542 |
122e31db4b89452d1edd4b98a488c680ce34e129
| 3,187 |
py
|
Python
|
src/repro/plots/data.py
|
bouthilx/repro
|
611734e4eddd6a76dd4c1e7114a28a634a2a75c1
|
[
"BSD-3-Clause"
] | null | null | null |
src/repro/plots/data.py
|
bouthilx/repro
|
611734e4eddd6a76dd4c1e7114a28a634a2a75c1
|
[
"BSD-3-Clause"
] | null | null | null |
src/repro/plots/data.py
|
bouthilx/repro
|
611734e4eddd6a76dd4c1e7114a28a634a2a75c1
|
[
"BSD-3-Clause"
] | null | null | null |
import random
import json
from mahler.client import Client
mahler_client = Client()
db_client = mahler_client.registrar._db._db
VERSION_TAGS = ['v4.2.3', 'v4.2.4', 'v4.2.5', 'v4.2.6',
'v5.0.0']
N_POINTS = 10
colors = """\
#8dd3c7
#d3d394
#bebada
#fb8072
#80b1d3
#fdb462
#b3de69
#fccde5
#d9d9d9
#bc80bd
#ccebc5
#ffed6f""".split("\n")
MODELS = ['lenet', 'mobilenetv2', 'vgg11', 'vgg19', 'densenet121', 'densenet201', 'resnet18', 'resnet101']
DATASETS = ['mnist', 'cifar10', 'cifar100']
# points: 10
# models
# datasets
# colors (for models)
# data:
# mnist:
# lenet: [0.1, 0.3, ...]
# mobilenetv2: [0.1, 0.3, ...]
#
base = dict(
models=MODELS,
datasets=DATASETS,
colors=dict(zip(MODELS, colors)),
data=dict())
def dump(fetch_fct):
data = dict()
for dataset_name in DATASETS:
data[dataset_name] = dict()
for model_name in MODELS:
data[dataset_name][model_name] = fetch_fct(dataset_name, model_name)
return data
def dump_seed():
base['data'] = dump(fetch_seed)
with open('seed.json', 'w') as f:
f.write(json.dumps(base))
def dump_hpo():
base['data'] = dump(fetch_hpo)
with open('hpo.json', 'w') as f:
f.write(json.dumps(base))
def fetch_seed(dataset_name, model_name):
data = []
for version in VERSION_TAGS:
query = {}
query['registry.tags'] = {'$all': [version, model_name, dataset_name, 'seed']}
query['registry.status'] = 'Completed'
projection = {'output.last.test.error_rate': 1}
trials = db_client.tasks.report.find(query, projection=projection)
for trial in trials:
try:
data.append(trial['output']['last']['test']['error_rate'])
print('Adding true {:>20} {:>20}'.format(dataset_name, model_name))
except KeyError:
pass
if len(data) >= N_POINTS:
break
while len(data) < N_POINTS:
print('Adding fake {:>20} {:>20}'.format(dataset_name, model_name))
data.append(random.random())
return data
def fetch_hpo(dataset_name, model_name):
data = []
for version in VERSION_TAGS:
query = {}
query['registry.tags'] = {'$all': [version, model_name, dataset_name, 'distrib', 'min']}
query['registry.status'] = 'Completed'
projection = {'output.last.test.error_rate': 1}
trials = db_client.tasks.report.find(query, projection=projection)
for trial in trials:
try:
data.append(trial['output']['last']['test']['error_rate'])
print('Adding true {:>20} {:>20}'.format(dataset_name, model_name))
except KeyError:
pass
if len(data) >= N_POINTS:
break
while len(data) < N_POINTS:
print('Adding fake {:>20} {:>20}'.format(dataset_name, model_name))
data.append(random.random())
return data
if __name__ == "__main__":
print(" ---")
print(" SEEDS")
print(" ---")
dump_seed()
print("\n\n")
print(" ---")
print(" HPO")
print(" ---")
dump_hpo()
| 23.433824 | 106 | 0.571384 |
9773174732d9289b9b379fce6d8f8fd06584e872
| 2,055 |
py
|
Python
|
fn_stats.py
|
newsreader/vua-srl-postprocess
|
0b46bf0235c27444aff1bffb647e2143a7c2d332
|
[
"Apache-2.0"
] | null | null | null |
fn_stats.py
|
newsreader/vua-srl-postprocess
|
0b46bf0235c27444aff1bffb647e2143a7c2d332
|
[
"Apache-2.0"
] | null | null | null |
fn_stats.py
|
newsreader/vua-srl-postprocess
|
0b46bf0235c27444aff1bffb647e2143a7c2d332
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from KafNafParserPy import *
from rdflib import URIRef, Namespace
from rdflib.namespace import RDF,Namespace, NamespaceManager
from rdflib.graph import Graph
import sys
import os
path='example'
predicates_pos_fn = 0
predicates_neg_fn = 0
predicates_fn_total=0
roles_pos_fn = 0
roles_neg_fn = 0
roles_fn_total=0
for root, dirs, files in os.walk(path):
for inputfile in files:
try:
# Parse using the KafNafParser
my_parser = KafNafParser(root + "/" + inputfile)
except:
continue
# Iterate over the predicates and check for ESO predicates in the external references
for predicate in my_parser.get_predicates():
for ext_ref in predicate.get_external_references():
if ext_ref.get_resource()=='FrameNet+':
predicates_pos_fn+=1
elif ext_ref.get_resource()=='FrameNet-':
predicates_neg_fn+=1
elif ext_ref.get_resource()=='FrameNet':
predicates_fn_total+=1
# When there is an ESO choice, iterate through the roles and identify the right FrameNet meanings there as well
for role in predicate.get_roles():
for role_ext_ref in role.get_external_references():
if role_ext_ref.get_resource()=='FrameNet+':
roles_pos_fn+=1
elif role_ext_ref.get_resource()=='FrameNet-':
roles_neg_fn+=1
elif role_ext_ref.get_resource()=='FrameNet':
roles_fn_total+=1
print "Positive FrameNet predicates: " + str(predicates_pos_fn)
print "Negative FrameNet predicates:" + str(predicates_neg_fn)
print "Unjudged FrameNet predicates: " + str(predicates_fn_total)
print "Positive FrameNet roles: " + str(roles_pos_fn)
print "Negative FrameNet roles:" + str(roles_neg_fn)
print "Unjudged FrameNet roles: " + str(roles_fn_total)
| 34.25 | 127 | 0.624818 |
8189a1ca7926edd8b95c8a4bc8cffd0b233e4cce
| 8,958 |
py
|
Python
|
hubspot/crm/companies/models/error.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/companies/models/error.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/companies/models/error.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Companies
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.companies.configuration import Configuration
class Error(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'message': 'str',
'correlation_id': 'str',
'category': 'str',
'sub_category': 'str',
'errors': 'list[ErrorDetail]',
'context': 'dict(str, list[str])',
'links': 'dict(str, str)'
}
attribute_map = {
'message': 'message',
'correlation_id': 'correlationId',
'category': 'category',
'sub_category': 'subCategory',
'errors': 'errors',
'context': 'context',
'links': 'links'
}
def __init__(self, message=None, correlation_id=None, category=None, sub_category=None, errors=None, context=None, links=None, local_vars_configuration=None): # noqa: E501
"""Error - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._message = None
self._correlation_id = None
self._category = None
self._sub_category = None
self._errors = None
self._context = None
self._links = None
self.discriminator = None
self.message = message
self.correlation_id = correlation_id
self.category = category
if sub_category is not None:
self.sub_category = sub_category
if errors is not None:
self.errors = errors
if context is not None:
self.context = context
if links is not None:
self.links = links
@property
def message(self):
"""Gets the message of this Error. # noqa: E501
A human readable message describing the error along with remediation steps where appropriate # noqa: E501
:return: The message of this Error. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this Error.
A human readable message describing the error along with remediation steps where appropriate # noqa: E501
:param message: The message of this Error. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and message is None: # noqa: E501
raise ValueError("Invalid value for `message`, must not be `None`") # noqa: E501
self._message = message
@property
def correlation_id(self):
"""Gets the correlation_id of this Error. # noqa: E501
A unique identifier for the request. Include this value with any error reports or support tickets # noqa: E501
:return: The correlation_id of this Error. # noqa: E501
:rtype: str
"""
return self._correlation_id
@correlation_id.setter
def correlation_id(self, correlation_id):
"""Sets the correlation_id of this Error.
A unique identifier for the request. Include this value with any error reports or support tickets # noqa: E501
:param correlation_id: The correlation_id of this Error. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and correlation_id is None: # noqa: E501
raise ValueError("Invalid value for `correlation_id`, must not be `None`") # noqa: E501
self._correlation_id = correlation_id
@property
def category(self):
"""Gets the category of this Error. # noqa: E501
The error category # noqa: E501
:return: The category of this Error. # noqa: E501
:rtype: str
"""
return self._category
@category.setter
def category(self, category):
"""Sets the category of this Error.
The error category # noqa: E501
:param category: The category of this Error. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and category is None: # noqa: E501
raise ValueError("Invalid value for `category`, must not be `None`") # noqa: E501
self._category = category
@property
def sub_category(self):
"""Gets the sub_category of this Error. # noqa: E501
A specific category that contains more specific detail about the error # noqa: E501
:return: The sub_category of this Error. # noqa: E501
:rtype: str
"""
return self._sub_category
@sub_category.setter
def sub_category(self, sub_category):
"""Sets the sub_category of this Error.
A specific category that contains more specific detail about the error # noqa: E501
:param sub_category: The sub_category of this Error. # noqa: E501
:type: str
"""
self._sub_category = sub_category
@property
def errors(self):
"""Gets the errors of this Error. # noqa: E501
further information about the error # noqa: E501
:return: The errors of this Error. # noqa: E501
:rtype: list[ErrorDetail]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this Error.
further information about the error # noqa: E501
:param errors: The errors of this Error. # noqa: E501
:type: list[ErrorDetail]
"""
self._errors = errors
@property
def context(self):
"""Gets the context of this Error. # noqa: E501
Context about the error condition # noqa: E501
:return: The context of this Error. # noqa: E501
:rtype: dict(str, list[str])
"""
return self._context
@context.setter
def context(self, context):
"""Sets the context of this Error.
Context about the error condition # noqa: E501
:param context: The context of this Error. # noqa: E501
:type: dict(str, list[str])
"""
self._context = context
@property
def links(self):
"""Gets the links of this Error. # noqa: E501
A map of link names to associated URIs containing documentation about the error or recommended remediation steps # noqa: E501
:return: The links of this Error. # noqa: E501
:rtype: dict(str, str)
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Error.
A map of link names to associated URIs containing documentation about the error or recommended remediation steps # noqa: E501
:param links: The links of this Error. # noqa: E501
:type: dict(str, str)
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Error):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Error):
return True
return self.to_dict() != other.to_dict()
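# --- Usage sketch, not part of the generated file ---
# Minimal example of constructing the model directly and round-tripping it
# through its dict representation; the field values are illustrative only.
if __name__ == "__main__":
    example = Error(
        message="Resource not found",
        correlation_id="aeb5f871-7f07-4993-9211-075dc63e7cbf",
        category="OBJECT_NOT_FOUND",
    )
    as_dict = example.to_dict()
    assert Error(**as_dict) == example
    print(example)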
| 30.469388 | 176 | 0.599576 |
fcdbf80092bca71a094e68e797eaf72ec772df2d
| 1,590 |
py
|
Python
|
internal/notes/builtin-SAVE/packages/py-netifaces/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 1 |
2019-01-17T20:07:19.000Z
|
2019-01-17T20:07:19.000Z
|
internal/notes/builtin-SAVE/packages/py-netifaces/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | null | null | null |
internal/notes/builtin-SAVE/packages/py-netifaces/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 2 |
2019-08-06T18:13:57.000Z
|
2021-11-05T18:19:49.000Z
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyNetifaces(PythonPackage):
"""Portable network interface information"""
homepage = "https://bitbucket.org/al45tair/netifaces"
url = "https://pypi.io/packages/source/n/netifaces/netifaces-0.10.5.tar.gz"
version('0.10.5', '5b4d1f1310ed279e6df27ef3a9b71519')
depends_on('py-setuptools', type='build')
| 42.972973 | 84 | 0.679874 |
b66759bba3a5b2f1256a998264ca2c4c886c4fae
| 2,505 |
py
|
Python
|
chainer_chemistry/utils/permutation.py
|
jo7ueb/chainer-chemistry
|
74286aef6b53f272d1677b8e226bb7200a3922a8
|
[
"MIT"
] | null | null | null |
chainer_chemistry/utils/permutation.py
|
jo7ueb/chainer-chemistry
|
74286aef6b53f272d1677b8e226bb7200a3922a8
|
[
"MIT"
] | null | null | null |
chainer_chemistry/utils/permutation.py
|
jo7ueb/chainer-chemistry
|
74286aef6b53f272d1677b8e226bb7200a3922a8
|
[
"MIT"
] | null | null | null |
import numpy
def permute_node(node, permutation_index, axis=-1):
"""Permute index of `node` array
Args:
node (numpy.ndarray): the array whose `axis` to be permuted.
permutation_index (numpy.ndarray): 1d numpy array whose size should be
same as permutation axis of `node`.
axis (int): permutation axis.
Returns (numpy.ndarray): permutated `node` array.
"""
if node.shape[axis] != len(permutation_index):
raise ValueError(
'node.shape[{}] = {} and len(permutation_index) = {} do not match!'
.format(axis, node.shape[axis], len(permutation_index)))
out_node = numpy.take(node, permutation_index, axis=axis).copy()
return out_node
def permute_adj(adj, permutation_index, axis=None):
"""Permute index of adjacency matrix array
Args:
adj (numpy.ndarray): the array whose `axis` to be permuted.
It is considered as adjacency matrix.
permutation_index (numpy.ndarray): 1d numpy array whose size should be
same as permutation axis of `node`.
axis (list or tuple or None): list of 2d int, indicates the permutation
axis. When None is passed (default), it uses -1 and -2 as `axis`,
it means that last 2 axis are considered to be permuted.
Returns (numpy.ndarray): permutated `adj` array.
"""
if axis is not None:
if not isinstance(axis, (list, tuple)):
raise TypeError('axis must be list or tuple, got {}'
.format(type(axis)))
if len(axis) != 2:
raise ValueError('axis length must 2, got {}'.format(len(axis)))
else:
axis = [-1, -2] # default value is to use last 2 axis
num_node = len(permutation_index)
for ax in axis:
if adj.shape[ax] != len(permutation_index):
raise ValueError(
'adj.shape[{}] = {} and len(permutation_index) = {} do not '
                'match!'.format(ax, adj.shape[ax], len(permutation_index)))
out_adj = numpy.zeros_like(adj)
ndim = adj.ndim
for i in range(num_node):
for j in range(num_node):
in_indices = [slice(None)] * ndim
out_indices = [slice(None)] * ndim
in_indices[axis[0]] = i
in_indices[axis[1]] = j
out_indices[axis[0]] = permutation_index[i]
out_indices[axis[1]] = permutation_index[j]
            # index lists must be converted to tuples for NumPy multi-axis indexing
            out_adj[tuple(in_indices)] = adj[tuple(out_indices)]
return out_adj
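# --- Usage sketch, not part of the original module ---
# Self-contained example showing a node feature matrix and its adjacency
# matrix being reordered with the same permutation; shapes and values are
# illustrative only.
if __name__ == '__main__':
    node = numpy.arange(12).reshape(3, 4)         # 3 features x 4 nodes
    adj = numpy.eye(4)                            # 4 x 4 adjacency matrix
    perm = numpy.array([2, 0, 3, 1])              # new node ordering
    print(permute_node(node, perm, axis=-1))
    print(permute_adj(adj, perm))                 # permutes the last two axes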
| 37.38806 | 79 | 0.603593 |
c05c6c62691cf1a1fee00855c8cc2995d5212233
| 3,209 |
py
|
Python
|
setup.py
|
frmdstryr/atom
|
37563874ecc732db1554d5a90824a4428413ae7e
|
[
"BSD-3-Clause-Clear"
] | 1 |
2021-04-10T13:32:49.000Z
|
2021-04-10T13:32:49.000Z
|
setup.py
|
frmdstryr/atom
|
37563874ecc732db1554d5a90824a4428413ae7e
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
setup.py
|
frmdstryr/atom
|
37563874ecc732db1554d5a90824a4428413ae7e
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
#------------------------------------------------------------------------------
# Copyright (c) 2013-2019, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import os
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
sys.path.insert(0, os.path.abspath('.'))
from atom.version import __version__
ext_modules = [
Extension(
'atom.catom',
[
'atom/src/atomlist.cpp',
'atom/src/atomref.cpp',
'atom/src/catom.cpp',
'atom/src/catommodule.cpp',
'atom/src/defaultvaluebehavior.cpp',
'atom/src/delattrbehavior.cpp',
'atom/src/enumtypes.cpp',
'atom/src/eventbinder.cpp',
'atom/src/getattrbehavior.cpp',
'atom/src/member.cpp',
'atom/src/memberchange.cpp',
'atom/src/methodwrapper.cpp',
'atom/src/observerpool.cpp',
'atom/src/postgetattrbehavior.cpp',
'atom/src/postsetattrbehavior.cpp',
'atom/src/postvalidatebehavior.cpp',
'atom/src/propertyhelper.cpp',
'atom/src/setattrbehavior.cpp',
'atom/src/signalconnector.cpp',
'atom/src/validatebehavior.cpp',
],
language='c++',
),
Extension(
'atom.datastructures.sortedmap',
['atom/src/sortedmap.cpp'],
language='c++',
),
]
class BuildExt(build_ext):
""" A custom build extension for adding compiler-specific options.
"""
c_opts = {
'msvc': ['/EHsc']
}
def initialize_options(self):
build_ext.initialize_options(self)
self.debug = False
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
for ext in self.extensions:
ext.extra_compile_args = opts
build_ext.build_extensions(self)
setup(
name='atom',
version=__version__,
author='The Nucleic Development Team',
author_email='[email protected]',
url='https://github.com/nucleic/atom',
description='Memory efficient Python objects',
long_description=open('README.rst').read(),
license='BSD',
classifiers=[
# https://pypi.org/pypi?%3Aaction=list_classifiers
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
install_requires=['setuptools'],
packages=find_packages(exclude=['tests', 'tests.*']),
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExt},
)
| 32.744898 | 79 | 0.570583 |
316d7a245fe9c899356815c10029a5d41fb46883
| 1,128 |
py
|
Python
|
camera.py
|
williamliusea/CarND-Advanced-Lane-Lines
|
cccbfdffbafe4d457242bac8a5e74fc0ea38d3b1
|
[
"MIT"
] | null | null | null |
camera.py
|
williamliusea/CarND-Advanced-Lane-Lines
|
cccbfdffbafe4d457242bac8a5e74fc0ea38d3b1
|
[
"MIT"
] | null | null | null |
camera.py
|
williamliusea/CarND-Advanced-Lane-Lines
|
cccbfdffbafe4d457242bac8a5e74fc0ea38d3b1
|
[
"MIT"
] | null | null | null |
import matplotlib.image as mpimg
import glob
import numpy as np
import cv2
import pickle
import sys
class Camera():
def load(self, filename):
self.mtx, self.dist = pickle.load( open( filename, "rb" ) )
def calibration(self, filename = None, nx = 9, ny = 6):
images = glob.glob('camera_cal/*.jpg')
objp = np.zeros((nx*ny,3), np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)
objpoints = []
imgpoints = []
for name in images:
img = mpimg.imread(name)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)
if ret == True:
imgpoints.append(corners)
objpoints.append(objp)
ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
        if filename is not None:
pickle.dump((self.mtx, self.dist), open( filename, "wb"))
def undistort(self, img):
undist = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
return undist
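# --- Usage sketch, not part of the original file ---
# Typical calibrate-once / undistort-many flow. The pickle and image file
# names are illustrative assumptions; calibration() reads its chessboard
# images from the hard-coded 'camera_cal/*.jpg' pattern above.
if __name__ == '__main__':
    camera = Camera()
    camera.calibration(filename='calibration.p')   # compute and cache mtx/dist
    # camera.load('calibration.p')                 # or reuse a cached result
    test_img = mpimg.imread('test_images/test1.jpg')
    undistorted = camera.undistort(test_img)
    mpimg.imsave('output_images/test1_undistorted.jpg', undistorted)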
| 33.176471 | 120 | 0.587766 |
5703f22b1714a6700ba7f6c927c9101fec90ccad
| 1,837 |
py
|
Python
|
xiaobu/robot/constants.py
|
hanyanze/FS_AILPB
|
7756551cf926aa6296ec851dd696c97d56e06bca
|
[
"Apache-2.0"
] | null | null | null |
xiaobu/robot/constants.py
|
hanyanze/FS_AILPB
|
7756551cf926aa6296ec851dd696c97d56e06bca
|
[
"Apache-2.0"
] | null | null | null |
xiaobu/robot/constants.py
|
hanyanze/FS_AILPB
|
7756551cf926aa6296ec851dd696c97d56e06bca
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8-*-
import os
import shutil
# Wukong main directory
APP_PATH = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
LIB_PATH = os.path.join(APP_PATH, "robot")
DATA_PATH = os.path.join(APP_PATH, "static")
TEMP_PATH = os.path.join(APP_PATH, "temp")
TEMPLATE_PATH = os.path.join(APP_PATH, "server", "templates")
PLUGIN_PATH = os.path.join(APP_PATH, "plugins")
DEFAULT_CONFIG_NAME = 'default.yml'
CUSTOM_CONFIG_NAME = 'config.yml'
CONFIG_PATH = os.path.expanduser(
os.getenv('WUKONG_CONFIG', '~/xiaobu-smartHome/.xiaobu')
)
CONTRIB_PATH = os.path.expanduser(
os.getenv('WUKONG_CONFIG', '~/xiaobu-smartHome/.xiaobu/contrib')
)
CUSTOM_PATH = os.path.expanduser(
os.getenv('WUKONG_CONFIG', '~/xiaobu-smartHome/.xiaobu/custom')
)
def getConfigPath():
"""
获取配置文件的路径
returns: 配置文件的存储路径
"""
return os.path.join(CONFIG_PATH, CUSTOM_CONFIG_NAME)
def getQAPath():
"""
获取QA数据集文件的路径
returns: QA数据集文件的存储路径
"""
qa_source = os.path.join(DATA_PATH, 'qa.csv')
qa_dst = os.path.join(CONFIG_PATH, 'qa.csv')
if not os.path.exists(qa_dst):
shutil.copyfile(qa_source, qa_dst)
return qa_dst
def getConfigData(*fname):
"""
获取配置目录下的指定文件的路径
:param *fname: 指定文件名。如果传多个,则自动拼接
:returns: 配置目录下的某个文件的存储路径
"""
return os.path.join(CONFIG_PATH, *fname)
def getData(*fname):
"""
获取资源目录下指定文件的路径
:param *fname: 指定文件名。如果传多个,则自动拼接
:returns: 配置文件的存储路径
"""
return os.path.join(DATA_PATH, *fname)
def getDefaultConfigPath():
return getData(DEFAULT_CONFIG_NAME)
def newConfig():
shutil.copyfile(getDefaultConfigPath(), getConfigPath())
def getHotwordModel(fname):
if os.path.exists(getData(fname)):
return getData(fname)
else:
return getConfigData(fname)
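# --- Usage sketch, not part of the original module ---
# Illustrative only: resolve the active config file (creating it from the
# bundled default on first run) and build a path to a bundled resource; the
# 'audio/beep_hi.wav' name is an assumption.
if __name__ == '__main__':
    if not os.path.exists(getConfigPath()):
        newConfig()  # copy default.yml into the user's config directory
    print('config file:', getConfigPath())
    print('resource path:', getData('audio', 'beep_hi.wav'))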
| 23.857143 | 68 | 0.685357 |
e7d4781c8975496cf4f97dd3003d2a1395f3a4cc
| 7,650 |
py
|
Python
|
devel/lib/python2.7/dist-packages/ferns_detector/msg/_ModelStatus.py
|
gentaiscool/ros-vrep-slam
|
afae1b35eb2538fcc9ff68952b85c5e4791c46d2
|
[
"MIT"
] | 39 |
2018-08-28T21:28:07.000Z
|
2022-03-12T10:30:40.000Z
|
devel/lib/python2.7/dist-packages/ferns_detector/msg/_ModelStatus.py
|
zliucr/ros-vrep-slam
|
afae1b35eb2538fcc9ff68952b85c5e4791c46d2
|
[
"MIT"
] | 1 |
2019-03-14T09:23:56.000Z
|
2019-04-27T15:54:53.000Z
|
devel/lib/python2.7/dist-packages/ferns_detector/msg/_ModelStatus.py
|
zliucr/ros-vrep-slam
|
afae1b35eb2538fcc9ff68952b85c5e4791c46d2
|
[
"MIT"
] | 14 |
2018-07-12T06:59:48.000Z
|
2021-03-31T08:27:39.000Z
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from ferns_detector/ModelStatus.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class ModelStatus(genpy.Message):
_md5sum = "6f51fe3ba1a4dacfef45b8036b90a8d3"
_type = "ferns_detector/ModelStatus"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
string model
bool loaded
bool active
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','model','loaded','active']
_slot_types = ['std_msgs/Header','string','bool','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,model,loaded,active
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ModelStatus, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.model is None:
self.model = ''
if self.loaded is None:
self.loaded = False
if self.active is None:
self.active = False
else:
self.header = std_msgs.msg.Header()
self.model = ''
self.loaded = False
self.active = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.model
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2B().pack(_x.loaded, _x.active))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model = str[start:end].decode('utf-8')
else:
self.model = str[start:end]
_x = self
start = end
end += 2
(_x.loaded, _x.active,) = _get_struct_2B().unpack(str[start:end])
self.loaded = bool(self.loaded)
self.active = bool(self.active)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.model
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2B().pack(_x.loaded, _x.active))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model = str[start:end].decode('utf-8')
else:
self.model = str[start:end]
_x = self
start = end
end += 2
(_x.loaded, _x.active,) = _get_struct_2B().unpack(str[start:end])
self.loaded = bool(self.loaded)
self.active = bool(self.active)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2B = None
def _get_struct_2B():
global _struct_2B
if _struct_2B is None:
_struct_2B = struct.Struct("<2B")
return _struct_2B
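# --- Usage sketch, not part of the generated message module ---
# Round-trip a ModelStatus message through its wire format, much as the ROS
# client library does internally; BytesIO stands in for the transport buffer
# and the field values are illustrative only.
if __name__ == '__main__':
    from io import BytesIO
    msg = ModelStatus(model='book_cover', loaded=True, active=False)
    buff = BytesIO()
    msg.serialize(buff)
    decoded = ModelStatus()
    decoded.deserialize(buff.getvalue())
    print(decoded.model, decoded.loaded, decoded.active)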
| 33.552632 | 145 | 0.621307 |
5537d64bd8b1a142b9f516e683e538eedb2c9003
| 9,635 |
py
|
Python
|
configs/example/se.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 17 |
2019-02-05T03:44:40.000Z
|
2022-03-31T07:34:08.000Z
|
configs/example/se.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | null | null | null |
configs/example/se.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 12 |
2019-03-15T06:29:25.000Z
|
2022-02-01T19:37:31.000Z
|
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
from __future__ import print_function
import optparse
import sys
import os
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal, warn
addToPath('../')
from ruby import Ruby
from common import Options
from common import Simulation
from common import CacheConfig
from common import CpuConfig
from common import BPConfig
from common import MemConfig
from common.Caches import *
from common.cpu2000 import *
def get_processes(options):
"""Interprets provided options and returns a list of processes"""
multiprocesses = []
inputs = []
outputs = []
errouts = []
pargs = []
workloads = options.cmd.split(';')
if options.input != "":
inputs = options.input.split(';')
if options.output != "":
outputs = options.output.split(';')
if options.errout != "":
errouts = options.errout.split(';')
if options.options != "":
pargs = options.options.split(';')
idx = 0
for wrkld in workloads:
process = Process(pid = 100 + idx)
process.executable = wrkld
process.cwd = os.getcwd()
if options.env:
with open(options.env, 'r') as f:
process.env = [line.rstrip() for line in f]
if len(pargs) > idx:
process.cmd = [wrkld] + pargs[idx].split()
else:
process.cmd = [wrkld]
if len(inputs) > idx:
process.input = inputs[idx]
if len(outputs) > idx:
process.output = outputs[idx]
if len(errouts) > idx:
process.errout = errouts[idx]
multiprocesses.append(process)
idx += 1
if options.smt:
assert(options.cpu_type == "DerivO3CPU")
return multiprocesses, idx
else:
return multiprocesses, 1
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
if '--ruby' in sys.argv:
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print("Error: script doesn't take any positional arguments")
sys.exit(1)
multiprocesses = []
numThreads = 1
if options.bench:
apps = options.bench.split("-")
if len(apps) != options.num_cpus:
print("number of benchmarks not equal to set num_cpus!")
sys.exit(1)
for app in apps:
try:
if buildEnv['TARGET_ISA'] == 'alpha':
exec("workload = %s('alpha', 'tru64', '%s')" % (
app, options.spec_input))
elif buildEnv['TARGET_ISA'] == 'arm':
exec("workload = %s('arm_%s', 'linux', '%s')" % (
app, options.arm_iset, options.spec_input))
else:
exec("workload = %s(buildEnv['TARGET_ISA', 'linux', '%s')" % (
app, options.spec_input))
multiprocesses.append(workload.makeProcess())
except:
print("Unable to find workload for %s: %s" %
(buildEnv['TARGET_ISA'], app),
file=sys.stderr)
sys.exit(1)
elif options.cmd:
multiprocesses, numThreads = get_processes(options)
else:
print("No workload specified. Exiting!\n", file=sys.stderr)
sys.exit(1)
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.numThreads = numThreads
# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
mem_mode = test_mem_mode,
mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size)
if numThreads > 1:
system.multi_thread = True
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
system.cpu_voltage_domain)
# If elastic tracing is enabled, then configure the cpu and attach the elastic
# trace probe
if options.elastic_trace_en:
CpuConfig.config_etrace(CPUClass, system.cpu, options)
# All cpus belong to a common cpu_clk_domain, therefore running at a common
# frequency.
for cpu in system.cpu:
cpu.clk_domain = system.cpu_clk_domain
if CpuConfig.is_kvm_cpu(CPUClass) or CpuConfig.is_kvm_cpu(FutureClass):
if buildEnv['TARGET_ISA'] == 'x86':
system.kvm_vm = KvmVM()
for process in multiprocesses:
process.useArchPT = True
process.kvmInSE = True
else:
fatal("KvmCPU can only be used in SE mode with x86")
# Sanity check
if options.simpoint_profile:
if not CpuConfig.is_noncaching_cpu(CPUClass):
fatal("SimPoint/BPProbe should be done with an atomic cpu")
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
for i in xrange(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
system.cpu[i].workload = multiprocesses[0]
else:
system.cpu[i].workload = multiprocesses[i]
if options.simpoint_profile:
system.cpu[i].addSimPointProbe(options.simpoint_interval)
if options.checker:
system.cpu[i].addCheckerCpu()
if options.bp_type:
bpClass = BPConfig.get(options.bp_type)
system.cpu[i].branchPred = bpClass()
system.cpu[i].createThreads()
if options.ruby:
Ruby.create_system(options, False, system)
assert(options.num_cpus == len(system.ruby._cpu_ports))
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
for i in xrange(np):
ruby_port = system.ruby._cpu_ports[i]
# Create the interrupt controller and connect its ports to Ruby
# Note that the interrupt controller is always present but only
# in x86 does it have message ports that need to be connected
system.cpu[i].createInterruptController()
# Connect the cpu's cache ports to Ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].interrupts[0].pio = ruby_port.master
system.cpu[i].interrupts[0].int_master = ruby_port.slave
system.cpu[i].interrupts[0].int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
else:
MemClass = Simulation.setMemClass(options)
system.membus = SystemXBar()
system.system_port = system.membus.slave
CacheConfig.config_cache(options, system)
MemConfig.config_mem(options, system)
m5.ticks.setGlobalFrequency('500ps')
m5.disableAllListeners()
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
| 34.658273 | 79 | 0.6767 |
7b1219818b3e56cfa4a110d3476bc5d633a4a5f4
| 213 |
py
|
Python
|
SensorApp/apps.py
|
rkmakwana/SensorApp
|
a8ea03b4981ea8607d92a7682201714cb1d6b4ae
|
[
"MIT"
] | null | null | null |
SensorApp/apps.py
|
rkmakwana/SensorApp
|
a8ea03b4981ea8607d92a7682201714cb1d6b4ae
|
[
"MIT"
] | null | null | null |
SensorApp/apps.py
|
rkmakwana/SensorApp
|
a8ea03b4981ea8607d92a7682201714cb1d6b4ae
|
[
"MIT"
] | null | null | null |
from importlib import import_module
from django.apps import AppConfig as BaseAppConfig
class AppConfig(BaseAppConfig):
name = "SensorApp"
def ready(self):
import_module("SensorApp.receivers")
| 17.75 | 50 | 0.746479 |
66664fe09304a80eefc560e1429ec847ffd02502
| 303 |
py
|
Python
|
tests/test_unsubscribe.py
|
Squad002/GoOutSafe-Monolith
|
4ee5a32668730626ee5bd314c98515e7d7253e13
|
[
"MIT"
] | null | null | null |
tests/test_unsubscribe.py
|
Squad002/GoOutSafe-Monolith
|
4ee5a32668730626ee5bd314c98515e7d7253e13
|
[
"MIT"
] | null | null | null |
tests/test_unsubscribe.py
|
Squad002/GoOutSafe-Monolith
|
4ee5a32668730626ee5bd314c98515e7d7253e13
|
[
"MIT"
] | null | null | null |
from urllib.parse import urlparse
from datetime import date
from flask import session
from .fixtures import app, client, db
from . import helpers
from monolith.models import User
def test_unsubscribe_view_is_available(client):
res = client.get("/unsubscribe")
assert res.status_code == 302
| 20.2 | 47 | 0.778878 |
6306c56584d39080f88d446782a6590a48b715d0
| 482 |
py
|
Python
|
data/scripts/templates/object/tangible/item/quest/hero_of_tatooine/shared_explosives.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20 |
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/item/quest/hero_of_tatooine/shared_explosives.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/item/quest/hero_of_tatooine/shared_explosives.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20 |
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/item/quest/hero_of_tatooine/shared_explosives.iff"
result.attribute_template_id = -1
result.stfName("quest_item_n","hero_of_tatooine_explosives")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 28.352941 | 86 | 0.742739 |
7c8dd78e8d151a58b4d131817361f5a11ed96177
| 8,456 |
py
|
Python
|
yolo.py
|
AaronJny/tf2-keras-yolo3
|
c68daf377f4155d8703d2b05bcc61d8adabf3115
|
[
"MIT"
] | 36 |
2019-12-22T03:35:00.000Z
|
2022-01-12T03:57:44.000Z
|
yolo.py
|
LiAnGGGGGG/tf2-keras-yolo3
|
c68daf377f4155d8703d2b05bcc61d8adabf3115
|
[
"MIT"
] | 2 |
2020-09-22T16:57:13.000Z
|
2021-08-25T02:31:53.000Z
|
yolo.py
|
LiAnGGGGGG/tf2-keras-yolo3
|
c68daf377f4155d8703d2b05bcc61d8adabf3115
|
[
"MIT"
] | 19 |
2019-12-24T01:35:52.000Z
|
2021-12-10T09:24:24.000Z
|
# -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
import tensorflow as tf
from PIL import Image, ImageFont, ImageDraw
from tensorflow.keras.layers import Input
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import multi_gpu_model
import settings
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
class YOLO(object):
_defaults = {
"model_path": settings.DEFAULT_MODEL_PATH,
"anchors_path": settings.DEFAULT_ANCHORS_PATH,
"classes_path": settings.DEFAULT_CLASSES_PATH,
"score": settings.SCORE,
"iou": settings.IOU,
"model_image_size": settings.MODEL_IMAGE_SIZE,
"gpu_num": settings.GPU_NUM,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.load_yolo_model()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def load_yolo_model(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors == 6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors // 2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors / len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
@tf.function
def compute_output(self, image_data, image_shape):
# Generate output tensor targets for filtered bounding boxes.
# self.input_image_shape = K.placeholder(shape=(2,))
self.input_image_shape = tf.constant(image_shape)
if self.gpu_num >= 2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model(image_data), self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.compute_output(image_data, [image.size[1], image.size[0]])
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
        return_value, frame = vid.read()
        if not return_value:
            break  # stop cleanly when the stream ends or a frame cannot be read
        image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
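# --- Usage sketch, not part of the original file ---
# Minimal single-image and video usage. Weight/anchor/class paths come from
# the settings module defaults above; the image and video file names here are
# illustrative assumptions.
if __name__ == '__main__':
    yolo = YOLO()                                   # or YOLO(score=0.4, model_path='...')
    annotated = yolo.detect_image(Image.open('docs/sample.jpg'))
    annotated.save('docs/sample_detected.jpg')
    # detect_video(yolo, 'docs/sample.mp4', output_path='docs/sample_out.avi')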
| 41.048544 | 110 | 0.607025 |