Dataset schema:
hexsha: string (len 40) | size: int64 (3 .. 1.03M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (len 3 .. 972) | max_stars_repo_name: string (len 6 .. 130) | max_stars_repo_head_hexsha: string (len 40 .. 78) | max_stars_repo_licenses: list (len 1 .. 10) | max_stars_count: int64 (1 .. 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (len 24, nullable) | max_stars_repo_stars_event_max_datetime: string (len 24, nullable)
max_issues_repo_path: string (len 3 .. 972) | max_issues_repo_name: string (len 6 .. 130) | max_issues_repo_head_hexsha: string (len 40 .. 78) | max_issues_repo_licenses: list (len 1 .. 10) | max_issues_count: int64 (1 .. 116k, nullable) | max_issues_repo_issues_event_min_datetime: string (len 24, nullable) | max_issues_repo_issues_event_max_datetime: string (len 24, nullable)
max_forks_repo_path: string (len 3 .. 972) | max_forks_repo_name: string (len 6 .. 130) | max_forks_repo_head_hexsha: string (len 40 .. 78) | max_forks_repo_licenses: list (len 1 .. 10) | max_forks_count: int64 (1 .. 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (len 24, nullable) | max_forks_repo_forks_event_max_datetime: string (len 24, nullable)
content: string (len 3 .. 1.03M) | avg_line_length: float64 (1.13 .. 941k) | max_line_length: int64 (2 .. 941k) | alphanum_fraction: float64 (0 .. 1)
hexsha: 1fdeb0491fcc982157101202d91f186c2a320c5d | size: 17 | ext: py | lang: Python
max_stars: practice.py @ wwwxxxx/firstPythonGit (a1b5e41153e74a700ac6de38df2676c585739358) | licenses: ["Apache-2.0"] | count: null | events: null .. null
max_issues: practice.py @ wwwxxxx/firstPythonGit (a1b5e41153e74a700ac6de38df2676c585739358) | licenses: ["Apache-2.0"] | count: null | events: null .. null
max_forks: practice.py @ wwwxxxx/firstPythonGit (a1b5e41153e74a700ac6de38df2676c585739358) | licenses: ["Apache-2.0"] | count: null | events: null .. null
content:
print("第一个git项目")
avg_line_length: 17 | max_line_length: 17 | alphanum_fraction: 0.764706
hexsha: b94c5d4c4bcd6bd8876a04de040f38c222d0cd49 | size: 1,662 | ext: py | lang: Python
max_stars: mindspore/ops/_op_impl/aicpu/strided_slice_grad.py @ i4oolish/mindspore (dac3be31d0f2c0a3516200f47af30980e566601b) | licenses: ["Apache-2.0"] | count: 2 | events: 2020-08-12T16:14:40.000Z .. 2020-12-04T03:05:57.000Z
max_issues: mindspore/ops/_op_impl/aicpu/strided_slice_grad.py @ dilingsong/mindspore (4276050f2494cfbf8682560a1647576f859991e8) | licenses: ["Apache-2.0"] | count: null | events: null .. null
max_forks: mindspore/ops/_op_impl/aicpu/strided_slice_grad.py @ dilingsong/mindspore (4276050f2494cfbf8682560a1647576f859991e8) | licenses: ["Apache-2.0"] | count: null | events: null .. null
content:
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""StridedSliceGrad op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
strided_slice_grad_op_info = AiCPURegOp("StridedSliceGradAICPU") \
.fusion_type("OPAQUE") \
.input(0, "dy", "required") \
.input(1, "shape", "required") \
.input(2, "begin", "required") \
.input(3, "end", "required") \
.input(4, "stride", "required") \
.output(0, "output", "required") \
.attr("begin_mask", "int") \
.attr("end_mask", "int") \
.attr("ellipsis_mask", "int") \
.attr("new_axis_mask", "int") \
.attr("shrink_axis_mask", "int") \
.dtype_format(DataType.F32_Default,
DataType.I32_Default,
DataType.I32_Default,
DataType.I32_Default,
DataType.I32_Default,
DataType.F32_Default) \
.get_op_info()
@op_info_register(strided_slice_grad_op_info)
def _strided_slice_grad_aicpu():
"""StridedSliceGrad AiCPU register"""
return
avg_line_length: 37.772727 | max_line_length: 81 | alphanum_fraction: 0.640794
hexsha: 2b17d5e2a9d621b3686fe7206a5877c3e0822a31 | size: 4,302 | ext: py | lang: Python
max_stars: rasa/engine/runner/dask.py @ tienhoang1994/rasa (a977a8f1fb2308bebe7becd2c024765528296bd6) | licenses: ["Apache-2.0"] | count: 3,603 | events: 2017-05-21T18:34:55.000Z .. 2019-04-16T11:58:09.000Z
max_issues: rasa/engine/runner/dask.py @ tienhoang1994/rasa (a977a8f1fb2308bebe7becd2c024765528296bd6) | licenses: ["Apache-2.0"] | count: 2,782 | events: 2017-05-21T20:36:15.000Z .. 2019-04-16T14:35:20.000Z
max_forks: rasa/engine/runner/dask.py @ tienhoang1994/rasa (a977a8f1fb2308bebe7becd2c024765528296bd6) | licenses: ["Apache-2.0"] | count: 1,337 | events: 2017-05-21T18:10:33.000Z .. 2019-04-16T09:14:42.000Z
content:
from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional, Text
import dask
from rasa.engine.exceptions import GraphRunError
from rasa.engine.graph import ExecutionContext, GraphNode, GraphNodeHook, GraphSchema
from rasa.engine.runner.interface import GraphRunner
from rasa.engine.storage.storage import ModelStorage
logger = logging.getLogger(__name__)
class DaskGraphRunner(GraphRunner):
"""Dask implementation of a `GraphRunner`."""
def __init__(
self,
graph_schema: GraphSchema,
model_storage: ModelStorage,
execution_context: ExecutionContext,
hooks: Optional[List[GraphNodeHook]] = None,
) -> None:
"""Initializes a `DaskGraphRunner`.
Args:
graph_schema: The graph schema that will be run.
model_storage: Storage which graph components can use to persist and load
themselves.
execution_context: Information about the current graph run to be passed to
each node.
hooks: These are called before and after the execution of each node.
"""
self._graph_schema = graph_schema
self._instantiated_nodes: Dict[Text, GraphNode] = self._instantiate_nodes(
graph_schema, model_storage, execution_context, hooks
)
self._execution_context: ExecutionContext = execution_context
@classmethod
def create(
cls,
graph_schema: GraphSchema,
model_storage: ModelStorage,
execution_context: ExecutionContext,
hooks: Optional[List[GraphNodeHook]] = None,
) -> DaskGraphRunner:
"""Creates the runner (see parent class for full docstring)."""
return cls(graph_schema, model_storage, execution_context, hooks)
@staticmethod
def _instantiate_nodes(
graph_schema: GraphSchema,
model_storage: ModelStorage,
execution_context: ExecutionContext,
hooks: Optional[List[GraphNodeHook]] = None,
) -> Dict[Text, GraphNode]:
return {
node_name: GraphNode.from_schema_node(
node_name, schema_node, model_storage, execution_context, hooks
)
for node_name, schema_node in graph_schema.nodes.items()
}
def _build_dask_graph(self, schema: GraphSchema) -> Dict[Text, Any]:
"""Builds a dask graph from the instantiated graph.
For more information about dask graphs
see: https://docs.dask.org/en/latest/spec.html
"""
run_graph = {
node_name: (
self._instantiated_nodes[node_name],
*schema_node.needs.values(),
)
for node_name, schema_node in schema.nodes.items()
}
return run_graph
def run(
self,
inputs: Optional[Dict[Text, Any]] = None,
targets: Optional[List[Text]] = None,
) -> Dict[Text, Any]:
"""Runs the graph (see parent class for full docstring)."""
run_targets = targets if targets else self._graph_schema.target_names
minimal_schema = self._graph_schema.minimal_graph_schema(run_targets)
run_graph = self._build_dask_graph(minimal_schema)
if inputs:
self._add_inputs_to_graph(inputs, run_graph)
logger.debug(
f"Running graph with inputs: {inputs}, targets: {targets} "
f"and {self._execution_context}."
)
try:
dask_result = dask.get(run_graph, run_targets)
return dict(dask_result)
except RuntimeError as e:
raise GraphRunError("Error running runner.") from e
@staticmethod
def _add_inputs_to_graph(inputs: Optional[Dict[Text, Any]], graph: Any) -> None:
if inputs is None:
return
for input_name, input_value in inputs.items():
if isinstance(input_value, str) and input_value in graph.keys():
raise GraphRunError(
f"Input value '{input_value}' clashes with a node name. Make sure "
f"that none of the input names passed to the `run` method are the "
f"same as node names in the graph schema."
)
graph[input_name] = (input_name, input_value)
avg_line_length: 36.151261 | max_line_length: 87 | alphanum_fraction: 0.637378
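
The run graph that `_build_dask_graph` assembles above follows the plain dask graph specification: a dict mapping each node name to a tuple of `(callable, *dependency_keys)`. A minimal, self-contained sketch of that spec, using an ordinary function in place of rasa's `GraphNode` callables (the names here are illustrative only):

import dask

def add(a, b):
    return a + b

# A dask graph is just a dict; "add_node" depends on the keys "x" and "y",
# mirroring how DaskGraphRunner wires a node to the entries in schema_node.needs.
run_graph = {
    "x": 1,
    "y": 2,
    "add_node": (add, "x", "y"),
}

# dask.get resolves the requested targets synchronously, as DaskGraphRunner.run does.
print(dask.get(run_graph, ["add_node"]))  # -> (3,)
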
hexsha: 3636494fbd93c178104e38d7581eb3ec92bec8ba | size: 2,227 | ext: py | lang: Python
max_stars: spacy/cli/info.py @ cmgreivel/spaCy (a31506e06060c559abfeda043503935691af2e98) | licenses: ["MIT"] | count: 2 | events: 2019-07-08T17:09:22.000Z .. 2021-01-09T17:36:04.000Z
max_issues: spacy/cli/info.py @ cmgreivel/spaCy (a31506e06060c559abfeda043503935691af2e98) | licenses: ["MIT"] | count: null | events: null .. null
max_forks: spacy/cli/info.py @ cmgreivel/spaCy (a31506e06060c559abfeda043503935691af2e98) | licenses: ["MIT"] | count: null | events: null .. null
content:
# coding: utf8
from __future__ import unicode_literals
import plac
import platform
from pathlib import Path
from ..compat import path2str
from .. import about
from .. import util
@plac.annotations(
model=("optional: shortcut link of model", "positional", None, str),
markdown=("generate Markdown for GitHub issues", "flag", "md", str))
def info(cmd, model=None, markdown=False):
"""Print info about spaCy installation. If a model shortcut link is
    specified as an argument, print model information. Flag --markdown
prints details in Markdown for easy copy-pasting to GitHub issues.
"""
if model:
if util.is_package(model):
model_path = util.get_package_path(model)
else:
model_path = util.get_data_path() / model
meta_path = model_path / 'meta.json'
if not meta_path.is_file():
util.prints(meta_path, title="Can't find model meta.json", exits=1)
meta = util.read_json(meta_path)
if model_path.resolve() != model_path:
meta['link'] = path2str(model_path)
meta['source'] = path2str(model_path.resolve())
else:
meta['source'] = path2str(model_path)
print_info(meta, 'model %s' % model, markdown)
else:
data = {'spaCy version': about.__version__,
'Location': path2str(Path(__file__).parent.parent),
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Models': list_models()}
print_info(data, 'spaCy', markdown)
def print_info(data, title, markdown):
title = 'Info about %s' % title
if markdown:
util.print_markdown(data, title=title)
else:
util.print_table(data, title=title)
def list_models():
def exclude_dir(dir_name):
# exclude common cache directories and hidden directories
exclude = ['cache', 'pycache', '__pycache__']
return dir_name in exclude or dir_name.startswith('.')
data_path = util.get_data_path()
if data_path:
models = [f.parts[-1] for f in data_path.iterdir() if f.is_dir()]
return ', '.join([m for m in models if not exclude_dir(m)])
return '-'
avg_line_length: 35.349206 | max_line_length: 79 | alphanum_fraction: 0.636731
hexsha: 7fcf1ad0bfd0105026c3f216d01ac5101a11f7ef | size: 2,237 | ext: py | lang: Python
max_stars: threads/motion_sensors_thread.py @ sensidev/pir-tof-camera-websoket-example (911da83d2ba44857d3fa0843ba57fd0576c23a5e) | licenses: ["MIT"] | count: null | events: null .. null
max_issues: threads/motion_sensors_thread.py @ sensidev/pir-tof-camera-websoket-example (911da83d2ba44857d3fa0843ba57fd0576c23a5e) | licenses: ["MIT"] | count: 1 | events: 2020-05-17T13:27:13.000Z .. 2020-05-17T13:27:13.000Z
max_forks: threads/motion_sensors_thread.py @ sensidev/pir-tof-camera-websoket-example (911da83d2ba44857d3fa0843ba57fd0576c23a5e) | licenses: ["MIT"] | count: 1 | events: 2019-10-27T10:52:29.000Z .. 2019-10-27T10:52:29.000Z
content:
import json
from threading import Thread
from time import time
from gpiozero import MotionSensor
class MotionSensorsThread(Thread):
"""
    Thread sampling PIR sensors and broadcasting whenever one of the sensors detects motion.
"""
WHEN_MOTION_VALUE = 1000
def __init__(self, websocket_server, sensors=None):
"""
Initiate PIR sensors.
:param websocket_server: Websocket server
:param sensors: list of objects like {'pin': 4}
"""
super(MotionSensorsThread, self).__init__()
print('Initializing motion sensors thread')
self.websocket_server = websocket_server
self.should_run = True
self.sensors = sensors
self._init_sensors_state_dict()
for s in self.sensors:
s['instance'] = MotionSensor(s.get('pin'))
s['instance'].when_motion = lambda instance: self._detect_event_for(instance)
def run(self):
try:
while self.should_run:
pass
finally:
pass
def stop(self):
print('Finishing motion sensors thread ...')
self.should_run = False
def _get_payload(self):
payload = {
"type": "motion",
"samples": []
}
for i, s in enumerate(self.sensors, start=1):
pin = s.get('pin')
payload['samples'].append({
'id': 'motion-sensor-{}'.format(i),
'pin': pin,
'sample': self.sensor_state_dict[pin],
})
return payload
def _detect_event_for(self, sensor_instance):
print('Detect Event For PIR sensor: {}'.format(sensor_instance.pin.number))
print('Motion detected: {}'.format(sensor_instance.motion_detected))
self.sensor_state_dict[sensor_instance.pin.number] = {
'value': self.WHEN_MOTION_VALUE if sensor_instance.motion_detected else 0,
'timestamp': time()
}
self.websocket_server.manager.broadcast(json.dumps(self._get_payload()), binary=False)
def _init_sensors_state_dict(self):
self.sensor_state_dict = {
s.get('pin'): {'value': 0, 'timestamp': time()} for s in self.sensors
}
avg_line_length: 28.316456 | max_line_length: 94 | alphanum_fraction: 0.599017
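
For reference, the broadcast that `_detect_event_for` triggers above is plain JSON built by `_get_payload`. A small standalone sketch of that message shape, with hypothetical pin numbers and no gpiozero hardware or websocket server involved:

import json
from time import time

# Mirrors MotionSensorsThread._get_payload; 1000 is WHEN_MOTION_VALUE, 0 means no motion.
sensor_state = {
    4: {'value': 1000, 'timestamp': time()},   # motion detected on (hypothetical) pin 4
    17: {'value': 0, 'timestamp': time()},     # no motion on (hypothetical) pin 17
}
payload = {
    "type": "motion",
    "samples": [
        {'id': 'motion-sensor-{}'.format(i), 'pin': pin, 'sample': sensor_state[pin]}
        for i, pin in enumerate(sensor_state, start=1)
    ],
}
print(json.dumps(payload, indent=2))
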
hexsha: ee4cdd2ba19d79a568ecc159d5152427ee637105 | size: 2,353 | ext: py | lang: Python
max_stars: neuro_flow/cli/completion.py @ neuromation/neuro-flow (c29d61247a4afa3b341474d226e08e976f59345c) | licenses: ["Apache-2.0"] | count: 3 | events: 2020-08-27T09:02:15.000Z .. 2020-09-29T09:19:57.000Z
max_issues: neuro_flow/cli/completion.py @ neuromation/neuro-flow (c29d61247a4afa3b341474d226e08e976f59345c) | licenses: ["Apache-2.0"] | count: 31 | events: 2020-08-05T12:55:24.000Z .. 2020-09-28T18:07:33.000Z
max_forks: neuro_flow/cli/completion.py @ neuromation/neuro-flow (c29d61247a4afa3b341474d226e08e976f59345c) | licenses: ["Apache-2.0"] | count: null | events: null .. null
content:
import click
import os
import sys
from neuro_flow.cli.utils import wrap_async
from neuro_flow.types import LocalPath
from .root import Root
CFG_FILE = {"bash": LocalPath("~/.bashrc"), "zsh": LocalPath("~/.zshrc")}
SOURCE_CMD = {"bash": "bash_source", "zsh": "zsh_source"}
ACTIVATION_TEMPLATE = 'eval "$(_NEURO_FLOW_COMPLETE={cmd} {exe})"'
@click.group()
def completion() -> None:
"""
Output shell completion code.
"""
@click.command()
@click.argument("shell", type=click.Choice(["bash", "zsh"]))
@wrap_async()
async def generate(root: Root, shell: str) -> None:
"""
Provide instruction for shell completion generation.
"""
root.console.print(f"Push the following line into your {CFG_FILE[shell]}")
root.console.print(
ACTIVATION_TEMPLATE.format(cmd=SOURCE_CMD[shell], exe=sys.argv[0])
)
@click.command()
@click.argument("shell", type=click.Choice(["bash", "zsh"]))
@wrap_async()
async def patch(root: Root, shell: str) -> None:
"""
Automatically patch shell configuration profile to enable completion
"""
GENERATED_START = (
b"# Start of generated by neuro-flow. Please do not edit this comment.\n"
)
GENERATED_END = (
b"\n# End of generated by neuro-flow. Please do not edit this comment.\n"
)
profile_file = CFG_FILE[shell].expanduser()
code = (
GENERATED_START
+ os.fsencode(
ACTIVATION_TEMPLATE.format(cmd=SOURCE_CMD[shell], exe=sys.argv[0])
)
+ GENERATED_END
)
try:
with profile_file.open("rb+") as profile:
content = profile.read()
except FileNotFoundError:
content = b""
start = content.find(GENERATED_START)
if start != -1:
end = content.find(GENERATED_END)
if end == -1:
raise click.ClickException(
f"Malformed guarding comments. Please edit {profile_file} manually"
)
content = content[:start] + code + content[end + len(GENERATED_END) :]
else:
if content != b"":
content += b"\n" + code
else:
content = code
with profile_file.open("wb+") as profile:
profile.write(content)
root.console.print(f"Added completion configuration into '{profile_file}'")
completion.add_command(generate)
completion.add_command(patch)
avg_line_length: 26.738636 | max_line_length: 83 | alphanum_fraction: 0.632809
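
The `generate` command above only prints the line a user should add to their shell profile; `patch` writes the same line between the two guard comments. Rendering `ACTIVATION_TEMPLATE` by hand shows what that line looks like; the executable path below is a hypothetical install location, since the real value comes from `sys.argv[0]`:

# Stand-ins copied from the module-level constants in completion.py.
ACTIVATION_TEMPLATE = 'eval "$(_NEURO_FLOW_COMPLETE={cmd} {exe})"'
SOURCE_CMD = {"bash": "bash_source", "zsh": "zsh_source"}

print(ACTIVATION_TEMPLATE.format(cmd=SOURCE_CMD["bash"], exe="/usr/local/bin/neuro-flow"))
# -> eval "$(_NEURO_FLOW_COMPLETE=bash_source /usr/local/bin/neuro-flow)"
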
hexsha: c5e1a4d0b8f06a60d0529befdabc11ec2e62c841 | size: 2,846 | ext: py | lang: Python
max_stars: frete/api/modules/frete.py @ Leothi/teste-kabum (37bc5566e13ab68c6fd0adf3b700632395481e17) | licenses: ["MIT"] | count: 1 | events: 2021-03-18T16:26:35.000Z .. 2021-03-18T16:26:35.000Z
max_issues: frete/api/modules/frete.py @ Leothi/teste-kabum (37bc5566e13ab68c6fd0adf3b700632395481e17) | licenses: ["MIT"] | count: null | events: null .. null
max_forks: frete/api/modules/frete.py @ Leothi/teste-kabum (37bc5566e13ab68c6fd0adf3b700632395481e17) | licenses: ["MIT"] | count: null | events: null .. null
content:
from loguru import logger
class CalculadorFrete():
cfg_frete = {
'ninja': {
'nome': 'Entrega Ninja',
'cte_frete': 0.3,
'altura_min': 10,
'altura_max': 200,
'largura_min': 6,
'largura_max': 140,
'prazo_dias': 6,
},
'kabum': {
'nome': 'Entrega KaBuM',
'cte_frete': 0.2,
'altura_min': 5,
'altura_max': 140,
'largura_min': 13,
'largura_max': 125,
'prazo_dias': 4,
},
}
@classmethod
def validar(cls, dimensao: dict, peso: int) -> list:
"""Valida o produto através de suas características.
:param dimensao: Dimensões do produto.
:type dimensao: dict
:param peso: Peso do produto.
:type peso: int
:return: Lista de fretes válidos para o produto.
:rtype: list
"""
logger.info("Validando dimensões do produto.")
tipos_validos = []
cfg_dict = cls.cfg_frete
        # Weight validation
if not peso <= 0:
for key, value in cfg_dict.items():
                # Height and width validation
altura_valida = cfg_dict[key]['altura_min'] <= dimensao['altura'] <= cfg_dict[key]['altura_max']
largura_valida = cfg_dict[key]['largura_min'] <= dimensao['largura'] <= cfg_dict[key]['largura_max']
if altura_valida and largura_valida:
tipos_validos.append(key)
logger.debug('Validação concluída.')
return tipos_validos
@classmethod
def criar_lista_fretes(cls, body: dict) -> list:
"""Cria a lista de fretes disponíveis para o produto.
:param body: Dicionário da requisição contendo informações do produto.
:type body: dict
:return: Lista de dicionários contendo os resultados dos cálculos de frete.
:rtype: list
"""
tipos_validos = cls.validar(**body)
lista_fretes = []
if tipos_validos:
logger.info("Criando lista de fretes.")
for tipo in tipos_validos:
cfg_dict = cls.cfg_frete
                # Calculating the shipping cost
cte_frete = cfg_dict[tipo]['cte_frete']
frete = cte_frete * body['peso'] / 10
                # Final dictionary
dict_frete = {
'nome': cfg_dict[tipo]['nome'],
'valor_frete': frete,
'prazo_dias': cfg_dict[tipo]['prazo_dias']
}
                # List with the final dictionaries
lista_fretes.append(dict_frete)
logger.success("Lista criada.")
else:
logger.success("Dimensões inválidas para os fretes atuais.")
return lista_fretes
avg_line_length: 30.602151 | max_line_length: 116 | alphanum_fraction: 0.531623
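
In `criar_lista_fretes` above, the shipping cost for every carrier that accepts the product's height and width is simply `cte_frete * peso / 10`. A hedged usage sketch, assuming the module is importable under the path recorded for this file:

# The import path is taken from the record's repo layout and may differ in practice.
from frete.api.modules.frete import CalculadorFrete

body = {'dimensao': {'altura': 100, 'largura': 50}, 'peso': 400}
for frete in CalculadorFrete.criar_lista_fretes(body):
    print(frete)

# Both carriers accept 100 x 50, so the expected output is:
# {'nome': 'Entrega Ninja', 'valor_frete': 12.0, 'prazo_dias': 6}   # 0.3 * 400 / 10
# {'nome': 'Entrega KaBuM', 'valor_frete': 8.0, 'prazo_dias': 4}    # 0.2 * 400 / 10
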
hexsha: abd9cecd9ac6d71a5192a06c3bd86641ff64abe5 | size: 20,076 | ext: py | lang: Python
max_stars: tests/test_setup.py @ squirrel289/core (6c5bcbfc3ee40927458e9188d6b79bf63933d3f9) | licenses: ["Apache-2.0"] | count: 3 | events: 2019-11-13T18:19:33.000Z .. 2021-07-18T11:40:37.000Z
max_issues: tests/test_setup.py @ squirrel289/core (6c5bcbfc3ee40927458e9188d6b79bf63933d3f9) | licenses: ["Apache-2.0"] | count: 45 | events: 2020-07-21T12:58:24.000Z .. 2022-03-31T06:01:46.000Z
max_forks: tests/test_setup.py @ CrossEyeORG/homeassistant (6c5bcbfc3ee40927458e9188d6b79bf63933d3f9) | licenses: ["Apache-2.0"] | count: 2 | events: 2017-09-03T16:06:02.000Z .. 2021-01-12T15:07:52.000Z
content:
"""Test component/platform setup."""
# pylint: disable=protected-access
import asyncio
import logging
import os
import threading
import pytest
import voluptuous as vol
from homeassistant import config_entries, setup
import homeassistant.config as config_util
from homeassistant.const import EVENT_COMPONENT_LOADED, EVENT_HOMEASSISTANT_START
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.config_validation import (
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
import homeassistant.util.dt as dt_util
from tests.async_mock import Mock, patch
from tests.common import (
MockConfigEntry,
MockModule,
MockPlatform,
assert_setup_component,
get_test_config_dir,
get_test_home_assistant,
mock_entity_platform,
mock_integration,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
@pytest.fixture(autouse=True)
def mock_handlers():
"""Mock config flows."""
class MockFlowHandler(config_entries.ConfigFlow):
"""Define a mock flow handler."""
VERSION = 1
with patch.dict(config_entries.HANDLERS, {"comp": MockFlowHandler}):
yield
class TestSetup:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Set up the test."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
self.hass.stop()
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({"comp_conf": {"hello": str}}, required=True)
mock_integration(
self.hass, MockModule("comp_conf", config_schema=config_schema)
)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, "comp_conf", {})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(
self.hass, "comp_conf", {"comp_conf": None}
)
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, "comp_conf", {"comp_conf": {}})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(
self.hass,
"comp_conf",
{"comp_conf": {"hello": "world", "invalid": "extra"}},
)
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(1):
assert setup.setup_component(
self.hass, "comp_conf", {"comp_conf": {"hello": "world"}}
)
def test_validate_platform_config(self, caplog):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({})
mock_integration(
self.hass,
MockModule("platform_conf", platform_schema_base=platform_schema_base),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform(platform_schema=platform_schema),
)
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": {"platform": "not_existing", "hello": "world"}},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": {"platform": "whatever", "hello": "world"}},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{"platform_conf": [{"platform": "whatever", "hello": "world"}]},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
# Any falsey platform config will be ignored (None, {}, etc)
with assert_setup_component(0) as config:
assert setup.setup_component(
self.hass, "platform_conf", {"platform_conf": None}
)
assert "platform_conf" in self.hass.config.components
assert not config["platform_conf"] # empty
assert setup.setup_component(
self.hass, "platform_conf", {"platform_conf": {}}
)
assert "platform_conf" in self.hass.config.components
assert not config["platform_conf"] # empty
def test_validate_platform_config_2(self, caplog):
"""Test component PLATFORM_SCHEMA_BASE prio over PLATFORM_SCHEMA."""
platform_schema = PLATFORM_SCHEMA.extend({"hello": str})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({"hello": "world"})
mock_integration(
self.hass,
MockModule(
"platform_conf",
platform_schema=platform_schema,
platform_schema_base=platform_schema_base,
),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform("whatever", platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
# pass
"platform_conf": {"platform": "whatever", "hello": "world"},
# fail: key hello violates component platform_schema_base
"platform_conf 2": {"platform": "whatever", "hello": "there"},
},
)
def test_validate_platform_config_3(self, caplog):
"""Test fallback to component PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE.extend({"hello": str})
platform_schema = PLATFORM_SCHEMA.extend({"cheers": str, "hello": "world"})
mock_integration(
self.hass, MockModule("platform_conf", platform_schema=component_schema)
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform("whatever", platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
# pass
"platform_conf": {"platform": "whatever", "hello": "world"},
# fail: key hello violates component platform_schema
"platform_conf 2": {"platform": "whatever", "hello": "there"},
},
)
def test_validate_platform_config_4(self):
"""Test entity_namespace in PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE
platform_schema = PLATFORM_SCHEMA
mock_integration(
self.hass,
MockModule("platform_conf", platform_schema_base=component_schema),
)
mock_entity_platform(
self.hass,
"platform_conf.whatever",
MockPlatform(platform_schema=platform_schema),
)
with assert_setup_component(1):
assert setup.setup_component(
self.hass,
"platform_conf",
{
"platform_conf": {
# pass: entity_namespace accepted by PLATFORM_SCHEMA
"platform": "whatever",
"entity_namespace": "yummy",
}
},
)
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("platform_conf")
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert setup.setup_component(self.hass, "non_existing", {}) is False
def test_component_not_double_initialized(self):
"""Test we do not set up a component twice."""
mock_setup = Mock(return_value=True)
mock_integration(self.hass, MockModule("comp", setup=mock_setup))
assert setup.setup_component(self.hass, "comp", {})
assert mock_setup.called
mock_setup.reset_mock()
assert setup.setup_component(self.hass, "comp", {})
assert not mock_setup.called
@patch("homeassistant.util.package.install_package", return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
mock_integration(self.hass, MockModule("comp", requirements=["package==0.0.1"]))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not set up twice."""
result = []
async def async_setup(hass, config):
"""Tracking Setup."""
result.append(1)
mock_integration(self.hass, MockModule("comp", async_setup=async_setup))
def setup_component():
"""Set up the component."""
setup.setup_component(self.hass, "comp", {})
thread = threading.Thread(target=setup_component)
thread.start()
setup.setup_component(self.hass, "comp", {})
thread.join()
assert len(result) == 1
def test_component_not_setup_missing_dependencies(self):
"""Test we do not set up a component if not all dependencies loaded."""
deps = ["maybe_existing"]
mock_integration(self.hass, MockModule("comp", dependencies=deps))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(self.hass, MockModule("comp2", dependencies=deps))
mock_integration(self.hass, MockModule("maybe_existing"))
assert setup.setup_component(self.hass, "comp2", {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
mock_integration(
self.hass, MockModule("comp", setup=lambda hass, config: False)
)
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Raise exception."""
raise Exception("fail!")
mock_integration(self.hass, MockModule("comp", setup=exception_setup))
assert not setup.setup_component(self.hass, "comp", {})
assert "comp" not in self.hass.config.components
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Test that config is passed in."""
if config.get("comp_a", {}).get("valid", False):
return True
raise Exception(f"Config not passed in: {config}")
platform = MockPlatform()
mock_integration(self.hass, MockModule("comp_a", setup=config_check_setup))
mock_integration(
self.hass,
MockModule("platform_a", setup=config_check_setup, dependencies=["comp_a"]),
)
mock_entity_platform(self.hass, "switch.platform_a", platform)
setup.setup_component(
self.hass,
"switch",
{"comp_a": {"valid": True}, "switch": {"platform": "platform_a"}},
)
self.hass.block_till_done()
assert "comp_a" in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend(
{"valid": True}, extra=vol.PREVENT_EXTRA
)
mock_setup = Mock(spec_set=True)
mock_entity_platform(
self.hass,
"switch.platform_a",
MockPlatform(platform_schema=platform_schema, setup_platform=mock_setup),
)
with assert_setup_component(0, "switch"):
assert setup.setup_component(
self.hass,
"switch",
{"switch": {"platform": "platform_a", "invalid": True}},
)
self.hass.block_till_done()
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("switch")
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"switch",
{
"switch": {
"platform": "platform_a",
"valid": True,
"invalid_extra": True,
}
},
)
self.hass.block_till_done()
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove("switch")
with assert_setup_component(1, "switch"):
assert setup.setup_component(
self.hass,
"switch",
{"switch": {"platform": "platform_a", "valid": True}},
)
self.hass.block_till_done()
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
mock_integration(
self.hass, MockModule("disabled_component", setup=lambda hass, config: None)
)
assert not setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass,
MockModule("disabled_component", setup=lambda hass, config: False),
)
assert not setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass, MockModule("disabled_component", setup=lambda hass, config: True)
)
assert setup.setup_component(self.hass, "disabled_component", {})
assert "disabled_component" in self.hass.config.components
def test_all_work_done_before_start(self):
"""Test all init work done till start."""
call_order = []
def component1_setup(hass, config):
"""Set up mock component."""
discovery.discover(hass, "test_component2", {}, "test_component2", {})
discovery.discover(hass, "test_component3", {}, "test_component3", {})
return True
def component_track_setup(hass, config):
"""Set up mock component."""
call_order.append(1)
return True
mock_integration(
self.hass, MockModule("test_component1", setup=component1_setup)
)
mock_integration(
self.hass, MockModule("test_component2", setup=component_track_setup)
)
mock_integration(
self.hass, MockModule("test_component3", setup=component_track_setup)
)
@callback
def track_start(event):
"""Track start event."""
call_order.append(2)
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)
self.hass.add_job(setup.async_setup_component(self.hass, "test_component1", {}))
self.hass.block_till_done()
self.hass.start()
assert call_order == [1, 1, 2]
async def test_component_warn_slow_setup(hass):
"""Warn we log when a component setup takes a long time."""
mock_integration(hass, MockModule("test_component1"))
with patch.object(hass.loop, "call_later") as mock_call:
result = await setup.async_setup_component(hass, "test_component1", {})
assert result
assert mock_call.called
assert len(mock_call.mock_calls) == 3
timeout, logger_method = mock_call.mock_calls[0][1][:2]
assert timeout == setup.SLOW_SETUP_WARNING
assert logger_method == setup._LOGGER.warning
assert mock_call().cancel.called
async def test_platform_no_warn_slow(hass):
"""Do not warn for long entity setup time."""
mock_integration(
hass, MockModule("test_component1", platform_schema=PLATFORM_SCHEMA)
)
with patch.object(hass.loop, "call_later") as mock_call:
result = await setup.async_setup_component(hass, "test_component1", {})
assert result
assert len(mock_call.mock_calls) == 0
async def test_platform_error_slow_setup(hass, caplog):
"""Don't block startup more than SLOW_SETUP_MAX_WAIT."""
with patch.object(setup, "SLOW_SETUP_MAX_WAIT", 1):
called = []
async def async_setup(*args):
"""Tracking Setup."""
called.append(1)
await asyncio.sleep(2)
mock_integration(hass, MockModule("test_component1", async_setup=async_setup))
result = await setup.async_setup_component(hass, "test_component1", {})
assert len(called) == 1
assert not result
assert "test_component1 is taking longer than 1 seconds" in caplog.text
async def test_when_setup_already_loaded(hass):
"""Test when setup."""
calls = []
async def mock_callback(hass, component):
"""Mock callback."""
calls.append(component)
setup.async_when_setup(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == []
hass.config.components.add("test")
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Event listener should be gone
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
await hass.async_block_till_done()
assert calls == ["test"]
# Should be called right away
setup.async_when_setup(hass, "test", mock_callback)
await hass.async_block_till_done()
assert calls == ["test", "test"]
async def test_setup_import_blows_up(hass):
"""Test that we handle it correctly when importing integration blows up."""
with patch(
"homeassistant.loader.Integration.get_component", side_effect=ValueError
):
assert not await setup.async_setup_component(hass, "sun", {})
async def test_parallel_entry_setup(hass):
"""Test config entries are set up in parallel."""
MockConfigEntry(domain="comp", data={"value": 1}).add_to_hass(hass)
MockConfigEntry(domain="comp", data={"value": 2}).add_to_hass(hass)
calls = []
async def mock_async_setup_entry(hass, entry):
"""Mock setting up an entry."""
calls.append(entry.data["value"])
await asyncio.sleep(0)
calls.append(entry.data["value"])
return True
mock_integration(
hass, MockModule("comp", async_setup_entry=mock_async_setup_entry,),
)
mock_entity_platform(hass, "config_flow.comp", None)
await setup.async_setup_component(hass, "comp", {})
assert calls == [1, 2, 1, 2]
avg_line_length: 34.259386 | max_line_length: 88 | alphanum_fraction: 0.618002
hexsha: 0170bd083e4a0324855ae0cd94c452035a3bc9e3 | size: 25,599 | ext: py | lang: Python
max_stars: core/domain/question_domain_test.py @ arora-ria/oppia (698d5593689f15ee36384a57036cf2e0150bb785) | licenses: ["Apache-2.0"] | count: 2 | events: 2020-10-13T12:59:08.000Z .. 2020-10-13T17:10:26.000Z
max_issues: core/domain/question_domain_test.py @ gitter-badger/oppia (7d8e659264582d7ce74bc6c139e597b82bca0e04) | licenses: ["Apache-2.0"] | count: 1 | events: 2020-05-27T06:08:17.000Z .. 2020-05-27T06:08:17.000Z
max_forks: core/domain/question_domain_test.py @ gitter-badger/oppia (7d8e659264582d7ce74bc6c139e597b82bca0e04) | licenses: ["Apache-2.0"] | count: 1 | events: 2020-11-05T12:26:10.000Z .. 2020-11-05T12:26:10.000Z
content:
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for question domain objects."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from core.domain import question_domain
from core.domain import state_domain
from core.tests import test_utils
import feconf
import utils
class QuestionChangeTest(test_utils.GenericTestBase):
"""Test for Question Change object."""
def test_to_dict(self):
"""Test to verify to_dict method of the Question Change object."""
expected_object_dict = {
'cmd': 'update_question_property',
'property_name': 'question_state_data',
'new_value': 'new_value',
'old_value': 'old_value',
}
change_dict = {
'cmd': 'update_question_property',
'property_name': 'question_state_data',
'new_value': 'new_value',
'old_value': 'old_value',
}
observed_object = question_domain.QuestionChange(
change_dict=change_dict,
)
self.assertEqual(expected_object_dict, observed_object.to_dict())
def test_change_dict_without_cmd(self):
"""Test to verify __init__ method of the Question Change object
when change_dict is without cmd key.
"""
self.assertRaisesRegexp(
utils.ValidationError,
'Missing cmd key in change dict',
callableObj=question_domain.QuestionChange,
change_dict={}
)
def test_change_dict_with_wrong_cmd(self):
"""Test to verify __init__ method of the Question Change object
when change_dict is with wrong cmd value.
"""
self.assertRaisesRegexp(
utils.ValidationError,
'Command wrong is not allowed',
callableObj=question_domain.QuestionChange,
change_dict={'cmd': 'wrong', }
)
def test_change_dict_with_missing_attributes_in_cmd(self):
"""Test to verify __init__ method of the Question Change object
when change_dict is with missing attributes in cmd.
"""
self.assertRaisesRegexp(
utils.ValidationError,
'The following required attributes are present: new_value',
callableObj=question_domain.QuestionChange,
change_dict={
'cmd': 'update_question_property',
'property_name': 'question_state_data',
'old_value': 'old_value'
}
)
def test_change_dict_with_extra_attributes_in_cmd(self):
"""Test to verify __init__ method of the Question Change object
when change_dict is with extra attributes in cmd.
"""
self.assertRaisesRegexp(
utils.ValidationError,
'The following extra attributes are present: invalid',
callableObj=question_domain.QuestionChange,
change_dict={'cmd': 'create_new', 'invalid': 'invalid'}
)
def test_update_question_property_with_wrong_property_name(self):
"""Test to verify __init__ method of the Question Change object
when cmd is update_question_property and wrong property_name is given.
"""
self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd update_question_property: '
'wrong is not allowed'),
callableObj=question_domain.QuestionChange,
change_dict={
'cmd': 'update_question_property',
'property_name': 'wrong',
'new_value': 'new_value',
'old_value': 'old_value'
}
)
def test_create_new(self):
"""Test to verify __init__ method of the Question Change object
when cmd is create_new.
"""
change_dict = {
'cmd': 'create_new'
}
observed_object = question_domain.QuestionChange(
change_dict=change_dict,
)
self.assertEqual('create_new', observed_object.cmd)
def test_update_question_property(self):
"""Test to verify __init__ method of the Question Change object
when cmd is update_question_property.
"""
change_dict = {
'cmd': 'update_question_property',
'property_name': 'question_state_data',
'new_value': 'new_value',
'old_value': 'old_value'
}
observed_object = question_domain.QuestionChange(
change_dict=change_dict
)
self.assertEqual('update_question_property', observed_object.cmd)
self.assertEqual('question_state_data', observed_object.property_name)
self.assertEqual('new_value', observed_object.new_value)
self.assertEqual('old_value', observed_object.old_value)
def test_create_new_fully_specified_question(self):
"""Test to verify __init__ method of the Question Change object
when cmd is create_new_fully_specified_question.
"""
change_dict = {
'cmd': 'create_new_fully_specified_question',
'question_dict': {},
'skill_id': '10',
}
observed_object = question_domain.QuestionChange(
change_dict=change_dict,
)
self.assertEqual(
'create_new_fully_specified_question', observed_object.cmd)
self.assertEqual('10', observed_object.skill_id)
self.assertEqual({}, observed_object.question_dict)
def test_migrate_state_schema_to_latest_version(self):
"""Test to verify __init__ method of the Question Change object
when cmd is migrate_state_schema_to_latest_version.
"""
change_dict = {
'cmd': 'migrate_state_schema_to_latest_version',
'from_version': 0,
'to_version': 10,
}
observed_object = question_domain.QuestionChange(
change_dict=change_dict,
)
self.assertEqual(
'migrate_state_schema_to_latest_version', observed_object.cmd)
self.assertEqual(0, observed_object.from_version)
self.assertEqual(10, observed_object.to_version)
class QuestionSuggestionChangeTest(test_utils.GenericTestBase):
"""Test for QuestionSuggestionChange object."""
def test_to_dict(self):
"""Test to verify to_dict method of the Question Change object."""
expected_object_dict = {
'cmd': 'create_new_fully_specified_question',
'question_dict': 'question_dict',
'skill_id': 'skill_1',
'skill_difficulty': '0.3'
}
change_dict = {
'cmd': 'create_new_fully_specified_question',
'question_dict': 'question_dict',
'skill_id': 'skill_1',
'skill_difficulty': '0.3'
}
observed_object = question_domain.QuestionSuggestionChange(
change_dict=change_dict,
)
self.assertEqual(expected_object_dict, observed_object.to_dict())
def test_change_dict_without_cmd(self):
"""Test to verify __init__ method of the QuestionSuggestionChange
object when change_dict is without cmd key.
"""
self.assertRaisesRegexp(
utils.ValidationError,
'Missing cmd key in change dict',
callableObj=question_domain.QuestionSuggestionChange,
change_dict={}
)
def test_change_dict_with_wrong_cmd(self):
"""Test to verify __init__ method of the QuestionSuggestionChange object
when change_dict is with wrong cmd value.
"""
self.assertRaisesRegexp(
utils.ValidationError,
'Command wrong is not allowed',
callableObj=question_domain.QuestionSuggestionChange,
change_dict={'cmd': 'wrong', }
)
def test_change_dict_with_missing_attributes_in_cmd(self):
"""Test to verify __init__ method of the QuestionSuggestionChange object
when change_dict is with missing attributes in cmd.
"""
self.assertRaisesRegexp(
utils.ValidationError,
'The following required attributes are present: new_value',
callableObj=question_domain.QuestionSuggestionChange,
change_dict={
'cmd': 'create_new_fully_specified_question',
'question_dict': 'question_dict',
}
)
def test_change_dict_with_extra_attributes_in_cmd(self):
"""Test to verify __init__ method of the QuestionSuggestionChange object
when change_dict is with extra attributes in cmd.
"""
self.assertRaisesRegexp(
utils.ValidationError,
'The following extra attributes are present: invalid',
callableObj=question_domain.QuestionSuggestionChange,
change_dict={
'cmd': 'create_new_fully_specified_question',
'question_dict': 'question_dict',
'skill_id': 'skill_1',
'skill_difficulty': '0.3',
'invalid': 'invalid'
}
)
def test_create_new_fully_specified_question(self):
"""Test to verify __init__ method of the QuestionSuggestionChange object
when cmd is create_new_fully_specified_question.
"""
change_dict = {
'cmd': 'create_new_fully_specified_question',
'question_dict': {},
'skill_id': '10',
'skill_difficulty': '0.3',
}
observed_object = question_domain.QuestionSuggestionChange(
change_dict=change_dict,
)
self.assertEqual(
'create_new_fully_specified_question', observed_object.cmd)
self.assertEqual('10', observed_object.skill_id)
self.assertEqual({}, observed_object.question_dict)
class QuestionDomainTest(test_utils.GenericTestBase):
"""Tests for Question domain object."""
def setUp(self):
"""Before each individual test, create a question."""
super(QuestionDomainTest, self).setUp()
question_state_data = self._create_valid_question_data('ABC')
self.question = question_domain.Question(
'question_id', question_state_data,
feconf.CURRENT_STATE_SCHEMA_VERSION, 'en', 1, ['skill1'],
['skillId12345-123'])
def test_to_and_from_dict(self):
"""Test to verify to_dict and from_dict methods
of Question domain object.
"""
default_question_state_data = (
question_domain.Question.create_default_question_state())
question_dict = {
'id': 'col1.random',
'question_state_data': default_question_state_data.to_dict(),
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'language_code': 'en',
'version': 1,
'linked_skill_ids': ['skill1'],
'inapplicable_skill_misconception_ids': ['skill1-123']
}
observed_object = question_domain.Question.from_dict(question_dict)
self.assertEqual(question_dict, observed_object.to_dict())
def _assert_validation_error(self, expected_error_substring):
"""Checks that the skill passes strict validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.question.validate()
def test_strict_validation(self):
"""Test to verify validate method of Question domain object with
strict as True.
"""
state = self.question.question_state_data
state.interaction.solution = None
self._assert_validation_error(
'Expected the question to have a solution')
state.interaction.hints = []
self._assert_validation_error(
'Expected the question to have at least one hint')
state.interaction.default_outcome.dest = 'abc'
self._assert_validation_error(
'Expected all answer groups to have destination as None.')
state.interaction.default_outcome.labelled_as_correct = False
self._assert_validation_error(
'Expected at least one answer group to have a correct answer')
def test_strict_validation_for_answer_groups(self):
"""Test to verify validate method of Question domain object with
strict as True for interaction with answer group.
"""
state = self.question.question_state_data
state.interaction.default_outcome.labelled_as_correct = False
state.interaction.answer_groups = [
state_domain.AnswerGroup.from_dict({
'outcome': {
'dest': 'abc',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Feedback</p>'
},
'labelled_as_correct': True,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': None
})
]
self._assert_validation_error(
'Expected all answer groups to have destination as None.')
def test_validate_invalid_list_of_inapplicable_skill_misconception_ids(
self):
"""Test to verify that the validation fails when
inapplicable_skill_misconception_ids value is an invalid list.
"""
self.question.inapplicable_skill_misconception_ids = ['Test', 1]
self._assert_validation_error(
r'Expected inapplicable_skill_misconception_ids to be a list of '
r'strings, received \[u\'Test\', 1\]')
def test_validate_invalid_type_of_inapplicable_skill_misconception_ids(
self):
"""Test to verify that the validation fails when
inapplicable_skill_misconception_ids value is an invalid type.
"""
self.question.inapplicable_skill_misconception_ids = 123
self._assert_validation_error(
'Expected inapplicable_skill_misconception_ids to be a list of '
'strings, received 123')
def test_validate_invalid_format_of_inapplicable_skill_misconception_ids(
self):
"""Test to verify that the validation fails when
inapplicable_skill_misconception_ids value is an invalid format i.e.
it is not of the form <skill-id>-<misconception-id>.
"""
self.question.inapplicable_skill_misconception_ids = ['abc', 'def']
self._assert_validation_error(
r'Expected inapplicable_skill_misconception_ids to be a list '
r'of strings of the format <skill_id>-<misconception_id>, '
r'received \[u\'abc\', u\'def\'\]')
def test_validate_duplicate_inapplicable_skill_misconception_ids_list(
self):
"""Test to verify that the validation fails when
        inapplicable_skill_misconception_ids list has duplicate values.
"""
self.question.inapplicable_skill_misconception_ids = [
'skillid12345-1', 'skillid12345-1']
self._assert_validation_error(
'inapplicable_skill_misconception_ids has duplicate values')
def test_strict_validation_passes(self):
"""Test to verify validate method of a finalized Question domain object
with correct input.
"""
self.question.validate()
def test_not_strict_validation(self):
"""Test to verify validate method of Question domain object with
strict as False.
"""
self.question.language_code = 'abc'
self._assert_validation_error('Invalid language code')
self.question.question_state_data = 'State data'
self._assert_validation_error(
'Expected question state data to be a State object')
self.question.question_state_data_schema_version = 'abc'
self._assert_validation_error(
'Expected schema version to be an integer')
self.question.linked_skill_ids = 'Test'
self._assert_validation_error(
'Expected linked_skill_ids to be a list of strings')
self.question.linked_skill_ids = None
self._assert_validation_error(
'inked_skill_ids is either null or an empty list')
self.question.linked_skill_ids = []
self._assert_validation_error(
'linked_skill_ids is either null or an empty list')
self.question.linked_skill_ids = ['Test', 1]
self._assert_validation_error(
'Expected linked_skill_ids to be a list of strings')
self.question.linked_skill_ids = ['skill1', 'skill1']
self._assert_validation_error(
'linked_skill_ids has duplicate skill ids')
self.question.language_code = 1
self._assert_validation_error('Expected language_code to be a string')
self.question.version = 'abc'
self._assert_validation_error('Expected version to be an integer')
self.question.id = 123
self._assert_validation_error('Expected ID to be a string')
def test_create_default_question(self):
"""Test to verify create_default_question method of the Question domain
object.
"""
question_id = 'col1.random'
skill_ids = ['test_skill1', 'test_skill2']
question = question_domain.Question.create_default_question(
question_id, skill_ids)
default_question_data = (
question_domain.Question.create_default_question_state().to_dict())
self.assertEqual(question.id, question_id)
self.assertEqual(
question.question_state_data.to_dict(), default_question_data)
self.assertEqual(question.language_code, 'en')
self.assertEqual(question.version, 0)
self.assertEqual(question.linked_skill_ids, skill_ids)
def test_update_language_code(self):
"""Test to verify update_language_code method of the Question domain
object.
"""
self.question.update_language_code('pl')
self.assertEqual('pl', self.question.language_code)
def test_update_linked_skill_ids(self):
"""Test to verify update_linked_skill_ids method of the Question domain
object.
"""
self.question.update_linked_skill_ids(['skill_id1'])
self.assertEqual(['skill_id1'], self.question.linked_skill_ids)
def test_update_inapplicable_skill_misconception_ids(self):
"""Test to verify update_inapplicable_skill_misconception_ids method
of the Question domain object.
"""
self.assertEqual(
['skillId12345-123'],
self.question.inapplicable_skill_misconception_ids)
self.question.update_inapplicable_skill_misconception_ids(
['skillid-misconceptionid'])
self.assertEqual(
['skillid-misconceptionid'],
self.question.inapplicable_skill_misconception_ids)
def test_update_question_state_data(self):
"""Test to verify update_question_state_data method of the Question
domain object.
"""
question_state_data = self._create_valid_question_data('Test')
self.question.update_question_state_data(question_state_data)
self.assertEqual(
question_state_data.to_dict(),
self.question.question_state_data.to_dict()
)
class QuestionSummaryTest(test_utils.GenericTestBase):
"""Test for Question Summary object."""
def setUp(self):
super(QuestionSummaryTest, self).setUp()
self.fake_date_created = datetime.datetime(
2018, 11, 17, 20, 2, 45, 0)
self.fake_date_updated = datetime.datetime(
2018, 11, 17, 20, 3, 14, 0)
self.observed_object = question_domain.QuestionSummary(
question_id='question_1',
question_content='<p>question content</p>',
interaction_id='TextInput',
question_model_created_on=self.fake_date_created,
question_model_last_updated=self.fake_date_updated,
misconception_ids=['skill1-1', 'skill2-2']
)
def test_to_dict(self):
"""Test to verify to_dict method of the Question Summary
object.
"""
expected_object_dict = {
'id': 'question_1',
'question_content': '<p>question content</p>',
'interaction_id': 'TextInput',
'last_updated_msec': utils.get_time_in_millisecs(
self.fake_date_updated),
'created_on_msec': utils.get_time_in_millisecs(
self.fake_date_created),
'misconception_ids': ['skill1-1', 'skill2-2']
}
self.assertEqual(expected_object_dict, self.observed_object.to_dict())
def test_validation_with_valid_properties(self):
self.observed_object.validate()
def test_validation_with_invalid_id(self):
self.observed_object.id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected id to be a string, received 1'):
self.observed_object.validate()
def test_validation_with_invalid_interaction_id(self):
self.observed_object.interaction_id = 1
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected interaction id to be a string, received 1'):
self.observed_object.validate()
def test_validation_with_invalid_question_content(self):
self.observed_object.question_content = 1
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected question content to be a string, received 1'):
self.observed_object.validate()
def test_validation_with_invalid_created_on(self):
self.observed_object.created_on = 1
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected created on to be a datetime, received 1'):
self.observed_object.validate()
def test_validation_with_invalid_last_updated(self):
self.observed_object.last_updated = 1
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected last updated to be a datetime, received 1'):
self.observed_object.validate()
def test_validate_invalid_list_of_misconception_ids(self):
"""Test to verify that the validation fails when
misconception_ids value is an invalid list.
"""
self.observed_object.misconception_ids = ['Test', 1]
with self.assertRaisesRegexp(
utils.ValidationError,
r'Expected misconception ids to be a list of strings, '
r'received \[u\'Test\', 1\]'):
self.observed_object.validate()
def test_validate_invalid_type_of_misconception_ids(self):
"""Test to verify that the validation fails when
misconception_ids value is an invalid type.
"""
self.observed_object.misconception_ids = 123
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected misconception ids to be a list of strings, '
'received 123'):
self.observed_object.validate()
class QuestionSkillLinkDomainTest(test_utils.GenericTestBase):
"""Test for Question Skill Link Domain object."""
def test_to_dict(self):
"""Test to verify to_dict method of the Question Skill Link Domain
object.
"""
expected_object_dict = {
'question_id': 'testquestion',
'skill_id': 'testskill',
'skill_description': 'testskilldescription',
'skill_difficulty': 0.5,
}
observed_object = question_domain.QuestionSkillLink(
'testquestion', 'testskill', 'testskilldescription', 0.5)
self.assertEqual(expected_object_dict, observed_object.to_dict())
class MergedQuestionSkillLinkDomainTest(test_utils.GenericTestBase):
"""Test for Merged Question Skill Link Domain object."""
def test_to_dict(self):
"""Test to verify to_dict method of the Merged Question Skill Link
Domain object.
"""
expected_object_dict = {
'question_id': 'testquestion',
'skill_ids': ['testskill'],
'skill_descriptions': ['testskilldescription'],
'skill_difficulties': [0.5],
}
observed_object = question_domain.MergedQuestionSkillLink(
'testquestion', ['testskill'], ['testskilldescription'], [0.5])
self.assertEqual(expected_object_dict, observed_object.to_dict())
avg_line_length: 39.082443 | max_line_length: 80 | alphanum_fraction: 0.643814
hexsha: 69ee2b5a4ecd2a458f15db1354a4795fcbf43127 | size: 823 | ext: py | lang: Python
max_stars: easy-receptive-fields-pytorch/houseNet/train.py @ Swinsie/cv-rep-fork (1c3454934645e3f9afa39ba90c3b216eb9ed7f75) | licenses: ["Apache-2.0"] | count: 17 | events: 2020-03-09T07:13:07.000Z .. 2022-03-31T13:49:03.000Z
max_issues: easy-receptive-fields-pytorch/houseNet/train.py @ Swinsie/cv-rep-fork (1c3454934645e3f9afa39ba90c3b216eb9ed7f75) | licenses: ["Apache-2.0"] | count: null | events: null .. null
max_forks: easy-receptive-fields-pytorch/houseNet/train.py @ Swinsie/cv-rep-fork (1c3454934645e3f9afa39ba90c3b216eb9ed7f75) | licenses: ["Apache-2.0"] | count: 5 | events: 2020-03-25T14:46:59.000Z .. 2021-07-18T17:03:49.000Z
content:
import configparser
import numpy as np
from keras.metrics import binary_accuracy
from keras.optimizers import Adam
from networks.trainNetwork import train_network
from networks.uNet3 import UNet3
from util.losses import dice
from util.metrics import intersection_over_union
config = configparser.ConfigParser()
config.read('config.ini')
weights_file_name = 'myUNet'
net = UNet3()
net.compile(optimizer=Adam(), loss=dice, metrics=[intersection_over_union, binary_accuracy])
train_network(model=net,
path_config=config,
weights_file_name=weights_file_name,
batch_size=8,
train_random_state=np.random.RandomState(2009),
val_random_state=np.random.RandomState(2013),
epochs=50,
checkpoint_period=10,
verbose=2)
avg_line_length: 30.481481 | max_line_length: 92 | alphanum_fraction: 0.71932
hexsha: f7ba165d566d105b1aa8ce9af97d76d3582d3cbe | size: 7,216 | ext: py | lang: Python
max_stars: pyvies/__init__.py @ MrMebelMan/pyvies (a0f9c1a5b17be61813bcd3a2308e64ed350df8bb) | licenses: ["MIT"] | count: 1 | events: 2020-05-22T14:17:41.000Z .. 2020-05-22T14:17:41.000Z
max_issues: pyvies/__init__.py @ MrMebelMan/pyvies (a0f9c1a5b17be61813bcd3a2308e64ed350df8bb) | licenses: ["MIT"] | count: null | events: null .. null
max_forks: pyvies/__init__.py @ MrMebelMan/pyvies (a0f9c1a5b17be61813bcd3a2308e64ed350df8bb) | licenses: ["MIT"] | count: 1 | events: 2019-01-08T14:58:05.000Z .. 2019-01-08T14:58:05.000Z
content:
#!/usr/bin/env python3
from requests import post as requests_post
from bs4 import BeautifulSoup as Soup
from re import match as re_match
NoneType = type(None)
class Vies:
# ISO 3166-1 alpha-2 country codes.
EU_COUNTRY_CODES = set([
'AT', # Austria.
'BE', # Belgium.
'BG', # Bulgaria.
'CY', # Cyprus.
'CZ', # Czech Republic.
'DE', # Germany.
'DK', # Denmark.
'EE', # Estonia.
'ES', # Spain.
'FI', # Finland.
'FR', # France.
'GB', # United Kingdom.
'EL', # Greece.
'HR', # Croatia.
'HU', # Hungary.
'IE', # Ireland.
'IT', # Italy.
'LT', # Lithuania.
'LU', # Luxembourg.
'LV', # Latvia.
'MT', # Malta.
'NL', # Netherlands.
'PL', # Poland.
'PT', # Portugal.
'RO', # Romania.
'SE', # Sweden.
'SI', # Slovenia.
'SK', # Slovakia.
])
COUNTRY_CODE_ALIASES = {
'GR': 'EL',
}
def request(self, vat_id: (str, NoneType), country_code: (str, NoneType) = '', bypass_ratelimit: bool = False):
allowed_arg_types = (NoneType, str)
vat_re = r'^([0-9A-Za-z]{2,12})$'
country_code_re = r'^([A-Z]{2})$'
if not isinstance(vat_id, allowed_arg_types):
raise TypeError('vat_id should be either str, or NoneType, not %s' % type(vat_id))
elif not isinstance(country_code, allowed_arg_types):
            raise TypeError('country_code should be either str, or NoneType, not %s' % type(country_code))
country_code = country_code or ''
country_code = country_code.upper()
if country_code in self.COUNTRY_CODE_ALIASES:
country_code = self.COUNTRY_CODE_ALIASES[country_code]
vat_id = vat_id.lstrip().rstrip().upper() if vat_id else ''
vat_id = ''.join([c for c in vat_id if c not in '\n\t -'])
request = ViesRequest(vat_id, country_code)
if len(vat_id) < 2:
request.error = 'vat_id (%s) should be at least 2 characters long' % vat_id
elif country_code and vat_id[:2] == country_code:
vat_id = vat_id[2:]
elif not country_code:
country_code, vat_id = vat_id[:2], vat_id[2:]
if request.error:
request.is_valid = False
return request
if not re_match(vat_re, vat_id):
request.error = "vat_id '%s' doesn't match the pattern '%s'" % (vat_id, vat_re)
elif not re_match(country_code_re, country_code):
request.error = "country_code '%s' doesn't match the pattern '%s'" % (country_code, country_code_re)
elif country_code not in self.EU_COUNTRY_CODES:
request.error = 'unsupported country code: "%s"' % country_code
if request.error:
request.is_valid = False
return request
request.country_code = country_code
request.vat_id = vat_id
request.post(bypass_ratelimit)
return request
class ViesRequest:
RATELIMIT_RESPONSE = 'MS_UNAVAILABLE'
url = 'http://ec.europa.eu/taxation_customs/vies/services/checkVatService'
def __init__(self, vat_id: str, country_code: str):
self.timeout = 10
self.vat_id = vat_id
self.country_code = country_code
self.is_valid = None
self.company_name = None
self.company_address = None
self.data = None
self.response = None
self.error = None
def __str__(self):
if self.is_valid is None:
validity = 'not validated'
elif self.is_valid:
validity = 'valid'
else:
validity = 'invalid'
country_code = self.country_code or ''
vat_id = self.vat_id or ''
ret = 'VAT number "%s%s" (%s)' % (country_code, vat_id, validity)
if self.error:
ret += ', error: %s' % self.error
return ret
@property
def pretty(self):
if self.response:
return self.soup.prettify()
def save_error(self):
error_attr = self.soup.find('faultstring')
if error_attr:
self.error = error_attr.text
def get_tag_text(self, name: str, optional: bool = False):
tag = self.soup.find(name)
if not tag:
if not optional:
self.is_valid = False
self.save_error()
return None
else:
return tag.text
def validate(self, bypass_ratelimit=False):
self.soup = Soup(self.response.text, 'xml')
self.is_valid = self.get_tag_text('valid') == 'true'
if bypass_ratelimit and self.error == self.RATELIMIT_RESPONSE:
self.error = None
self.is_valid = True
return # we will not get the company name and address from ratelimited response anyway
self.company_name = self.get_tag_text('name', optional=True)
if self.company_name:
self.company_name = self.company_name.replace('---', '') or None
self.company_address = self.get_tag_text('address', optional=True)
if self.company_address:
self.company_address = self.company_address.replace('---', '') or None
def post(self, bypass_ratelimit: bool = False):
# bypass_ratelimit is a switch to bypass the 1 minute API ban after sending the same data twice
# API returns valid=False correctly for invalid requests, even when ratelimited
# The idea is to exploit this behaviour by first sending the invalid request for the same country,
# making sure that server returned the correct valid=False response,
# and then continuing to check the real VAT ID, considering ratelimit error as success
headers = {'Content-type': 'text/xml'}
xml_request = '' \
'<?xml version="1.0" encoding="UTF-8"?>' \
'<SOAP-ENV:Envelope ' \
'xmlns:ns0="urn:ec.europa.eu:taxud:vies:services:checkVat:types" ' \
'xmlns:ns1="http://schemas.xmlsoap.org/soap/envelope/" ' \
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
'xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">' \
'<SOAP-ENV:Header/>' \
'<ns1:Body>' \
'<ns0:checkVat>' \
'<ns0:countryCode>%s</ns0:countryCode>' \
'<ns0:vatNumber>%s</ns0:vatNumber>' \
'</ns0:checkVat>' \
'</ns1:Body>' \
'</SOAP-ENV:Envelope>'
self.data = xml_request % (self.country_code, self.vat_id)
if bypass_ratelimit:
data = xml_request % (self.country_code, '1337')
self.response = requests_post(
url=self.url,
data=data,
headers=headers,
timeout=self.timeout
)
self.validate()
if self.error:
return # The server is down, do not try to send the real request
self.response = requests_post(
url=self.url,
data=self.data,
headers=headers,
timeout=self.timeout
)
self.validate(bypass_ratelimit)
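# Usage sketch for the classes above. The VAT number is a made-up placeholder and the
# call performs a real HTTP request against the VIES SOAP endpoint, so network access
# is assumed; this block is illustrative, not part of the original module.
if __name__ == '__main__':
    vies = Vies()
    result = vies.request('LT999999999999', 'LT')  # hypothetical VAT id
    print(result)  # e.g. 'VAT number "LT999999999999" (invalid)'
    print(result.is_valid, result.company_name, result.company_address)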
| 32.651584 | 115 | 0.573032 |
0475370627154153a57372b2b1879cfa44ff17b5
| 10,761 |
py
|
Python
|
pyabsa/functional/checkpoint/checkpoint_manager.py
|
onlyrico/PyABSA
|
d0905eb5253eaa564d2244cd777e3a734bca777a
|
[
"MIT"
] | null | null | null |
pyabsa/functional/checkpoint/checkpoint_manager.py
|
onlyrico/PyABSA
|
d0905eb5253eaa564d2244cd777e3a734bca777a
|
[
"MIT"
] | null | null | null |
pyabsa/functional/checkpoint/checkpoint_manager.py
|
onlyrico/PyABSA
|
d0905eb5253eaa564d2244cd777e3a734bca777a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# file: checkpoint_manager.py
# time: 2021/6/11 0011
# author: yangheng <[email protected]>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
import json
import os.path
import sys
import zipfile
from autocuda import auto_cuda
from findfile import find_files, find_dir, find_file
from google_drive_downloader import GoogleDriveDownloader as gdd
from termcolor import colored
from pyabsa import __version__
from pyabsa.core.apc.prediction.sentiment_classifier import SentimentClassifier
from pyabsa.core.atepc.prediction.aspect_extractor import AspectExtractor
from pyabsa.core.tc.prediction.text_classifier import TextClassifier
def unzip_checkpoint(zip_path):
try:
print('Find zipped checkpoint: {}, unzipping...'.format(zip_path))
sys.stdout.flush()
with zipfile.ZipFile(zip_path, 'r') as z:
z.extractall(zip_path.replace('.zip', ''))
print('Done.')
except zipfile.BadZipfile:
        print('Unzip failed: {}'.format(zip_path))
return zip_path.replace('.zip', '')
class APCCheckpointManager:
@staticmethod
def get_sentiment_classifier(checkpoint: str = None,
sentiment_map: dict = None,
auto_device: bool = True):
if find_dir(os.getcwd(), checkpoint):
checkpoint = find_dir(os.getcwd(), checkpoint)
elif checkpoint.endswith('.zip'):
checkpoint = unzip_checkpoint(find_file(os.getcwd(), checkpoint))
elif not find_dir(os.getcwd(), checkpoint):
checkpoint = APCCheckpointManager.get_checkpoint(checkpoint)
sent_classifier = SentimentClassifier(find_dir(os.getcwd(), checkpoint), sentiment_map=sentiment_map)
sent_classifier.to(auto_cuda()) if auto_device else sent_classifier.cpu()
return sent_classifier
@staticmethod
def get_checkpoint(checkpoint: str = 'Chinese'):
aspect_sentiment_classification_checkpoint = update_checkpoints('APC')
if checkpoint.lower() in [k.lower() for k in aspect_sentiment_classification_checkpoint.keys()]:
print(colored('Downloading checkpoint:{} from Google Drive...'.format(checkpoint), 'green'))
else:
print(colored(
'Checkpoint:{} is not found, you can raise an issue for requesting shares of checkpoints'.format(
checkpoint), 'red'))
sys.exit(-1)
return download_pretrained_model(task='apc',
language=checkpoint.lower(),
archive_path=aspect_sentiment_classification_checkpoint[checkpoint.lower()]['id'])
class ATEPCCheckpointManager:
@staticmethod
def get_aspect_extractor(checkpoint: str = None,
sentiment_map: dict = None,
auto_device: bool = True):
if find_dir(os.getcwd(), checkpoint):
checkpoint = find_dir(os.getcwd(), checkpoint)
elif checkpoint.endswith('.zip'):
checkpoint = unzip_checkpoint(find_file(os.getcwd(), checkpoint))
elif not find_dir(os.getcwd(), checkpoint):
checkpoint = ATEPCCheckpointManager.get_checkpoint(checkpoint)
aspect_extractor = AspectExtractor(find_dir(os.getcwd(), checkpoint), sentiment_map=sentiment_map)
aspect_extractor.to(auto_cuda()) if auto_device else aspect_extractor.cpu()
return aspect_extractor
@staticmethod
def get_checkpoint(checkpoint: str = 'Chinese'):
atepc_checkpoint = update_checkpoints('ATEPC')
if checkpoint.lower() in [k.lower() for k in atepc_checkpoint.keys()]:
print(colored('Downloading checkpoint:{} from Google Drive...'.format(checkpoint), 'green'))
else:
print(colored('Checkpoint:{} is not found.'.format(checkpoint), 'red'))
sys.exit(-1)
return download_pretrained_model(task='atepc',
language=checkpoint.lower(),
archive_path=atepc_checkpoint[checkpoint.lower()]['id'])
class TextClassifierCheckpointManager:
@staticmethod
def get_text_classifier(checkpoint=None,
label_map=None,
auto_device=True):
if find_dir(os.getcwd(), checkpoint):
checkpoint = find_dir(os.getcwd(), checkpoint)
elif checkpoint.endswith('.zip'):
checkpoint = unzip_checkpoint(find_file(os.getcwd(), checkpoint))
elif not find_dir(os.getcwd(), checkpoint):
checkpoint = TextClassifierCheckpointManager.get_checkpoint(checkpoint)
text_classifier = TextClassifier(find_dir(os.getcwd(), checkpoint), label_map=label_map)
text_classifier.to(auto_cuda()) if auto_device else text_classifier.cpu()
return text_classifier
@staticmethod
def get_checkpoint(checkpoint: str = 'Chinese'):
text_classification_checkpoint = update_checkpoints('TextClassification')
if checkpoint.lower() in [k.lower() for k in text_classification_checkpoint.keys()]:
print(colored('Downloading checkpoint:{} from Google Drive...'.format(checkpoint), 'green'))
else:
print(colored('Checkpoint:{} is not found.'.format(checkpoint), 'red'))
sys.exit(-1)
return download_pretrained_model(task='atepc',
language=checkpoint.lower(),
archive_path=text_classification_checkpoint[checkpoint.lower()]['id'])
def compare_version(version1, version2):
# 1 means greater, 0 means equal, -1 means lower
if version1 and not version2:
return 1
elif version2 and not version1:
return -1
else:
version1 = version1.split('.')
version2 = version2.split('.')
for v1, v2 in zip(version1, version2):
if len(v1) == len(v2):
if v1 > v2:
return 1
if v2 > v1:
return -1
else:
if v1.startswith(v2):
return -1
elif v2.startswith(v1):
return 1
elif v1 == v2:
return 0
else:
return int(v1 > v2)
return 0
def parse_checkpoint_info(t_checkpoint_map, task='APC'):
print('*' * 23, colored('Available {} model checkpoints for Version:{}'.format(task, __version__), 'green'), '*' * 23)
for i, checkpoint in enumerate(t_checkpoint_map):
print('-' * 100)
print("{}. Checkpoint Name: {}\nDescription: {}\nComment: {} \nVersion: {}".format(
i + 1,
checkpoint,
t_checkpoint_map[checkpoint]['description']
if 'description' in t_checkpoint_map[checkpoint] else '',
t_checkpoint_map[checkpoint]['comment']
if 'comment' in t_checkpoint_map[checkpoint] else '',
t_checkpoint_map[checkpoint]['version']
if 'version' in t_checkpoint_map[checkpoint] else ''
))
print('-' * 100)
return t_checkpoint_map
def update_checkpoints(task=''):
try:
checkpoint_url = '1jjaAQM6F9s_IEXNpaY-bQF9EOrhq0PBD'
if os.path.isfile('./checkpoints.json'):
os.remove('./checkpoints.json')
gdd.download_file_from_google_drive(file_id=checkpoint_url, dest_path='./checkpoints.json')
checkpoint_map = json.load(open('./checkpoints.json', 'r'))
current_version_map = {}
for t_map in checkpoint_map:
if '-' in t_map:
min_ver, _, max_ver = t_map.partition('-')
            elif '+' in t_map:
                min_ver, _, max_ver = t_map.partition('+')
else:
min_ver = t_map
max_ver = ''
max_ver = max_ver if max_ver else 'N.A.'
if compare_version(min_ver, __version__) <= 0 and compare_version(__version__, max_ver) <= 0:
current_version_map.update(checkpoint_map[t_map]) # add checkpoint_map[t_map]
t_checkpoint_map = {}
if task:
t_checkpoint_map = dict(current_version_map)[task.upper()] if task in current_version_map else {}
parse_checkpoint_info(t_checkpoint_map, task)
else:
for task_map in current_version_map:
parse_checkpoint_info(current_version_map[task_map], task_map)
# os.remove('./checkpoints.json')
return t_checkpoint_map if task else current_version_map
except Exception as e:
print('\nFailed to query checkpoints (Error: {}), try manually download the checkpoints from: \n'.format(e),
'[1]\tGoogle Drive\t: https://drive.google.com/drive/folders/1yiMTucHKy2hAx945lgzhvb9QeHvJrStC\n'
'[2]\tBaidu NetDisk\t: https://pan.baidu.com/s/1K8aYQ4EIrPm1GjQv_mnxEg (Access Code: absa)\n')
sys.exit(-1)
def download_pretrained_model(task='apc', language='chinese', archive_path='', model_name='any_model'):
    print(colored('Notice: The pretrained models are provided for testing only; they were '
                  'neither trained with fine-tuned hyper-parameters nor trained for enough steps. '
                  'It is recommended to train the model on your own custom datasets.', 'red')
          )
# if not os.path.exists('./checkpoints'):
# os.mkdir('./checkpoints')
tmp_dir = '{}_{}_TRAINED_MODEL'.format(task.upper(), language.upper())
dest_path = os.path.join('./checkpoints', tmp_dir)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
if find_files(dest_path, '.model') and find_files(dest_path, '.config'):
return dest_path
save_path = os.path.join(dest_path, '{}.zip'.format(model_name))
try:
if '/' in archive_path:
archive_path = archive_path.split('/')[-2]
gdd.download_file_from_google_drive(file_id=archive_path,
dest_path=save_path,
unzip=True,
showsize=True)
except:
raise ConnectionError("Fail to download checkpoint, seems to be a connection error.")
os.remove(save_path)
return dest_path
def load_sentiment_classifier(checkpoint: str = None,
sentiment_map: dict = None,
auto_device: bool = True):
infer_model = SentimentClassifier(checkpoint, sentiment_map=sentiment_map)
infer_model.to(auto_cuda()) if auto_device else infer_model.cpu()
return infer_model
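# Illustrative, pure-Python usage sketch of compare_version() defined above
# (arbitrary example version strings; no downloads or checkpoints involved).
if __name__ == '__main__':
    print(compare_version('1.2.0', '1.1.9'))  # 1  -> first version is newer
    print(compare_version('1.2', '1.2'))      # 0  -> versions are equal
    print(compare_version('1.1', '1.2.3'))    # -1 -> first version is older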
| 43.216867 | 123 | 0.617508 |
5db5cb7fd89ecb47981c8921f43411b6ab6d59d0
| 2,078 |
py
|
Python
|
scripts/create_background_data.py
|
open-dynamic-robot-initiative/trifinger_object_tracking
|
d57cd6abcd880c228b4e26a8b8deeaea55d32828
|
[
"BSD-3-Clause"
] | 6 |
2020-11-03T12:58:54.000Z
|
2022-01-05T23:39:52.000Z
|
scripts/create_background_data.py
|
open-dynamic-robot-initiative/trifinger_object_tracking
|
d57cd6abcd880c228b4e26a8b8deeaea55d32828
|
[
"BSD-3-Clause"
] | 1 |
2020-12-03T10:06:13.000Z
|
2020-12-03T10:06:13.000Z
|
scripts/create_background_data.py
|
open-dynamic-robot-initiative/trifinger_object_tracking
|
d57cd6abcd880c228b4e26a8b8deeaea55d32828
|
[
"BSD-3-Clause"
] | 5 |
2020-11-12T00:43:41.000Z
|
2021-05-21T14:21:25.000Z
|
#!/usr/bin/env python3
# %%
import argparse
import os
import cv2 as cv
import numpy as np
from PIL import Image
from scipy import ndimage
def show_image(image):
image = Image.fromarray(image[:, :, ::-1], "RGB")
image.show()
def load_images(path):
image_names = [n for n in os.listdir(path) if ".png" in n]
images = {}
for image_name in image_names:
image_path = os.path.join(path, image_name)
image = cv.imread(image_path) # BGR
images[image_name] = image
return images
# %%
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"data_dir",
type=str,
help="Directory containing the dataset",
)
args = parser.parse_args()
# %%
# load data ----------------------------------------------------
frame_folders = [
f
for f in os.listdir(args.data_dir)
if os.path.isdir(os.path.join(args.data_dir, f))
]
for frame_folder in frame_folders:
frame_path = os.path.join(args.data_dir, frame_folder)
segment_names = [
name for name in os.listdir(frame_path) if name[1].isdigit()
]
if len(segment_names) == 0:
continue
images = load_images(frame_path)
for camera_name in ["60", "180", "300"]:
image = images["camera" + camera_name + ".png"]
masks = {
key: images[key]
for key in images.keys()
if camera_name in key and key[1].isdigit() and key[0] != "n"
}
for key in masks:
binary_mask = np.sum(masks[key], axis=2) == 0
eroded_binary_mask = ndimage.binary_erosion(
binary_mask, structure=np.ones([10, 10])
)
image = eroded_binary_mask[..., None] * image
cv.imwrite(
os.path.join(frame_path, "n" + camera_name + ".png"), image
)
print(os.path.join(frame_path, "n" + camera_name + ".png"))
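# Stand-alone sketch of the masking step used above, on synthetic arrays instead of
# camera images: erode the binary background mask so a margin around each object is
# removed too, then black out everything that is not background. Illustration only,
# not part of the original script.
def _background_masking_demo():
    demo_image = np.full((8, 8, 3), 200, dtype=np.uint8)  # fake camera frame
    demo_mask = np.zeros((8, 8, 3), dtype=np.uint8)
    demo_mask[3:5, 3:5] = 255                             # fake object segment
    background = np.sum(demo_mask, axis=2) == 0           # True where no object
    eroded = ndimage.binary_erosion(background, structure=np.ones([2, 2]))
    return eroded[..., None] * demo_image                 # object + margin set to 0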
| 25.975 | 76 | 0.54283 |
4b38618b312709d7a15ca792cc55d4094e1afe08
| 9,383 |
py
|
Python
|
scripts/stastic_disparity.py
|
HKBU-HPML/FADNet-PP
|
6e653e8f1fa0f55f10068f5592cbc8b49bb571e4
|
[
"MIT"
] | null | null | null |
scripts/stastic_disparity.py
|
HKBU-HPML/FADNet-PP
|
6e653e8f1fa0f55f10068f5592cbc8b49bb571e4
|
[
"MIT"
] | null | null | null |
scripts/stastic_disparity.py
|
HKBU-HPML/FADNet-PP
|
6e653e8f1fa0f55f10068f5592cbc8b49bb571e4
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os
import numpy as np
import csv
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.ticker import PercentFormatter
from PIL import Image, ImageOps
from skimage import io
from utils.preprocess import load_pfm
FONTSIZE=20
matplotlib.rcParams.update({'font.size': FONTSIZE})
#matplotlib.rc('xtick', labelsize=FONTSIZE)
#matplotlib.rc('ytick', labelsize=FONTSIZE)
#matplotlib.rc('xlabel', labelsize=FONTSIZE)
#matplotlib.rc('ylabel', labelsize=FONTSIZE)
#DATAPATH = '/media/sf_Shared_Data/gpuhomedataset/dispnet'
#DATAPATH = '/home/datasets/imagenet/dispnet/virtual'
#DATAPATH = '/home/datasets/imagenet/dispnet'
DATAPATH = './data'
OUTPUTPATH = '/tmp'
#FILELIST = 'FlyingThings3D_release_TEST.list'
#FILELIST = 'FlyingThings3D_release_TRAIN.list'
#FILELIST='lists/real_release.list'
#FILELIST='lists/kitti-groundtruth.list'
#FILELIST='lists/kitti2015_train.list'
#FILELIST='lists/MB2014_TRAIN.list'
#FILELIST='lists/eth3d_train.list'
FILELIST='lists/FlyingThings3D_release_TRAIN_100.list'
RESULTLIST = 'NEW_' + FILELIST
CLEANRESULTLIST = 'CLEAN_' + FILELIST
def plot_hist(d, save=False, filename=None, plot=True, color='r'):
flatten = d.ravel()
mean = np.mean(flatten)
max = np.max(flatten)
std = np.std(flatten)
print('len: %d, mean: %.3f, std: %.3f' % (len(flatten), mean, std))
#return n_neg, flatten.size # return #negative, total
if plot:
#count, bins, ignored = plt.hist(flatten, 50, color=color)
flatten = flatten[np.abs(flatten) > 0.0]
count, bins, ignored = plt.hist(flatten, bins=np.arange(0,300), density=True, color=color)
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
plt.xlabel('Disparity')
plt.ylabel('Percentage')
if save:
plt.savefig(os.path.join(OUTPUTPATH, '%s.pdf'%filename), bbox_inches='tight')
else:
#plt.show()
pass
#plt.clf()
return mean, std, max
def statistic(file_list):
img_pairs = []
with open(file_list, "r") as f:
img_pairs = f.readlines()
csv_file = open(RESULTLIST, 'a')
for f in img_pairs:
names = f.split()
name = names[2]
print('Name: ', name)
gt_disp_name = os.path.join(DATAPATH, name)
gt_disp, scale = load_pfm(gt_disp_name)
print('Shape: ', gt_disp.shape, ', Mean: ', np.mean(gt_disp))
name_items = name.split('/')
save_name = 'hist_{}_{}_{}'.format(name_items[-4], name_items[-3], name_items[-1].split('.')[0])
mean, std, max = plot_hist(gt_disp, save=True, filename=save_name, plot=False)
writer = csv.writer(csv_file, delimiter='\t')
writer.writerow([name, mean, std, max])
csv_file.close()
def statistic_with_file(fn):
result_file = open(CLEANRESULTLIST, 'a')
with open(fn, 'r') as f:
total_array = []
fns = []
for line in f.readlines():
items = line.split('\t')
total_array.append([float(i) for i in items[1:]])
fns.append(items[0])
total_array = np.array(total_array)
print('Shape: ', total_array[:, 0].shape)
for i, mean in enumerate(total_array[:, 0]):
if mean < 150:
grt = fns[i]
name_items = grt.split('/')
left = 'FlyingThings3D_release/frames_cleanpass/%s/%s/%s/left/%s.png' % (name_items[-5], name_items[-4], name_items[-3], name_items[-1].split('.')[0])
right = 'FlyingThings3D_release/frames_cleanpass/%s/%s/%s/right/%s.png' % (name_items[-5], name_items[-4], name_items[-3], name_items[-1].split('.')[0])
#result_file.write("%s %s %s\n" % (left, right, fns[i]))
plot_hist(total_array[:, 0])
#plot_hist(total_array[:, 1])
#plot_hist(total_array[:, 2])
result_file.close()
def statistic_mean_std(filelist):
img_pairs = []
with open(filelist, "r") as f:
img_pairs = f.readlines()
means = []
for f in img_pairs:
names = f.split()
leftname = names[0]
rightname = names[1]
leftfn = os.path.join(DATAPATH, leftname)
rightfn = os.path.join(DATAPATH, rightname)
leftimgdata = io.imread(leftfn)
rightimgdata = io.imread(rightfn)
leftmean = np.mean(leftimgdata.ravel())
rightmean = np.mean(rightimgdata.ravel())
print('leftmean: ', leftmean)
print('rightmean: ', rightmean)
means.append((leftmean+rightmean)/2)
means = np.array(means)
print('total mean: ', np.mean(means))
print('total std: ', np.std(means))
def statistic_disparity(filelist):
img_pairs = []
with open(filelist, "r") as f:
img_pairs = f.readlines()
all = np.array([], dtype=np.float32)
for f in img_pairs:
names = f.split()
dispname = names[2]
fn = os.path.join(DATAPATH, dispname)
print('fn: ', fn)
if fn.find('.png') >= 0:
gt_disp = Image.open(fn)
gt_disp = np.ascontiguousarray(gt_disp,dtype=np.float32)/256
else:
gt_disp, _ = load_pfm(fn)
gt_disp[np.isinf(gt_disp)] = 0
all = np.concatenate((gt_disp.ravel(), all))
mean = np.mean(all)
std = np.std(all)
color='#A9D18E'
mean, std, max = plot_hist(all, save=True, filename=filelist, plot=True, color=color)
print('total mean: ', mean)
print('total std: ', std)
def statistic_kitti_disparity(filelist):
img_pairs = []
with open(filelist, "r") as f:
img_pairs = f.readlines()
all = np.array([], dtype=np.float32)
for f in img_pairs:
dispname = f[:-1]
fn = dispname
gt_disp = Image.open(fn)
gt_disp = np.ascontiguousarray(gt_disp,dtype=np.float32)/256
#gt_disp, _ = load_pfm(fn)
all = np.concatenate((gt_disp.ravel(), all))
np.save('stat.npy', all)
mean = np.mean(all)
std = np.std(all)
print('total mean: ', mean)
print('total std: ', std)
mean, std, max = plot_hist(all, save=True, filename='real_disp.png', plot=False, color='r')
def force_plot():
all = np.load('stat.npy')
mean, std, max = plot_hist(all, save=True, filename='real_disp.png', plot=True, color='r')
def plot_hist_with_filename(fn):
fnt='img00000.bmp'
leftfn = '/media/sf_Shared_Data/gpuhomedataset/dispnet/real_release/frames_cleanpass/left/%s'%fnt
rightfn = '/media/sf_Shared_Data/gpuhomedataset/dispnet/real_release/frames_cleanpass/right/%s'%fnt
realimgdata = io.imread(leftfn)
#leftfn = '/media/sf_Shared_Data/gpuhomedataset/FlyingThings3D_release/frames_cleanpass/TRAIN/A/0001/left/%s'%fn
#rightfn = '/media/sf_Shared_Data/gpuhomedataset/FlyingThings3D_release/frames_cleanpass/TRAIN/A/0001/right/%s'%fn
#realimgdata = io.imread(leftfn)
leftfn = '/media/sf_Shared_Data/gpuhomedataset/FlyingThings3D_release/frames_cleanpass/TRAIN/A/0000/left/%s'%fn
rightfn = '/media/sf_Shared_Data/gpuhomedataset/FlyingThings3D_release/frames_cleanpass/TRAIN/A/0000/right/%s'%fn
leftimgdata = io.imread(leftfn)
rightimgdata = io.imread(rightfn)
mean, std, max = plot_hist(leftimgdata, save=False, filename=None, plot=True, color='r')
mean, std, max = plot_hist(realimgdata, save=False, filename=None, plot=True, color='b')
plt.show()
def extract_exception_of_occulution():
#occulution_list = 'CC_FlyingThings3D_release_TRAIN.list'
#occulution_list = './lists/CC_FlyingThings3D_release_TRAIN.list'
occulution_list = './lists/girl20_TRAIN.list'
img_pairs = []
with open(occulution_list, "r") as f:
img_pairs = f.readlines()
means = []
maxcount = 10000
i = 0
for f in img_pairs:
names = f.split()
name = names[2]
#gt_disp_name = os.path.join(DATAPATH, 'clean_dispnet', name)
gt_disp_name = os.path.join(DATAPATH, name)
if not os.path.isfile(gt_disp_name):
#print('Not found: ', gt_disp_name)
continue
gt_disp, scale = load_pfm(gt_disp_name)
mean = np.mean(gt_disp)
means.append(mean)
i+=1
if i > maxcount:
break
print('Name: ', name, ', Mean: ', mean, ', std: ', np.std(gt_disp), ' min: ', np.min(gt_disp), ' max: ', np.max(gt_disp))
np.save('virtualmean.log', np.array(means))
#mean, std, max = plot_hist(np.array(means), save=False, filename=None, plot=True, color='r')
#plt.show()
def parse_mean_log():
filename = './logs/meanstd_test.log'
f = open(filename, 'r')
means = []
fns = []
for line in f.readlines():
mean = line.split()[-4]
means.append(float(mean))
fns.append(line.split()[1])
means = np.array(means)
fns = np.array(fns)
k = 10
#sorted = np.argsort(means)[-k:]
sorted = np.argsort(means)[:k]
print(sorted)
print(means[sorted])
print(fns[sorted])
#plt.scatter(range(0, len(means)), means)
#plot_hist(np.array(means), plot=True)
#plt.show()
if __name__ == '__main__':
#statistic(FILELIST)
#statistic_with_file(RESULTLIST)
#fn='img00000.bmp'
#fn='0006.png'
#plot_hist_with_filename(fn)
#statistic_mean_std(FILELIST)
statistic_disparity(FILELIST)
#statistic_kitti_disparity(FILELIST)
#extract_exception_of_occulution()
#parse_mean_log()
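    # Illustrative addition (not in the original script): the same statistics can be
    # exercised on synthetic disparity values without any dataset files on disk.
    # demo_disp is an arbitrary random array, not real ground-truth disparity.
    demo_disp = np.abs(np.random.normal(loc=40.0, scale=15.0,
                                        size=(4, 64, 64))).astype(np.float32)
    demo_mean, demo_std, demo_max = plot_hist(demo_disp, save=False, plot=False)
    print('demo mean/std/max:', demo_mean, demo_std, demo_max)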
| 37.086957 | 168 | 0.636044 |
998d4c432624cb536db530bd06e771556c57820a
| 6,586 |
py
|
Python
|
3D_Printed_Daft_Punk_Helmet/3D_Printed_Daft_Punk_Helmet-Side-Animation/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 665 |
2017-09-27T21:20:14.000Z
|
2022-03-31T09:09:25.000Z
|
3D_Printed_Daft_Punk_Helmet/3D_Printed_Daft_Punk_Helmet-Side-Animation/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 641 |
2017-10-03T19:46:37.000Z
|
2022-03-30T18:28:46.000Z
|
3D_Printed_Daft_Punk_Helmet/3D_Printed_Daft_Punk_Helmet-Side-Animation/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 734 |
2017-10-02T22:47:38.000Z
|
2022-03-30T14:03:51.000Z
|
# SPDX-FileCopyrightText: 2014 Phil Burgess for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# Adafruit Trinket+NeoPixel animation for Daft Punk-inspired helmet.
# Contains some ATtiny85-specific stuff; won't run as-is on Uno, etc.
# Operates in HSV (hue, saturation, value) colorspace rather than RGB.
# Animation is an interference pattern between two waves; one controls
# saturation, the other controls value (brightness). The wavelength,
# direction, speed and type (square vs triangle wave) for each is randomly
# selected every few seconds. Hue is always linear, but other parameters
# are similarly randomized.
import random
import board
import neopixel
from analogio import AnalogIn
n_leds = 29 # number of LEDs per horn
led_pin = board.D0 # which pin your pixels are connected to
# initialize neopixel strip
pixels = neopixel.NeoPixel(led_pin, n_leds, brightness=1, auto_write=False)
count = 1 # countdown to next animation change
# Gamma-correction table
gamma = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5,
5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10,
10, 10, 11, 11, 11, 12, 12, 13, 13, 13, 14, 14, 15, 15, 16, 16,
17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 24, 24, 25,
25, 26, 27, 27, 28, 29, 29, 30, 31, 32, 32, 33, 34, 35, 35, 36,
37, 38, 39, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 50,
51, 52, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 67, 68,
69, 70, 72, 73, 74, 75, 77, 78, 79, 81, 82, 83, 85, 86, 87, 89,
90, 92, 93, 95, 96, 98, 99, 101, 102, 104, 105, 107, 109, 110,
112, 114, 115, 117, 119, 120, 122, 124, 126, 127, 129, 131, 133,
135, 137, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158,
160, 162, 164, 167, 169, 171, 173, 175, 177, 180, 182, 184, 186,
189, 191, 193, 196, 198, 200, 203, 205, 208, 210, 213, 215, 218,
220, 223, 225, 228, 231, 233, 236, 239, 241, 244, 247, 249, 252,
255
]
# initialize 3D list
wave = [0] * 5, [0] * 5, [0] * 5
wave_type = 0 # 0 = square wave, 1 = triangle wave
value_frame = 1 # start-of-frame value
value_pixel = 2 # pixel-to-pixel value
inc_frame = 3 # frame-to-frame increment
inc_pixel = 4 # pixel-to-pixel inc
wave_h = 0 # hue
wave_s = 1 # saturation
wave_v = 2 # brightness
# Random number generator is seeded from an unused 'floating'
# analog input - this helps ensure the random color choices
# aren't always the same order.
pin = AnalogIn(board.A0)
random.seed(pin.value)
pin.deinit()
# generate a non-zero random number for frame and pixel increments
def nz_random():
random_number = 0
while random_number <= 0:
random_number = random.randint(0,15) - 7
return random_number
while True:
w = i = n = s = v = r = g = b = v1 = s1 = 0
if count <= 0: # time for new animation
count = 250 + random.randint(0,250) # effect run for 5-10 sec.
for w in range(3): # three waves (H,S,V)
wave[w][wave_type] = random.randint(0,2)# square vs triangle
wave[w][inc_frame] = nz_random() # frame increment
wave[w][inc_pixel] = nz_random() # pixel increment
wave[w][value_pixel] = wave[w][value_frame]
wave[wave_s][inc_pixel] *= 16 # make saturation & value
wave[wave_v][inc_pixel] *= 16 # blinkier along strip
else: # continue animation
count -= 1
for w in range(3):
wave[w][value_frame] += wave[w][inc_frame]
wave[w][value_pixel] = wave[w][value_frame]
# Render current animation frame. COGNITIVE HAZARD: fixed point math.
for i in range(n_leds): # for each LED along strip...
# Coarse (8-bit) HSV-to-RGB conversion, hue first:
n = (wave[wave_h][value_pixel] % 43) * 6 # angle within sextant
        sextant = wave[wave_h][value_pixel] // 43  # sextant number 0-5 (integer division)
# R to Y
if sextant == 0:
r = 255
g = n
b = 0
# Y to G
elif sextant == 1:
r = 254 - n
g = 255
b = 0
# G to C
elif sextant == 2:
r = 0
g = 255
b = n
# C to B
elif sextant == 3:
r = 0
g = 254 - n
b = 255
# B to M
elif sextant == 4:
r = n
g = 0
b = 255
# M to R
else:
r = 255
g = 0
b = 254 - n
# Saturation = 1-256 to allow >>8 instead of /255
s = wave[wave_s][value_pixel]
if wave[wave_s][wave_type]: # triangle wave?
if s & 0x80: # downslope
s = (s & 0x7F) << 1
s1 = 256 - s
else: # upslope
s = s<<1
s1 = 1 + s
s = 255 - s
else:
if s & 0x80: # square wave
s1 = 256 # 100% saturation
s = 0
else: # 0% saturation
s1 = 1
s = 255
# Value (brightness) = 1-256 for similar reasons
v = wave[wave_v][value_pixel]
# value (brightness) = 1-256 for similar reasons
if wave[wave_v][wave_type]: # triangle wave?
if v & 0x80: # downslope
v1 = 64 - ((v & 0x7F) << 1)
else: # upslope
v1 = 1 + (v << 1)
else:
if v & 0x80: # square wave; on/off
v1 = 256
else:
v1 = 1
# gamma rgb values
gr = ((((r * s1) >> 8) + s) * v1) >> 8
gg = ((((g * s1) >> 8) + s) * v1) >> 8
gb = ((((b * s1) >> 8) + s) * v1) >> 8
# gamma rgb indices range check
if -256 < gr < 256:
r = gamma[gr]
if -256 < gg < 256:
g = gamma[gg]
if -256 < gb < 256:
b = gamma[gb]
pixels[i] = (r, g, b)
# update wave values along length of strip (values may wrap, is OK!)
for w in range(3):
wave[w][value_pixel] += wave[w][inc_pixel]
pixels.show()
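# Purely illustrative (placed after the animation loop, so it never runs on the
# device): the coarse HSV-to-RGB sextant conversion used inside the loop above,
# rewritten as a stand-alone function over an 8-bit hue at full saturation/value.
def coarse_hue_to_rgb(hue8):
    n = (hue8 % 43) * 6          # position within the current sextant, 0-252
    sextant = (hue8 // 43) % 6   # sextant number 0-5
    if sextant == 0:
        return (255, n, 0)        # red -> yellow
    if sextant == 1:
        return (254 - n, 255, 0)  # yellow -> green
    if sextant == 2:
        return (0, 255, n)        # green -> cyan
    if sextant == 3:
        return (0, 254 - n, 255)  # cyan -> blue
    if sextant == 4:
        return (n, 0, 255)        # blue -> magenta
    return (255, 0, 254 - n)      # magenta -> red
# e.g. coarse_hue_to_rgb(0) == (255, 0, 0) and coarse_hue_to_rgb(128) == (0, 255, 252)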
| 33.948454 | 85 | 0.4959 |
6077061e55392fc62721aa80b6bf3c5b134e14a4
| 43,916 |
py
|
Python
|
deepspeed/pt/deepspeed_light.py
|
jeffra/ghpages-test
|
d2500f2acfb6708efbd52e7857b92cc269a1ffa8
|
[
"MIT"
] | null | null | null |
deepspeed/pt/deepspeed_light.py
|
jeffra/ghpages-test
|
d2500f2acfb6708efbd52e7857b92cc269a1ffa8
|
[
"MIT"
] | null | null | null |
deepspeed/pt/deepspeed_light.py
|
jeffra/ghpages-test
|
d2500f2acfb6708efbd52e7857b92cc269a1ffa8
|
[
"MIT"
] | null | null | null |
'''
Copyright 2019 The Microsoft DeepSpeed Team
'''
import logging
import torch
import os
import torch.distributed as dist
from torch.nn.modules import Module
from tensorboardX import SummaryWriter
from deepspeed.pt.deepspeed_timer import ThroughputTimer, SynchronizedWallClockTimer
from deepspeed.pt.deepspeed_zero_optimizer import FP16_DeepSpeedZeroOptimizer
from deepspeed.pt.fp16_optimizer import FP16_Optimizer
from deepspeed.pt.fp16_unfused_optimizer import FP16_UnfusedOptimizer
from deepspeed.pt.deepspeed_fused_lamb import FusedLamb
from deepspeed.pt.deepspeed_config import DeepSpeedConfig, \
ADAM_OPTIMIZER, LAMB_OPTIMIZER, DEEPSPEED_OPTIMIZERS
from deepspeed.pt.deepspeed_dataloader import DeepSpeedDataLoader
from deepspeed.pt.deepspeed_constants import ROUTE_TRAIN, ROUTE_PREDICT, \
ROUTE_EVAL, TORCH_DISTRIBUTED_DEFAULT_PORT
import deepspeed.pt.deepspeed_lr_schedules as lr_schedules
from deepspeed.pt.deepspeed_csr_tensor import CSRTensor
MEMORY_OPT_ALLREDUCE_SIZE = 500000000
SUMMARY_WRITER_DIR_NAME = "JobId"
try:
from apex_C import flatten
from apex_C import unflatten
except ImportError:
try:
_ = warned_flatten
except NameError:
print(
"Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten."
)
warned_flatten = True
from torch._utils import _flatten_dense_tensors as flatten
from torch._utils import _unflatten_dense_tensors as unflatten
def split_half_float_double_csr(tensors):
dtypes = [
"torch.cuda.HalfTensor",
"torch.cuda.FloatTensor",
"torch.cuda.DoubleTensor",
CSRTensor.type()
]
buckets = []
for i, dtype in enumerate(dtypes):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append((dtype, bucket))
return buckets
def _initialize_parameter_parallel_groups(parameter_parallel_size=None):
data_parallel_size = int(dist.get_world_size())
if parameter_parallel_size is None:
parameter_parallel_size = int(data_parallel_size)
print(data_parallel_size, parameter_parallel_size)
assert data_parallel_size % parameter_parallel_size == 0, \
'world size should be divisible by parameter parallel size'
rank = dist.get_rank()
my_group = None
for i in range(dist.get_world_size() // parameter_parallel_size):
ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
group = torch.distributed.new_group(ranks)
if rank in ranks:
my_group = group
return my_group
def print_configuration(args, name):
print('{}:'.format(name), flush=True)
for arg in sorted(vars(args)):
dots = '.' * (29 - len(arg))
print(' {} {} {}'.format(arg, dots, getattr(args, arg)), flush=True)
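# Usage sketch for print_configuration() above, using a throwaway argparse namespace.
# The field names and file name below are arbitrary examples for illustration, not
# DeepSpeed's actual command line arguments; this block is not part of the original module.
if __name__ == '__main__':
    import argparse
    _demo_args = argparse.Namespace(local_rank=0, deepspeed_config='ds_config.json')
    print_configuration(_demo_args, 'demo configuration')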
class DeepSpeedLight(Module):
r"""DeepSpeed engine for training.
"""
def __init__(self,
args,
model,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
mpu=None,
dist_init_required=None,
collate_fn=None):
super(DeepSpeedLight, self).__init__()
logging.basicConfig(level=logging.INFO,
format="[%(levelname)s %(asctime)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
self.client_optimizer = optimizer
self.client_model_parameters = model_parameters
self.client_lr_scheduler = lr_scheduler
self.training_data = training_data
self.collate_fn = collate_fn
self.mpu = mpu
self.data_parallel_group = None
self.global_steps = 0
self.micro_steps = 0
self.skipped_steps = 0
self.gradient_predivide_factor = 1.0
self.gradient_average = True
self.warn_unscaled_loss = True
if dist_init_required is None:
dist_init_required = not dist.is_initialized()
self._mpi_check(args, dist_init_required)
self.dist_backend = "nccl"
if dist_init_required:
if not dist.is_initialized():
logging.info("Initializing torch distributed with backend: {}".format(
self.dist_backend))
dist.init_process_group(backend=self.dist_backend)
else:
                logging.warning(
                    "Was given dist_init_required=True but detected that torch "
                    "distributed was already initialized, cannot initialize twice.")
self._do_args_sanity_check(args)
self._configure_with_arguments(args, mpu)
self._do_sanity_check()
self.sample_count = 0
if self.tensorboard_enabled():
self.summary_writer = self.get_summary_writer()
self._init_distributed(dist_init_required)
# Throughput timer
self.tput_timer = ThroughputTimer(
batch_size=self.train_micro_batch_size_per_gpu(),
num_workers=self.world_size,
monitor_memory=False)
self.training_dataloader = self.deepspeed_io(
training_data) if training_data else None
# Configure distributed model
self._configure_distributed_model(model)
# Configure optimizer and scheduler
self.optimizer = None
self.lr_scheduler = None
if model_parameters or optimizer:
self._configure_optimizer(optimizer, model_parameters)
self._configure_lr_scheduler(lr_scheduler)
self._report_progress(0)
# Configure wall clock timer
self.timers = SynchronizedWallClockTimer()
# Bookkeeping for csr support
self.csr_tensor_module_names = set()
if self.sparse_gradients_enabled():
for name, module in self.module.named_modules():
if isinstance(module, torch.nn.Embedding):
self.csr_tensor_module_names.add(name)
logging.info("Will convert {} to sparse (csr) "
"tensor during training".format(name))
self.save_non_zero_checkpoint = False
self.save_zero_checkpoint = False
self._configure_checkpointing(dist_init_required)
if self.global_rank == 0:
self._config.print('DeepSpeedLight configuration')
if self.dump_state():
print_configuration(self, 'DeepSpeedLight')
def _mpi_check(self, args, dist_init_required):
if hasattr(args, 'deepspeed_mpi') and args.deepspeed_mpi:
from mpi4py import MPI
import subprocess
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
world_size = comm.Get_size()
master_addr = None
if rank == 0:
hostname_cmd = ["hostname -I"]
result = subprocess.check_output(hostname_cmd, shell=True)
master_addr = result.decode('utf-8').split()[0]
master_addr = comm.bcast(master_addr, root=0)
# Determine local rank by assuming hostnames are unique
proc_name = MPI.Get_processor_name()
all_procs = comm.allgather(proc_name)
local_rank = sum([i == proc_name for i in all_procs[:rank]])
os.environ['RANK'] = str(rank)
os.environ['WORLD_SIZE'] = str(world_size)
args.local_rank = local_rank
os.environ['MASTER_ADDR'] = master_addr
os.environ['MASTER_PORT'] = TORCH_DISTRIBUTED_DEFAULT_PORT
logging.info(
"Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'],
args.local_rank,
os.environ['WORLD_SIZE'],
os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
if not dist_init_required and dist.is_initialized():
assert dist.get_rank() == rank, "MPI rank {} does not match torch rank {}".format(rank, dist.get_rank())
assert dist.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format(world_size, dist.get_world_size())
def tensorboard_enabled(self):
return self._config.tensorboard_enabled
def tensorboard_output_path(self):
return self._config.tensorboard_output_path
def tensorboard_job_name(self):
return self._config.tensorboard_job_name
def get_summary_writer(self,
name="DeepSpeedJobName",
base=os.environ["HOME"] + "/tensorboard"):
if self.tensorboard_job_name():
name = self.tensorboard_job_name()
if self.tensorboard_output_path():
return SummaryWriter(log_dir=self.tensorboard_output_path())
if 'DLWS_JOB_ID' in os.environ:
SUMMARY_WRITER_DIR_NAME = os.environ['DLWS_JOB_ID'] + "/logs"
return SummaryWriter(log_dir=os.path.join(base, SUMMARY_WRITER_DIR_NAME, name))
def wall_clock_breakdown(self):
return self._config.wall_clock_breakdown
def sparse_gradients_enabled(self):
return self._config.sparse_gradients_enabled
def train_batch_size(self):
return self._config.train_batch_size
def train_micro_batch_size_per_gpu(self):
return self._config.train_micro_batch_size_per_gpu
def optimizer_name(self):
return self._config.optimizer_name
def optimizer_params(self):
return self._config.optimizer_params
def optimizer_legacy_fusion(self):
return self._config.optimizer_legacy_fusion
def scheduler_name(self):
return self._config.scheduler_name
def scheduler_params(self):
return self._config.scheduler_params
def zero_optimization(self):
return self._config.zero_enabled
def allgather_size(self):
return self._config.allgather_size
def fp16_enabled(self):
return self._config.fp16_enabled
def loss_scale(self):
return self._config.loss_scale
def gradient_accumulation_steps(self):
return self._config.gradient_accumulation_steps
def allreduce_always_fp32(self):
return self._config.allreduce_always_fp32
def postscale_gradients(self):
return not self._config.prescale_gradients
def steps_per_print(self):
return self._config.steps_per_print
def disable_allgather(self):
return self._config.disable_allgather
def dump_state(self):
return self._config.dump_state
def gradient_clipping(self):
return self._config.gradient_clipping
def dynamic_loss_scale(self):
return self._config.loss_scale == 0
def initial_dynamic_scale(self):
return self._config.initial_dynamic_scale
def dynamic_loss_scale_args(self):
return self._config.dynamic_loss_scale_args
def _configure_lr_scheduler(self, client_lr_scheduler):
# First check for scheduler in json configuration
lr_scheduler = self._scheduler_from_config(self.optimizer)
if lr_scheduler:
logging.info(
f'DeepSpeed using configured LR scheduler = {self.scheduler_name()}')
self.lr_scheduler = lr_scheduler
else:
logging.warning('DeepSpeed using client LR scheduler')
self.lr_scheduler = client_lr_scheduler
logging.info(f'DeepSpeed LR Scheduler = {self.lr_scheduler}')
def _configure_checkpointing(self, dist_init_required):
dp_rank = torch.distributed.get_rank(
) if self.mpu is None else self.mpu.get_data_parallel_rank()
#only the first data parallel process needs to store the model checkpoint
self.save_non_zero_checkpoint = True if dp_rank == 0 else False
if self.zero_optimization():
pp_rank = torch.distributed.get_rank(group=self.optimizer.dp_process_group)
#only the first parameter parallel process needs to store the optimizer state checkpoints for zero
self.save_zero_checkpoint = True if pp_rank == dp_rank else False
def _scheduler_from_config(self, optimizer):
scheduler_name = self.scheduler_name()
if scheduler_name is not None:
if hasattr(lr_schedules, scheduler_name):
scheduler = getattr(lr_schedules, scheduler_name)
else:
assert hasattr(torch.optim.lr_scheduler, scheduler_name), \
f"DeepSpeed does not recognize LR scheduler {scheduler_name}"
scheduler = getattr(torch.optim.lr_scheduler, scheduler_name)
scheduler_params = self.scheduler_params()
instantiated_scheduler = scheduler(optimizer, **scheduler_params)
return instantiated_scheduler
else:
return None
def _init_distributed(self, dist_init_required):
if self.local_rank >= 0:
torch.cuda.set_device(self.local_rank)
self.device = torch.device("cuda", self.local_rank)
self.world_size = dist.get_world_size()
self.global_rank = dist.get_rank()
logging.info("Set device to local rank {} within node.".format(
self.local_rank))
else:
self.world_size = 1
self.global_rank = 0
self.device = torch.device("cuda")
# Configure based on command line arguments
def _configure_with_arguments(self, args, mpu):
self.local_rank = args.local_rank if hasattr(args, 'local_rank') else 0
self._config = DeepSpeedConfig(args.deepspeed_config, mpu)
# Validate command line arguments
def _do_args_sanity_check(self, args):
if hasattr(args, 'deepscale_config') and args.deepscale_config is not None:
logging.warning(
"************ --deepscale_config is deprecated, please use --deepspeed_config ************"
)
if hasattr(args, 'deepspeed_config'):
assert args.deepspeed_config is None, "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config"
args.deepspeed_config = args.deepscale_config
assert hasattr(args, 'local_rank') and type(args.local_rank) == int, \
'DeepSpeed requires integer command line parameter --local_rank'
assert hasattr(args, 'deepspeed_config') and args.deepspeed_config is not None, \
'DeepSpeed requires --deepspeed_config to specify configuration file'
assert os.path.isfile(args.deepspeed_config), \
'DeepSpeed configuration file: {} is not an existing file'.format(args.deepspeed_config)
def _is_supported_optimizer(self, optimizer_name):
return optimizer_name in DEEPSPEED_OPTIMIZERS or \
getattr(torch.optim, optimizer_name, None) is not None
# Validate configuration based on command line arguments
def _do_sanity_check(self):
if not self.client_optimizer:
assert self._is_supported_optimizer(self.optimizer_name()), \
'{} is not a supported DeepSpeed Optimizer'.format(self.optimizer_name())
assert self.client_model_parameters, \
'DeepSpeed {} optimizer requires parameters in initialize() call'.format(self.optimizer_name())
if self.optimizer_name() == LAMB_OPTIMIZER:
assert self.dynamic_loss_scale(), \
'DeepSpeed {} optimizer requires dynamic loss scaling'.format(self.optimizer_name())
def _configure_distributed_model(self, model):
self.module = model
if self.fp16_enabled():
self.module.half()
self.module.to(self.device)
if self.mpu is None:
self.data_parallel_group = _initialize_parameter_parallel_groups()
self.dp_world_size = dist.get_world_size()
src_rank = 0
else:
self.data_parallel_group = self.mpu.get_data_parallel_group()
self.dp_world_size = self.mpu.get_data_parallel_world_size()
src_rank = self.mpu.get_model_parallel_rank()
for p in self.module.parameters():
if torch.is_tensor(p):
dist.broadcast(p, src_rank, group=self.data_parallel_group)
# TODO: support new AMP optimizer
# self.module.half()
# self.module.to(self.local_rank)
#self.module, self.optimizer = amp.initialize(self.module, self.optimizer, opt_level="O2")
# Configure optimizer
def _configure_optimizer(self, client_optimizer, model_parameters):
if client_optimizer is not None:
basic_optimizer = client_optimizer
logging.info('Using client Optimizer as basic optimizer')
else:
basic_optimizer = self._configure_basic_optimizer(model_parameters)
logging.info(
'Using DeepSpeed Optimizer param name {} as basic optimizer'.format(
self.optimizer_name()))
logging.info('DeepSpeed Basic Optimizer = {}'.format(basic_optimizer))
if self.zero_optimization() and self.optimizer_name() == ADAM_OPTIMIZER:
self.optimizer = self._configure_zero_optimizer(basic_optimizer)
elif self.fp16_enabled():
self.optimizer = self._configure_fp16_optimizer(basic_optimizer)
else:
self.optimizer = basic_optimizer
# logging.info('DeepSpeed Final Optimizer = {}'.format(self.optimizer.state_dict()))
def _configure_basic_optimizer(self, model_parameters):
optimizer_parameters = self.optimizer_params()
if self.fp16_enabled() and 'max_grad_norm' in optimizer_parameters.keys():
optimizer_parameters['max_grad_norm'] = 0.0
if self.optimizer_name() == ADAM_OPTIMIZER:
from apex.optimizers.fused_adam import FusedAdam
optimizer = FusedAdam(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == LAMB_OPTIMIZER:
optimizer = FusedLamb(model_parameters, **optimizer_parameters)
else:
torch_optimizer = getattr(torch.optim, self.optimizer_name())
optimizer = torch_optimizer(model_parameters, **optimizer_parameters)
return optimizer
def _configure_fp16_optimizer(self, optimizer):
initial_dynamic_scale = self.initial_dynamic_scale()
dynamic_loss_args = self.dynamic_loss_scale_args()
clip_grad = self.gradient_clipping()
if self.optimizer_name() == ADAM_OPTIMIZER:
if self.dynamic_loss_scale():
logging.info('Creating fp16 optimizer with dynamic loss scale')
optimizer = FP16_Optimizer(
optimizer,
dynamic_loss_scale=True,
initial_dynamic_scale=initial_dynamic_scale,
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion())
else:
logging.info('Creating fp16 optimizer with static loss scale: {}'.format(
self.loss_scale()))
optimizer = FP16_Optimizer(
optimizer,
static_loss_scale=self.loss_scale(),
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion())
else:
logging.info('Creating fp16 unfused optimizer with dynamic loss scale')
optimizer = FP16_UnfusedOptimizer(
optimizer,
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_lamb_legacy=self.optimizer_legacy_fusion()
if self.optimizer_name() == LAMB_OPTIMIZER else False)
return optimizer
def _configure_zero_optimizer(self, optimizer):
logging.info('Creating fp16 zero optimizer')
optimizer = FP16_DeepSpeedZeroOptimizer(
optimizer,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
dp_process_group=self.data_parallel_group,
clip_grad=self.gradient_clipping(),
all_gather_partitions=not self.disable_allgather(),
allgather_size=self.allgather_size(),
mpu=self.mpu)
return optimizer
def deepspeed_io(self,
dataset,
batch_size=None,
route=ROUTE_TRAIN,
pin_memory=True,
data_sampler=None,
collate_fn=None,
num_local_io_workers=None):
if not isinstance(dataset, torch.utils.data.Dataset):
raise ValueError("Training data must be a torch Dataset")
if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL):
data_sampler = torch.utils.data.SequentialSampler(dataset)
if batch_size is None:
batch_size = self.train_micro_batch_size_per_gpu()
if collate_fn is None:
collate_fn = self.collate_fn
# Currently we only use timer in train route
deepspeed_io_timer = None
if route == ROUTE_TRAIN:
deepspeed_io_timer = self.tput_timer
return DeepSpeedDataLoader(dataset=dataset,
batch_size=batch_size,
pin_memory=pin_memory,
collate_fn=collate_fn,
local_rank=self.local_rank,
tput_timer=deepspeed_io_timer,
num_local_io_workers=num_local_io_workers,
data_sampler=data_sampler)
def train(self):
r"""
"""
self.warn_unscaled_loss = True
self.module.train()
def eval(self):
r"""
"""
self.warn_unscaled_loss = True
self.module.train(False)
def _scale_loss(self, loss):
if isinstance(loss, torch.Tensor):
loss = loss / self.gradient_accumulation_steps()
elif isinstance(loss, tuple) and isinstance(loss[0], torch.Tensor):
loss = (l / self.gradient_accumulation_steps() for l in loss)
elif isinstance(loss, list) and isinstance(loss[0], torch.Tensor):
loss = [l / self.gradient_accumulation_steps() for l in loss]
else:
if self.warn_unscaled_loss:
logging.warning(
f'DeepSpeed unable to scale loss because of type: {type(loss)}')
self.warn_unscaled_loss = False
return loss
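    # Worked example (illustration only, not part of the original source): with
    # gradient_accumulation_steps() == 4, a micro-batch loss of 2.0 is returned as
    # 0.5, so summing the four scaled micro-batch losses over one accumulation
    # window reproduces the mean loss of the effective train batch.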
def forward(self, *inputs, **kwargs):
r"""Execute forward propagation
Arguments:
*inputs: Variable length input list
**kwargs: variable length keyword arguments
"""
if self.wall_clock_breakdown():
self.timers('forward_microstep').start()
self.timers('forward').start()
if self.training_dataloader is None:
self.tput_timer.start()
loss = self.module(*inputs, **kwargs)
# scale loss w.r.t. gradient accumulation if needed
if self.gradient_accumulation_steps() > 1:
loss = self._scale_loss(loss)
if self.wall_clock_breakdown():
self.timers('forward').stop()
self.timers('forward_microstep').stop()
return loss
def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE):
if self.is_gradient_accumulation_boundary():
self.buffered_allreduce_fallback(elements_per_buffer=bucket_size)
def backward(self, loss, allreduce_gradients=True):
r"""Execute backward pass on the loss
Arguments:
loss: Torch tensor on which to execute backward propagation
allreduce_gradients: If this is False, then gradient averaging will be skipped. Default is True.
"""
if self.is_gradient_accumulation_boundary() and self.tensorboard_enabled(
) and torch.distributed.get_rank(
) == 0: # deepspeed tensorboard support for loss
self.sample_count += (self.train_micro_batch_size_per_gpu() *
torch.distributed.get_world_size() *
self.gradient_accumulation_steps())
self.summary_events = [
(f'Train/Samples/train_loss',
loss.mean().item() * self.gradient_accumulation_steps(),
self.sample_count)
]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('backward_microstep').start()
self.timers('backward').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use backward"
if self.wall_clock_breakdown():
self.timers('backward_inner_microstep').start()
self.timers('backward_inner').start()
if self.zero_optimization():
self.optimizer.backward(loss)
elif self.fp16_enabled():
self.optimizer.backward(loss)
# TODO: Use new AMP semantics as below
# with amp.scale_loss(loss, self.optimizer) as scaled_loss:
# scaled_loss.backward()
else:
loss.backward()
if self.wall_clock_breakdown():
self.timers('backward_inner').stop()
self.timers('backward_inner_microstep').stop()
if self.wall_clock_breakdown():
self.timers('backward_allreduce_microstep').start()
self.timers('backward_allreduce').start()
if allreduce_gradients:
self.allreduce_gradients()
if self.wall_clock_breakdown():
self.timers('backward_allreduce').stop()
self.timers('backward_allreduce_microstep').stop()
self.timers('backward').stop()
self.timers('backward_microstep').stop()
def is_gradient_accumulation_boundary(self):
return (self.micro_steps + 1) % \
self.gradient_accumulation_steps() == 0
def step(self):
r"""Execute the weight update step after forward and backward propagation on effective_train_batch
"""
if self.wall_clock_breakdown():
self.timers('step_microstep').start()
self.timers('step').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use step"
report_progress = self.global_rank == 0 if self.global_rank else True
if self.is_gradient_accumulation_boundary():
self.optimizer.step()
self.optimizer.zero_grad()
# Check overlow here since in DS fp16 optimizer, the overflow is updated in above step() function.
overflow = False
if hasattr(self.optimizer, 'overflow'):
overflow = self.optimizer.overflow
if overflow:
self.skipped_steps += 1
else:
if self.lr_scheduler is not None:
self.lr_scheduler.step()
if report_progress and (self.global_steps +
1) % self.steps_per_print() == 0:
self._report_progress(self.global_steps + 1)
self.global_steps += 1
self.tput_timer.stop(report_progress)
if self.is_gradient_accumulation_boundary() and self.tensorboard_enabled(
) and torch.distributed.get_rank() == 0: # deepspeed tensorboard support for lr
self.summary_events = [(f'Train/Samples/lr',
self.get_lr()[0],
self.sample_count)]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('step').stop()
self.timers('step_microstep').stop()
self.timers.log([
'forward_microstep',
'backward_microstep',
'backward_inner_microstep',
'backward_allreduce_microstep',
'step_microstep'
])
if self.is_gradient_accumulation_boundary():
if self.tensorboard_enabled() and torch.distributed.get_rank(
) == 0: # this is done before the log because log resets timers
self.summary_events = [(f'Train/Samples/elapsed_time_ms_forward', self.timers('forward').elapsed(reset=False) * 1000.0, self.sample_count), \
(f'Train/Samples/elapsed_time_ms_backward', self.timers('backward').elapsed(reset=False) * 1000.0, self.sample_count), \
(f'Train/Samples/elapsed_time_ms_backward_inner', self.timers('backward_inner').elapsed(reset=False) * 1000.0, self.sample_count), \
(f'Train/Samples/elapsed_time_ms_backward_allreduce', self.timers('backward_allreduce').elapsed(reset=False) * 1000.0, self.sample_count), \
(f'Train/Samples/elapsed_time_ms_step', self.timers('step').elapsed(reset=False) * 1000.0, self.sample_count)
]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
self.timers.log([
'forward',
'backward',
'backward_inner',
'backward_allreduce',
'step'
])
self.micro_steps += 1
def _get_optimizer_param(self, param_name):
result = []
if not self.optimizer:
return result
for group in self.optimizer.param_groups:
if param_name in group:
result.append(group[param_name])
else:
result.append(0.0)
return result
def get_lr(self):
return self._get_optimizer_param('lr')
def get_mom(self):
return self._get_optimizer_param('betas')
def _report_progress(self, step):
lr = self.get_lr()
mom = self.get_mom()
logging.info('rank:{} step={}, skipped={}, lr={}, mom={}'.format(
self.global_rank,
step,
self.skipped_steps,
lr,
mom))
def allreduce_bucket(self, bucket):
tensor = flatten(bucket)
tensor_to_allreduce = tensor
if self.allreduce_always_fp32():
tensor_to_allreduce = tensor.float()
if self.postscale_gradients():
if self.gradient_predivide_factor != 1.0:
tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor)
dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)
if self.gradient_average:
if self.gradient_predivide_factor != self.dp_world_size:
tensor_to_allreduce.mul_(self.gradient_predivide_factor /
self.dp_world_size)
else:
tensor_to_allreduce.div_(self.dp_world_size)
dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)
if self.allreduce_always_fp32() and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
def allreduce_and_copy(self, small_bucket):
allreduced = self.allreduce_bucket(small_bucket)
for buf, synced in zip(small_bucket, unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, numel_per_bucket=500000000):
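        # Pack gradients into sub-buckets of roughly numel_per_bucket elements,
        # then flatten and all-reduce each sub-bucket as a single tensor so the
        # communication calls stay large without building one huge flat buffer.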
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket)
small_bucket = []
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket)
def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000):
grads = []
for param_name, param in self.module.named_parameters():
if param.grad is not None:
grad_data = param.grad.data
param_name_root = param_name.split('.', 1)[0]
if self.sparse_gradients_enabled(
) and param_name_root in self.csr_tensor_module_names:
grads.append(CSRTensor(grad_data))
else:
grads.append(grad_data)
split_buckets = split_half_float_double_csr(grads)
for i, bucket_tuple in enumerate(split_buckets):
bucket_type, bucket = bucket_tuple
if bucket_type == CSRTensor.type():
self.csr_allreduce_no_retain(bucket)
else:
self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer)
def csr_allreduce_no_retain(self, bucket):
allreduced_csrs = self.csr_allreduce_bucket(bucket)
# Densify csr tensor and copy back to original location
for csr in allreduced_csrs:
dense_tensor = csr.to_dense()
csr.orig_dense_tensor.copy_(dense_tensor)
def csr_allreduce_bucket(self, bucket):
csr_list = []
for csr in bucket:
csr_list.append(self.csr_allreduce(csr))
return csr_list
def csr_allreduce(self, csr):
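        # Instead of all-reducing a dense tensor, gather the CSR indices and
        # values from every rank and concatenate them locally.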
# Pre-divide for fp16 stability
csr.values.div_(self.dp_world_size)
indices_device_list = self.csr_all_gather(csr.indices)
values_device_list = self.csr_all_gather(csr.values)
csr.indices = torch.cat(indices_device_list)
csr.values = torch.cat(values_device_list)
return csr
def csr_all_gather(self, value):
my_size = torch.LongTensor([value.size()[0]]).cuda()
all_sizes = self.all_gather_scalar(my_size)
max_size = torch.cat(all_sizes).max()
fill_size = (max_size - my_size)
assert value.dim() in [1, 2]
if value.dim() == 1:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size)])
tensor_list = [
value.new_zeros(max_size) for _ in range(dist.get_world_size())
]
else:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size, value.size()[1])])
tensor_list = [
value.new_zeros(max_size,
value.size()[1]) for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, value, group=self.data_parallel_group)
tensors = []
for dev_idx, t in enumerate(tensor_list):
size = all_sizes[dev_idx][0]
tensors.append(t.index_select(0, torch.LongTensor(range(size)).cuda()))
return tensors
def all_gather_scalar(self, value):
tensor_list = [value.new_zeros(value.size()) for _ in range(self.dp_world_size)]
dist.all_gather(tensor_list, value, group=self.data_parallel_group)
return tensor_list
def module_state_dict(self, destination=None, prefix='', keep_vars=False):
sd = self.module.state_dict(destination, prefix, keep_vars)
return sd
def load_module_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
def _get_zero_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
pp_rank = torch.distributed.get_rank(group=self.optimizer.dp_process_group)
filename = 'zero_pp_rank_{}'.format(pp_rank)
zero_ckpt_name = os.path.join(
checkpoints_path,
str(tag),
filename + '_mp_rank_{:02d}'.format(mp_rank) + 'optim_states.pt')
return zero_ckpt_name
def _get_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
ckpt_name = os.path.join(checkpoints_path,
str(tag),
'mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')
return ckpt_name
def _ensure_directory_exists(self, filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
def load_checkpoint(self, load_dir, tag, load_optimizer_states=True):
r"""Load training checkpoint
Arguments:
load_dir: Required. Directory to load the checkpoint from
tag: Required. Checkpoint tag used as a unique identifier for the checkpoint. Ex. Global Step.
load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance
Return:
load_path: Path of the loaded checkpoint. None if loading the checkpoint failed
client_state: State dictionary used for loading required training states in the client code.
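
        Example (illustrative; the directory, tag and ``engine`` name are
        placeholders)::

            load_path, client_state = engine.load_checkpoint('./ckpts', 'global_step1000')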
"""
load_path, client_states = self._load_checkpoint(load_dir, tag, load_optimizer_states=load_optimizer_states)
if self.zero_optimization() and load_path is not None:
self._load_zero_checkpoint(load_dir,
tag,
load_optimizer_states=load_optimizer_states)
return load_path, client_states
def _load_checkpoint(self, load_dir, tag, load_optimizer_states=True):
load_path = self._get_ckpt_name(load_dir, tag)
if not os.path.exists(load_path):
logging.warn(
'Client provided checkpoint load path: {} does not exist ... skip checkpoint load'
.format(load_path))
return None, None
logging.info('Loading checkpoint: {}'.format(load_path))
checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)
self.load_module_state_dict(checkpoint['module'])
if not self.zero_optimization():
self.optimizer.load_state_dict(checkpoint['optimizer'],
load_optimizer_states=load_optimizer_states)
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
self.csr_tensor_module_names = checkpoint['csr_tensor_module_names']
self.global_steps = checkpoint['global_steps']
self.skipped_steps = checkpoint['skipped_steps']
deepspeed_states = [
'module',
'optimizer',
'csr_tensor_module_names',
'skipped_steps',
'global_steps'
]
client_state = {
            key: value
            for key, value in checkpoint.items()
            if key not in deepspeed_states
}
return load_path, client_state
def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True):
zero_checkpoint_name = self._get_zero_ckpt_name(load_dir, tag)
if not os.path.exists(zero_checkpoint_name):
logging.warn(
'Client provided checkpoint load path: {} does not exist ... skip checkpoint load'
.format(zero_checkpoint_name))
return None
zero_sd = torch.load(zero_checkpoint_name, map_location='cpu')
self.optimizer.load_state_dict(zero_sd['optimizer_state_dict'],
load_optimizer_states=load_optimizer_states)
logging.info('loading zero checkpoint {}'.format(zero_checkpoint_name))
def save_checkpoint(self, save_dir, tag, client_state={}):
r"""Save training checkpoint
Arguments:
save_dir: Required. Directory for saving the checkpoint
tag: Required. Checkpoint tag used as a unique identifier for the checkpoint. Ex. Global Step.
client_state: Optional. State dictionary used for saving required training states in the client code.
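
        Example (illustrative; the directory, tag and ``engine`` name are
        placeholders)::

            engine.save_checkpoint('./ckpts', 'global_step1000', client_state={'epoch': 3})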
"""
#This is to make sure the checkpoint names are created without collision
#There seems to be issue creating them in parallel
self._create_checkpoint_files(save_dir, tag)
try:
if self.save_non_zero_checkpoint:
self._save_checkpoint(save_dir, tag, client_state=client_state)
if self.save_zero_checkpoint:
self._save_zero_checkpoint(save_dir, tag)
        except Exception:
logging.error(f'Failed Saving model checkpoint to {save_dir} with tag {tag}')
return False
return True
def _create_checkpoint_files(self, save_dir, tag):
#checkpoint files are created sequentially
for rank in range(dist.get_world_size()):
if rank == dist.get_rank():
try:
if self.save_non_zero_checkpoint:
checkpoint_name = self._get_ckpt_name(save_dir, tag)
self._ensure_directory_exists(checkpoint_name)
if self.save_zero_checkpoint:
checkpoint_name = self._get_zero_ckpt_name(save_dir, tag)
self._ensure_directory_exists(checkpoint_name)
                except Exception:
logging.error(
f'Failed Saving model checkpoint to {save_dir} with tag {tag}')
return False
dist.barrier()
def _save_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
#self._ensure_directory_exists(save_path)
state = {
'module':
self.module_state_dict(),
'optimizer':
self.optimizer.state_dict()
if self.optimizer and not self.zero_optimization() else None,
'lr_scheduler':
self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None,
'csr_tensor_module_names':
self.csr_tensor_module_names,
'skipped_steps':
self.skipped_steps,
'global_steps':
self.global_steps,
}
state.update(client_state)
logging.info('Saving model checkpoint: {}'.format(save_path))
torch.save(state, save_path)
def _save_zero_checkpoint(self, save_path, tag):
try:
zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag)
#self._ensure_directory_exists(zero_checkpoint_name)
        except Exception:
            logging.error(
                f'Failed saving ZeRO model checkpoint to {save_path} with tag {tag}')
            return
zero_sd = {'optimizer_state_dict': self.optimizer.state_dict()}
torch.save(zero_sd, zero_checkpoint_name)
logging.info('zero checkpoint saved {}'.format(zero_checkpoint_name))
| 40.363971 | 184 | 0.624442 |
32741969ce0760cbc251d20eaf24499d22b6b2e6
| 27,649 |
py
|
Python
|
tests/unit/modules/test_systemd_service.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 2 |
2020-11-02T22:08:26.000Z
|
2020-11-14T13:44:46.000Z
|
tests/unit/modules/test_systemd_service.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 4 |
2021-02-06T14:30:48.000Z
|
2021-12-13T20:50:10.000Z
|
tests/unit/modules/test_systemd_service.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 2 |
2020-11-04T06:32:02.000Z
|
2020-11-06T11:01:18.000Z
|
# -*- coding: utf-8 -*-
"""
:codeauthor: Rahul Handay <[email protected]>
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import pytest
# Import Salt Libs
import salt.modules.systemd_service as systemd
import salt.utils.systemd
from salt.exceptions import CommandExecutionError
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
# Import Salt Testing Libs
from tests.support.unit import TestCase
_SYSTEMCTL_STATUS = {
"sshd.service": {
"stdout": """\
* sshd.service - OpenSSH Daemon
Loaded: loaded (/usr/lib/systemd/system/sshd.service; disabled; vendor preset: disabled)
Active: inactive (dead)""",
"stderr": "",
"retcode": 3,
"pid": 12345,
},
"foo.service": {
"stdout": """\
* foo.service
Loaded: not-found (Reason: No such file or directory)
Active: inactive (dead)""",
"stderr": "",
"retcode": 3,
"pid": 12345,
},
}
# This reflects systemd >= 231 behavior
_SYSTEMCTL_STATUS_GTE_231 = {
"bar.service": {
"stdout": "Unit bar.service could not be found.",
"stderr": "",
"retcode": 4,
"pid": 12345,
},
}
_LIST_UNIT_FILES = """\
service1.service enabled
service2.service disabled
service3.service static
timer1.timer enabled
timer2.timer disabled
timer3.timer static"""
class SystemdTestCase(TestCase, LoaderModuleMockMixin):
"""
Test case for salt.modules.systemd
"""
def setup_loader_modules(self):
return {systemd: {}}
def test_systemctl_reload(self):
"""
        Test reloading systemctl
"""
mock = MagicMock(
side_effect=[
{"stdout": "Who knows why?", "stderr": "", "retcode": 1, "pid": 12345},
{"stdout": "", "stderr": "", "retcode": 0, "pid": 54321},
]
)
with patch.dict(systemd.__salt__, {"cmd.run_all": mock}):
self.assertRaisesRegex(
CommandExecutionError,
"Problem performing systemctl daemon-reload: Who knows why?",
systemd.systemctl_reload,
)
self.assertTrue(systemd.systemctl_reload())
def test_get_enabled(self):
"""
Test to return a list of all enabled services
"""
cmd_mock = MagicMock(return_value=_LIST_UNIT_FILES)
listdir_mock = MagicMock(return_value=["foo", "bar", "baz", "README"])
sd_mock = MagicMock(
return_value=set([x.replace(".service", "") for x in _SYSTEMCTL_STATUS])
)
access_mock = MagicMock(
side_effect=lambda x, y: x
!= os.path.join(systemd.INITSCRIPT_PATH, "README")
)
sysv_enabled_mock = MagicMock(side_effect=lambda x, _: x == "baz")
with patch.dict(systemd.__salt__, {"cmd.run": cmd_mock}):
with patch.object(os, "listdir", listdir_mock):
with patch.object(systemd, "_get_systemd_services", sd_mock):
with patch.object(os, "access", side_effect=access_mock):
with patch.object(systemd, "_sysv_enabled", sysv_enabled_mock):
self.assertListEqual(
systemd.get_enabled(),
["baz", "service1", "timer1.timer"],
)
def test_get_disabled(self):
"""
Test to return a list of all disabled services
"""
cmd_mock = MagicMock(return_value=_LIST_UNIT_FILES)
# 'foo' should collide with the systemd services (as returned by
# sd_mock) and thus not be returned by _get_sysv_services(). It doesn't
# matter that it's not part of the _LIST_UNIT_FILES output, we just
# want to ensure that 'foo' isn't identified as a disabled initscript
# even though below we are mocking it to show as not enabled (since
# only 'baz' will be considered an enabled sysv service).
listdir_mock = MagicMock(return_value=["foo", "bar", "baz", "README"])
sd_mock = MagicMock(
return_value=set([x.replace(".service", "") for x in _SYSTEMCTL_STATUS])
)
access_mock = MagicMock(
side_effect=lambda x, y: x
!= os.path.join(systemd.INITSCRIPT_PATH, "README")
)
sysv_enabled_mock = MagicMock(side_effect=lambda x, _: x == "baz")
with patch.dict(systemd.__salt__, {"cmd.run": cmd_mock}):
with patch.object(os, "listdir", listdir_mock):
with patch.object(systemd, "_get_systemd_services", sd_mock):
with patch.object(os, "access", side_effect=access_mock):
with patch.object(systemd, "_sysv_enabled", sysv_enabled_mock):
self.assertListEqual(
systemd.get_disabled(),
["bar", "service2", "timer2.timer"],
)
def test_get_all(self):
"""
Test to return a list of all available services
"""
listdir_mock = MagicMock(
side_effect=[
["foo.service", "multi-user.target.wants", "mytimer.timer"],
[],
["foo.service", "multi-user.target.wants", "bar.service"],
["mysql", "nginx", "README"],
]
)
access_mock = MagicMock(
side_effect=lambda x, y: x
!= os.path.join(systemd.INITSCRIPT_PATH, "README")
)
with patch.object(os, "listdir", listdir_mock):
with patch.object(os, "access", side_effect=access_mock):
self.assertListEqual(
systemd.get_all(), ["bar", "foo", "mysql", "mytimer.timer", "nginx"]
)
def test_available(self):
"""
Test to check that the given service is available
"""
mock = MagicMock(side_effect=lambda x: _SYSTEMCTL_STATUS[x])
# systemd < 231
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 230}):
with patch.object(systemd, "_systemctl_status", mock):
self.assertTrue(systemd.available("sshd.service"))
self.assertFalse(systemd.available("foo.service"))
# systemd >= 231
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 231}):
with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231):
with patch.object(systemd, "_systemctl_status", mock):
self.assertTrue(systemd.available("sshd.service"))
self.assertFalse(systemd.available("bar.service"))
# systemd < 231 with retcode/output changes backported (e.g. RHEL 7.3)
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 219}):
with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231):
with patch.object(systemd, "_systemctl_status", mock):
self.assertTrue(systemd.available("sshd.service"))
self.assertFalse(systemd.available("bar.service"))
def test_missing(self):
"""
        Test the inverse of service.available.
"""
mock = MagicMock(side_effect=lambda x: _SYSTEMCTL_STATUS[x])
# systemd < 231
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 230}):
with patch.object(systemd, "_systemctl_status", mock):
self.assertFalse(systemd.missing("sshd.service"))
self.assertTrue(systemd.missing("foo.service"))
# systemd >= 231
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 231}):
with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231):
with patch.object(systemd, "_systemctl_status", mock):
self.assertFalse(systemd.missing("sshd.service"))
self.assertTrue(systemd.missing("bar.service"))
# systemd < 231 with retcode/output changes backported (e.g. RHEL 7.3)
with patch.dict(systemd.__context__, {"salt.utils.systemd.version": 219}):
with patch.dict(_SYSTEMCTL_STATUS, _SYSTEMCTL_STATUS_GTE_231):
with patch.object(systemd, "_systemctl_status", mock):
self.assertFalse(systemd.missing("sshd.service"))
self.assertTrue(systemd.missing("bar.service"))
def test_show(self):
"""
Test to show properties of one or more units/jobs or the manager
"""
show_output = "a=b\nc=d\ne={ f=g ; h=i }\nWants=foo.service bar.service\n"
mock = MagicMock(return_value=show_output)
with patch.dict(systemd.__salt__, {"cmd.run": mock}):
self.assertDictEqual(
systemd.show("sshd"),
{
"a": "b",
"c": "d",
"e": {"f": "g", "h": "i"},
"Wants": ["foo.service", "bar.service"],
},
)
def test_execs(self):
"""
Test to return a list of all files specified as ``ExecStart`` for all
services
"""
mock = MagicMock(return_value=["a", "b"])
with patch.object(systemd, "get_all", mock):
mock = MagicMock(return_value={"ExecStart": {"path": "c"}})
with patch.object(systemd, "show", mock):
self.assertDictEqual(systemd.execs(), {"a": "c", "b": "c"})
class SystemdScopeTestCase(TestCase, LoaderModuleMockMixin):
"""
Test case for salt.modules.systemd, for functions which use systemd
scopes
"""
def setup_loader_modules(self):
return {systemd: {}}
unit_name = "foo"
mock_none = MagicMock(return_value=None)
mock_success = MagicMock(return_value=0)
mock_failure = MagicMock(return_value=1)
mock_true = MagicMock(return_value=True)
mock_false = MagicMock(return_value=False)
mock_empty_list = MagicMock(return_value=[])
mock_run_all_success = MagicMock(
return_value={"retcode": 0, "stdout": "", "stderr": "", "pid": 12345}
)
mock_run_all_failure = MagicMock(
return_value={"retcode": 1, "stdout": "", "stderr": "", "pid": 12345}
)
def _change_state(self, action, no_block=False):
"""
Common code for start/stop/restart/reload/force_reload tests
"""
# We want the traceback if the function name can't be found in the
# systemd execution module.
func = getattr(systemd, action)
# Remove trailing _ in "reload_"
action = action.rstrip("_").replace("_", "-")
systemctl_command = ["systemctl"]
if no_block:
systemctl_command.append("--no-block")
systemctl_command.extend([action, self.unit_name + ".service"])
scope_prefix = ["systemd-run", "--scope"]
assert_kwargs = {"python_shell": False}
if action in ("enable", "disable"):
assert_kwargs["ignore_retcode"] = True
with patch.object(systemd, "_check_for_unit_changes", self.mock_none):
with patch.object(systemd, "_unit_file_changed", self.mock_none):
with patch.object(systemd, "_check_unmask", self.mock_none):
with patch.object(
systemd, "_get_sysv_services", self.mock_empty_list
):
# Has scopes available
with patch.object(
salt.utils.systemd, "has_scope", self.mock_true
):
# Scope enabled, successful
with patch.dict(
systemd.__salt__,
{
"config.get": self.mock_true,
"cmd.run_all": self.mock_run_all_success,
},
):
ret = func(self.unit_name, no_block=no_block)
self.assertTrue(ret)
self.mock_run_all_success.assert_called_with(
scope_prefix + systemctl_command, **assert_kwargs
)
# Scope enabled, failed
with patch.dict(
systemd.__salt__,
{
"config.get": self.mock_true,
"cmd.run_all": self.mock_run_all_failure,
},
):
if action in ("stop", "disable"):
ret = func(self.unit_name, no_block=no_block)
self.assertFalse(ret)
else:
self.assertRaises(
CommandExecutionError,
func,
self.unit_name,
no_block=no_block,
)
self.mock_run_all_failure.assert_called_with(
scope_prefix + systemctl_command, **assert_kwargs
)
# Scope disabled, successful
with patch.dict(
systemd.__salt__,
{
"config.get": self.mock_false,
"cmd.run_all": self.mock_run_all_success,
},
):
ret = func(self.unit_name, no_block=no_block)
self.assertTrue(ret)
self.mock_run_all_success.assert_called_with(
systemctl_command, **assert_kwargs
)
# Scope disabled, failed
with patch.dict(
systemd.__salt__,
{
"config.get": self.mock_false,
"cmd.run_all": self.mock_run_all_failure,
},
):
if action in ("stop", "disable"):
ret = func(self.unit_name, no_block=no_block)
self.assertFalse(ret)
else:
self.assertRaises(
CommandExecutionError,
func,
self.unit_name,
no_block=no_block,
)
self.mock_run_all_failure.assert_called_with(
systemctl_command, **assert_kwargs
)
# Does not have scopes available
with patch.object(
salt.utils.systemd, "has_scope", self.mock_false
):
# The results should be the same irrespective of
# whether or not scope is enabled, since scope is not
# available, so we repeat the below tests with it both
# enabled and disabled.
for scope_mock in (self.mock_true, self.mock_false):
# Successful
with patch.dict(
systemd.__salt__,
{
"config.get": scope_mock,
"cmd.run_all": self.mock_run_all_success,
},
):
ret = func(self.unit_name, no_block=no_block)
self.assertTrue(ret)
self.mock_run_all_success.assert_called_with(
systemctl_command, **assert_kwargs
)
# Failed
with patch.dict(
systemd.__salt__,
{
"config.get": scope_mock,
"cmd.run_all": self.mock_run_all_failure,
},
):
if action in ("stop", "disable"):
ret = func(self.unit_name, no_block=no_block)
self.assertFalse(ret)
else:
self.assertRaises(
CommandExecutionError,
func,
self.unit_name,
no_block=no_block,
)
self.mock_run_all_failure.assert_called_with(
systemctl_command, **assert_kwargs
)
def _mask_unmask(self, action, runtime):
"""
Common code for mask/unmask tests
"""
# We want the traceback if the function name can't be found in the
# systemd execution module, so don't provide a fallback value for the
# call to getattr() here.
func = getattr(systemd, action)
# Remove trailing _ in "unmask_"
action = action.rstrip("_").replace("_", "-")
systemctl_command = ["systemctl", action]
if runtime:
systemctl_command.append("--runtime")
systemctl_command.append(self.unit_name + ".service")
scope_prefix = ["systemd-run", "--scope"]
args = [self.unit_name, runtime]
masked_mock = self.mock_true if action == "unmask" else self.mock_false
with patch.object(systemd, "_check_for_unit_changes", self.mock_none):
if action == "unmask":
mock_not_run = MagicMock(
return_value={
"retcode": 0,
"stdout": "",
"stderr": "",
"pid": 12345,
}
)
with patch.dict(systemd.__salt__, {"cmd.run_all": mock_not_run}):
with patch.object(systemd, "masked", self.mock_false):
# Test not masked (should take no action and return True)
self.assertTrue(systemd.unmask_(self.unit_name))
# Also should not have called cmd.run_all
self.assertTrue(mock_not_run.call_count == 0)
with patch.object(systemd, "masked", masked_mock):
# Has scopes available
with patch.object(salt.utils.systemd, "has_scope", self.mock_true):
# Scope enabled, successful
with patch.dict(
systemd.__salt__,
{
"config.get": self.mock_true,
"cmd.run_all": self.mock_run_all_success,
},
):
ret = func(*args)
self.assertTrue(ret)
self.mock_run_all_success.assert_called_with(
scope_prefix + systemctl_command,
python_shell=False,
redirect_stderr=True,
)
# Scope enabled, failed
with patch.dict(
systemd.__salt__,
{
"config.get": self.mock_true,
"cmd.run_all": self.mock_run_all_failure,
},
):
self.assertRaises(CommandExecutionError, func, *args)
self.mock_run_all_failure.assert_called_with(
scope_prefix + systemctl_command,
python_shell=False,
redirect_stderr=True,
)
# Scope disabled, successful
with patch.dict(
systemd.__salt__,
{
"config.get": self.mock_false,
"cmd.run_all": self.mock_run_all_success,
},
):
ret = func(*args)
self.assertTrue(ret)
self.mock_run_all_success.assert_called_with(
systemctl_command, python_shell=False, redirect_stderr=True
)
# Scope disabled, failed
with patch.dict(
systemd.__salt__,
{
"config.get": self.mock_false,
"cmd.run_all": self.mock_run_all_failure,
},
):
self.assertRaises(CommandExecutionError, func, *args)
self.mock_run_all_failure.assert_called_with(
systemctl_command, python_shell=False, redirect_stderr=True
)
# Does not have scopes available
with patch.object(salt.utils.systemd, "has_scope", self.mock_false):
# The results should be the same irrespective of
# whether or not scope is enabled, since scope is not
# available, so we repeat the below tests with it both
# enabled and disabled.
for scope_mock in (self.mock_true, self.mock_false):
# Successful
with patch.dict(
systemd.__salt__,
{
"config.get": scope_mock,
"cmd.run_all": self.mock_run_all_success,
},
):
ret = func(*args)
self.assertTrue(ret)
self.mock_run_all_success.assert_called_with(
systemctl_command,
python_shell=False,
redirect_stderr=True,
)
# Failed
with patch.dict(
systemd.__salt__,
{
"config.get": scope_mock,
"cmd.run_all": self.mock_run_all_failure,
},
):
self.assertRaises(CommandExecutionError, func, *args)
self.mock_run_all_failure.assert_called_with(
systemctl_command,
python_shell=False,
redirect_stderr=True,
)
def test_start(self):
self._change_state("start", no_block=False)
self._change_state("start", no_block=True)
def test_stop(self):
self._change_state("stop", no_block=False)
self._change_state("stop", no_block=True)
def test_restart(self):
self._change_state("restart", no_block=False)
self._change_state("restart", no_block=True)
def test_reload(self):
self._change_state("reload_", no_block=False)
self._change_state("reload_", no_block=True)
def test_force_reload(self):
self._change_state("force_reload", no_block=False)
self._change_state("force_reload", no_block=True)
def test_enable(self):
self._change_state("enable", no_block=False)
self._change_state("enable", no_block=True)
def test_disable(self):
self._change_state("disable", no_block=False)
self._change_state("disable", no_block=True)
def test_mask(self):
self._mask_unmask("mask", False)
def test_mask_runtime(self):
self._mask_unmask("mask", True)
def test_unmask(self):
self._mask_unmask("unmask_", False)
def test_unmask_runtime(self):
self._mask_unmask("unmask_", True)
def test_firstboot(self):
"""
Test service.firstboot without parameters
"""
result = {"retcode": 0, "stdout": "stdout"}
salt_mock = {
"cmd.run_all": MagicMock(return_value=result),
}
with patch.dict(systemd.__salt__, salt_mock):
assert systemd.firstboot()
salt_mock["cmd.run_all"].assert_called_with(["systemd-firstboot"])
def test_firstboot_params(self):
"""
Test service.firstboot with parameters
"""
result = {"retcode": 0, "stdout": "stdout"}
salt_mock = {
"cmd.run_all": MagicMock(return_value=result),
}
with patch.dict(systemd.__salt__, salt_mock):
assert systemd.firstboot(
locale="en_US.UTF-8",
locale_message="en_US.UTF-8",
keymap="jp",
timezone="Europe/Berlin",
hostname="node-001",
machine_id="1234567890abcdef",
root="/mnt",
)
salt_mock["cmd.run_all"].assert_called_with(
[
"systemd-firstboot",
"--locale",
"en_US.UTF-8",
"--locale-message",
"en_US.UTF-8",
"--keymap",
"jp",
"--timezone",
"Europe/Berlin",
"--hostname",
"node-001",
"--machine-ID",
"1234567890abcdef",
"--root",
"/mnt",
]
)
def test_firstboot_error(self):
"""
Test service.firstboot error
"""
result = {"retcode": 1, "stderr": "error"}
salt_mock = {
"cmd.run_all": MagicMock(return_value=result),
}
with patch.dict(systemd.__salt__, salt_mock):
with pytest.raises(CommandExecutionError):
assert systemd.firstboot()
| 41.765861 | 91 | 0.475135 |
0982274b543e0613035ab9aa92a67dad0c57154e
| 180 |
py
|
Python
|
cas/config.py
|
jcmcken/cas
|
46933cf6320f6ebd8199459c0bbbf1ebfcb9863d
|
[
"BSD-3-Clause"
] | null | null | null |
cas/config.py
|
jcmcken/cas
|
46933cf6320f6ebd8199459c0bbbf1ebfcb9863d
|
[
"BSD-3-Clause"
] | 1 |
2015-04-20T03:28:17.000Z
|
2015-04-23T01:53:29.000Z
|
cas/config.py
|
jcmcken/cas
|
46933cf6320f6ebd8199459c0bbbf1ebfcb9863d
|
[
"BSD-3-Clause"
] | null | null | null |
import os
DEBUG = os.environ.get('CAS_DEBUG', 'false') == 'true'
CAS_ROOT = os.environ.get('CAS_ROOT', None)
CAS_PLUGIN_DIR = os.path.join(os.path.dirname(__file__), 'plugins')
| 22.5 | 67 | 0.705556 |
585e53b61c628fe378d69de2d763d0b9517b0ea9
| 1,422 |
py
|
Python
|
cohesity_management_sdk/models/periodicity_extended_retention_policy_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18 |
2019-09-24T17:35:53.000Z
|
2022-03-25T08:08:47.000Z
|
cohesity_management_sdk/models/periodicity_extended_retention_policy_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18 |
2019-03-29T19:32:29.000Z
|
2022-01-03T23:16:45.000Z
|
cohesity_management_sdk/models/periodicity_extended_retention_policy_enum.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 16 |
2019-02-27T06:54:12.000Z
|
2021-11-16T18:10:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class PeriodicityExtendedRetentionPolicyEnum(object):
"""Implementation of the 'Periodicity_ExtendedRetentionPolicy' enum.
Specifies the frequency that Snapshots should be copied to the
specified target. Used in combination with multiplier.
'kEvery' means that the Snapshot copy occurs after the number of
Job Runs equals the number specified in the multiplier.
'kHour' means that the Snapshot copy occurs hourly at the frequency
set in the multiplier, for example if multiplier is 2, the copy occurs
every 2 hours.
'kDay' means that the Snapshot copy occurs daily at the frequency
set in the multiplier.
'kWeek' means that the Snapshot copy occurs weekly at the frequency
set in the multiplier.
'kMonth' means that the Snapshot copy occurs monthly at the frequency
set in the multiplier.
'kYear' means that the Snapshot copy occurs yearly at the frequency
set in the multiplier.
Attributes:
KEVERY: TODO: type description here.
KHOUR: TODO: type description here.
KDAY: TODO: type description here.
KWEEK: TODO: type description here.
KMONTH: TODO: type description here.
KYEAR: TODO: type description here.
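
    Example (illustrative)::

        periodicity = PeriodicityExtendedRetentionPolicyEnum.KDAY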
"""
KEVERY = 'kEvery'
KHOUR = 'kHour'
KDAY = 'kDay'
KWEEK = 'kWeek'
KMONTH = 'kMonth'
KYEAR = 'kYear'
| 30.913043 | 74 | 0.698312 |
4d22afe6a4e7e07b27a5f79f35fa59321a37eacc
| 9,389 |
py
|
Python
|
simple_redirect.py
|
0xC0ncord/TURRPG2
|
43a5ad00ae38070a5c23b813e793de2ac5e7cd72
|
[
"OML"
] | 1 |
2020-09-22T14:15:21.000Z
|
2020-09-22T14:15:21.000Z
|
simple_redirect.py
|
0xC0ncord/TURRPG2
|
43a5ad00ae38070a5c23b813e793de2ac5e7cd72
|
[
"OML"
] | 22 |
2021-12-12T22:24:47.000Z
|
2022-01-13T22:48:21.000Z
|
simple_redirect.py
|
0xC0ncord/TURRPG2
|
43a5ad00ae38070a5c23b813e793de2ac5e7cd72
|
[
"OML"
] | null | null | null |
#!/usr/bin/env python3
import http.server, socketserver
import urllib.parse
import os
import signal
import time
PORT = 8000
ALLOWED_EXTENSIONS = ('ukx', 'ut2', 'uax', 'usx', 'u', 'utx')
ASSET_DIRS = ('Animations', 'Maps', 'Sounds', 'StaticMeshes', 'System', 'Textures')
USE_COMPRESSION = True
TEMP_COMPRESSED_DIR = ".redirect"
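# Illustrative invocations (paths are placeholders):
#   python3 simple_redirect.py -d /srv/ut2004 -n   # serve uncompressed files in the foreground
#   python3 simple_redirect.py -d /srv/ut2004 -c   # fork to the background and serve .uz2 files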
import subprocess #TODO get rid of this when we use zlib
UCC_PATH = "System/ucc.exe"
#COMPRESS_OBJ = None
PROCESSING_FILES = []
class UT2K4RedirectHandler(http.server.SimpleHTTPRequestHandler):
# Compress a file, 1Gb at a time
def compress_file(self, infilepath: str, outfilepath: str) -> None:
global PROCESSING_FILES
print("Compressing file {} to {}".format(infilepath, outfilepath))
# Append this file to files that we are currently compressing
# This makes any further requests for it wait until it is complete
PROCESSING_FILES.append(infilepath)
# FIXME temporary workaround for zlib not cooperating
subprocess.run(["wine", UCC_PATH, "compress", "../{}".format(infilepath).replace("/", "\\")], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
subprocess.run(["mv", "{}.uz2".format(infilepath), outfilepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# TODO
# Open this file
#infile = open(infilepath, 'rb')
# Open the output file
#outfile = open(outfilepath, 'wb+')
# Get input file's total size
#infile.seek(0, 2)
#size_total = infile.tell()
# Seek back to beginning
#infile.seek(0)
# Start compressing and writing data, 1Gb at a time
#data = bytes()
#read = 0
#size = 1073741824 # 1Gb
#while read < size_total:
# outfile.write(COMPRESS_OBJ.compress(infile.read(size)))
# read = read + size
#outfile.write(COMPRESS_OBJ.flush())
# Close handles
#infile.close()
#outfile.close()
# Remove this file from the processing list
PROCESSING_FILES.remove(infilepath)
def do_GET(self) -> None:
# Parse query data to find out what was requested
parsedParams = urllib.parse.urlparse(self.path)
# Only respond if the requested file ends with an allowed file extension
if not parsedParams.path.endswith(ALLOWED_EXTENSIONS) and not parsedParams.path.endswith('uz2'):
print("Request did not end with an allowed file extension; sending 404 response.")
self.send_response(404)
self.end_headers()
return
# Only respond to the 'Unreal' user-agent
if self.headers.get('User-Agent') != "Unreal":
print("Request did not contain 'User-Agent: Unreal' header; sending 404 response.")
self.send_response(404)
self.end_headers()
return
# Only respond if the client sends a 'Connection: close'
if self.headers.get('Connection') != "close":
print("Request did not contain 'Connection: close' header; sending 404 response.")
self.send_response(404)
self.end_headers()
return
# Unquote the requested path before looking in the filesystem
filepath = urllib.parse.unquote(parsedParams.path)
if (filepath.endswith('.uz2') and not USE_COMPRESSION) or (not filepath.endswith('.uz2') and USE_COMPRESSION):
# Can't do it
print("Request for compressed/uncompressed file did not match configured mode.")
self.send_response(404)
self.end_headers()
return
if not USE_COMPRESSION:
# See if the file requested exists
if os.access('.' + os.sep + filepath, os.R_OK):
# File exists, serve it up
http.server.SimpleHTTPRequestHandler.do_GET(self);
else:
# Try to find out where this file lives
foundIt = False
for asset_dir in ASSET_DIRS:
if os.access('.' + os.sep + asset_dir + os.sep + filepath, os.R_OK):
foundIt = True
self.send_response(200)
self.end_headers()
# Found it, serve it up
with open('.' + os.sep + asset_dir + os.sep + filepath, 'rb') as fp:
self.copyfile(fp, self.wfile)
break
if not foundIt:
# Didn't find it
self.send_response(404)
self.end_headers()
else:
# Check our temporary directory for the file in its compressed form if it exists
if os.access(TEMP_COMPRESSED_DIR + os.sep + filepath, os.R_OK):
self.send_response(200)
self.end_headers()
# Serve it up
with open(TEMP_COMPRESSED_DIR + os.sep + filepath, 'rb') as fp:
self.copyfile(fp, self.wfile)
else:
# Need to find the uncompressed file first
uncomp_filepath = filepath[:-len('.uz2')]
# See if this file is already being compressed at this time
if filepath in PROCESSING_FILES:
# Wait for it and then serve it up
while filepath in PROCESSING_FILES:
time.sleep(1)
# It's done
self.send_response(200)
self.end_headers()
# Serve it up
try:
with open(TEMP_COMPRESSED_DIR + os.sep + filepath, 'rb') as fp:
self.copyfile(fp, self.wfile)
finally:
return
foundIt = False
for asset_dir in ASSET_DIRS:
if os.access('.' + os.sep + asset_dir + os.sep + uncomp_filepath, os.R_OK):
foundIt = True
# Found it, so let's compress it
self.compress_file('.' + os.sep + asset_dir + os.sep + uncomp_filepath, TEMP_COMPRESSED_DIR + os.sep + filepath)
self.send_response(200)
self.end_headers()
# Serve it up
try:
with open(TEMP_COMPRESSED_DIR + os.sep + filepath, 'rb') as fp:
self.copyfile(fp, self.wfile)
finally:
return
if not foundIt:
# Didn't find it
self.send_response(404)
self.end_headers()
# Ready, set, go!
def run() -> None:
global USE_COMPRESSION
global TEMP_COMPRESSED_DIR
#global COMPRESS_OBJ
    def clean_up(*_args) -> None:
httpd.shutdown()
if USE_COMPRESSION:
shutil.rmtree(TEMP_COMPRESSED_DIR)
signal.signal(signal.SIGTERM, clean_up)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
try:
# Setup and bind
Handler = UT2K4RedirectHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
except OSError:
return
try:
# Set up things needed for compression if enabled
if USE_COMPRESSION:
try:
if not os.path.exists(TEMP_COMPRESSED_DIR):
# Make our temporary holding area for compressed files
os.mkdir(TEMP_COMPRESSED_DIR)
# TODO
# Import zlib and get ourselves a compression object for working with them
#import zlib
#COMPRESS_OBJ = zlib.compressobj()
# time is needed to sleep while waiting for compressed files
import time
# shutil needed to remove the temporary directory tree containing compressed files on cleanup
import shutil
except (OSError, ImportError):
# Whatever, just disable compression
USE_COMPRESSION = False
# Start
httpd.serve_forever()
except:
# Clean up
clean_up()
# Main
if __name__ == "__main__":
import argparse
# Setup arguments
parser = argparse.ArgumentParser(description="A simple UT2004 redirect server.")
parser.add_argument('-d', action='store', dest='serve_dir', help="Set the base directory to serve files.")
parser.add_argument('-c', action='store_true', dest='compression', help="Compress files to '.uz2' before serving.")
parser.add_argument('--no-daemonize', '-n', action='store_true', dest='no_daemonize', help="Run in the foreground.")
args = parser.parse_args()
# If running in foreground, go ahead
if args.no_daemonize:
if args.serve_dir is not None:
os.chdir(args.serve_dir + os.sep)
USE_COMPRESSION = args.compression
run()
# Else fork ourselves running in the background
else:
import subprocess
cmd = ['python3', __file__]
if args.serve_dir is not None:
cmd += ['-d', args.serve_dir]
if args.compression:
cmd += ['-c']
cmd += ['-n']
proc = subprocess.Popen(cmd)
# Return the PID
print(proc.pid)
| 36.964567 | 149 | 0.562467 |
aae01310da3b24b9a1a6b3e2fb5fcb78ba37387d
| 573 |
py
|
Python
|
src/174. Dungeon Game.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
src/174. Dungeon Game.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
src/174. Dungeon Game.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
class Solution(object):
def calculateMinimumHP(self, dungeon):
"""
:type dungeon: List[List[int]]
:rtype: int
"""
R = len(dungeon)
C = len(dungeon[0])
minBlood = [None] * C
for r in xrange(R-1, -1, -1):
for c in xrange(C-1, -1, -1):
d = dungeon[r][c]
if r < R-1 and c < C-1:
minBlood[c] = min(max(1, minBlood[c] - d), max(1, minBlood[c+1] - d))
elif r < R-1:
minBlood[c] = max(1, minBlood[c] - d)
elif c < C-1:
minBlood[c] = max(1, minBlood[c+1] - d)
else:
minBlood[c] = 1-d if d<0 else 1
return minBlood[0]
| 24.913043 | 74 | 0.542757 |
43820a6915219b7cbb53d66c4496d1745a6a7f09
| 16,560 |
py
|
Python
|
pysteps/visualization/precipfields.py
|
Fangyh09/pysteps
|
9eb7f4ead0a946d98b7504d1bd66b18dc405ed51
|
[
"BSD-3-Clause"
] | 6 |
2019-01-06T07:42:55.000Z
|
2021-02-03T13:59:50.000Z
|
pysteps/visualization/precipfields.py
|
Fangyh09/pysteps
|
9eb7f4ead0a946d98b7504d1bd66b18dc405ed51
|
[
"BSD-3-Clause"
] | 5 |
2018-12-23T15:10:27.000Z
|
2021-01-06T15:03:03.000Z
|
pysteps/visualization/precipfields.py
|
Fangyh09/pysteps
|
9eb7f4ead0a946d98b7504d1bd66b18dc405ed51
|
[
"BSD-3-Clause"
] | 2 |
2019-08-06T14:16:43.000Z
|
2019-08-13T00:36:31.000Z
|
"""
pysteps.visualization.precipfields
==================================
Methods for plotting precipitation fields.
.. autosummary::
:toctree: ../generated/
plot_precip_field
get_colormap
"""
import matplotlib.pylab as plt
from matplotlib import cm, colors, gridspec
import numpy as np
from pysteps.exceptions import UnsupportedSomercProjection
from . import basemaps
from . import utils
def plot_precip_field(R, type="intensity", map=None, geodata=None, units='mm/h',
colorscale='pysteps', probthr=None, title=None,
colorbar=True, drawlonlatlines=False, lw=0.5, axis="on",
cax=None, **kwargs):
"""
Function to plot a precipitation intensity or probability field with a
colorbar.
.. _Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
.. _SubplotSpec: https://matplotlib.org/api/_as_gen/matplotlib.gridspec.SubplotSpec.html
.. _cartopy: https://scitools.org.uk/cartopy/docs/latest
.. _mpl_toolkits.basemap: https://matplotlib.org/basemap
Parameters
----------
R : array-like
Two-dimensional array containing the input precipitation field or an
exceedance probability map.
type : {'intensity', 'depth', 'prob'}, optional
Type of the map to plot: 'intensity' = precipitation intensity field,
'depth' = precipitation depth (accumulation) field,
'prob' = exceedance probability field.
map : {'basemap', 'cartopy'}, optional
Optional method for plotting a map: 'basemap' or 'cartopy'. The former
uses `mpl_toolkits.basemap`_, while the latter uses cartopy_.
geodata : dictionary, optional
Optional dictionary containing geographical information about the field.
If geodata is not None, it must contain the following key-value pairs:
.. tabularcolumns:: |p{1.5cm}|L|
+-----------------+----------------------------------------------------+
| Key | Value |
+=================+====================================================+
| projection | PROJ.4-compatible projection definition |
+-----------------+----------------------------------------------------+
| x1 | x-coordinate of the lower-left corner of the data |
| | raster (meters) |
+-----------------+----------------------------------------------------+
| y1 | y-coordinate of the lower-left corner of the data |
| | raster (meters) |
+-----------------+----------------------------------------------------+
| x2 | x-coordinate of the upper-right corner of the data |
| | raster (meters) |
+-----------------+----------------------------------------------------+
| y2 | y-coordinate of the upper-right corner of the data |
| | raster (meters) |
+-----------------+----------------------------------------------------+
| yorigin | a string specifying the location of the first |
| | element in the data raster w.r.t. y-axis: |
| | 'upper' = upper border, 'lower' = lower border |
+-----------------+----------------------------------------------------+
units : {'mm/h', 'mm', 'dBZ'}, optional
Units of the input array. If type is 'prob', this specifies the unit of
the intensity threshold.
colorscale : {'pysteps', 'STEPS-BE', 'BOM-RF3'}, optional
Which colorscale to use. Applicable if units is 'mm/h', 'mm' or 'dBZ'.
probthr : float, optional
Intensity threshold to show in the color bar of the exceedance probability
map. Required if type is "prob" and colorbar is True.
title : str, optional
If not None, print the title on top of the plot.
colorbar : bool, optional
If set to True, add a colorbar on the right side of the plot.
drawlonlatlines : bool, optional
If set to True, draw longitude and latitude lines. Applicable if map is
'basemap' or 'cartopy'.
lw: float, optional
Linewidth of the map (administrative boundaries and coastlines).
axis : {'off','on'}, optional
Whether to turn off or on the x and y axis.
cax : Axes_ object, optional
Axes into which the colorbar will be drawn. If no axes is provided
the colorbar axes are created next to the plot.
Other parameters
----------------
Optional parameters are contained in **kwargs. See basemaps.plot_geography.
Returns
-------
ax : fig Axes_
Figure axes. Needed if one wants to add e.g. text inside the plot.
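
    Examples
    --------
    An illustrative call (any two-dimensional precipitation array works;
    the random field below is only a stand-in)::

        import numpy as np
        from pysteps.visualization.precipfields import plot_precip_field

        R = np.random.exponential(scale=2.0, size=(200, 200))
        ax = plot_precip_field(R, type="intensity", units="mm/h")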
"""
if type not in ["intensity", "depth", "prob"]:
raise ValueError("invalid type '%s', must be 'intensity', 'depth' or 'prob'" % type)
if units not in ["mm/h", "mm", "dBZ"]:
raise ValueError("invalid units '%s', must be 'mm/h', 'mm' or 'dBZ'" % units)
if type == "prob" and colorbar and probthr is None:
raise ValueError("type='prob' but probthr not specified")
if map is not None and geodata is None:
raise ValueError("map!=None but geodata=None")
if len(R.shape) != 2:
raise ValueError("the input is not two-dimensional array")
# get colormap and color levels
cmap, norm, clevs, clevsStr = get_colormap(type, units, colorscale)
# extract extent and origin
if geodata is not None:
extent = (geodata['x1'],geodata['x2'], geodata['y1'],geodata['y2'])
origin = geodata["yorigin"]
else:
extent = (0, R.shape[1]-1, 0, R.shape[0]-1)
origin = "upper"
# plot geography
if map is not None:
try:
ax = basemaps.plot_geography(map, geodata["projection"],
extent, R.shape, lw, drawlonlatlines, **kwargs)
regular_grid = True
except UnsupportedSomercProjection:
# Define default fall-back projection for Swiss data(EPSG:3035)
# This will work reasonably well for Europe only.
t_proj4str = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs"
geodata = utils.reproject_geodata(geodata, t_proj4str, return_grid="quadmesh")
extent = (geodata['x1'], geodata['x2'], geodata['y1'], geodata['y2'])
X, Y = geodata["X_grid"], geodata["Y_grid"]
regular_grid = geodata["regular_grid"]
ax = basemaps.plot_geography(map, geodata["projection"],
extent, R.shape, lw, drawlonlatlines, **kwargs)
else:
regular_grid = True
ax = plt.gca()
# plot rainfield
if regular_grid:
im = _plot_field(R, ax, type, units, colorscale, extent=extent, origin=origin)
else:
if origin == "upper":
Y = np.flipud(Y)
im = _plot_field_pcolormesh(X, Y, R, ax, type, units, colorscale)
# plot radar domain mask
mask = np.ones(R.shape)
mask[~np.isnan(R)] = np.nan # Fully transparent within the radar domain
ax.imshow(mask, cmap=colors.ListedColormap(['gray']), alpha=0.5,
zorder=1e6, extent=extent, origin=origin)
# ax.pcolormesh(X, Y, np.flipud(mask), cmap=colors.ListedColormap(['gray']),
# alpha=0.5, zorder=1e6)
# TODO: pcolormesh doesn't work properly with the alpha parameter
if title is not None:
plt.title(title)
# add colorbar
if colorbar:
cbar = plt.colorbar(im, ticks=clevs, spacing='uniform', norm=norm,
extend="max" if type in ["intensity", "depth"] else "neither",
shrink=0.8, cax=cax)
        if clevsStr is not None:
cbar.ax.set_yticklabels(clevsStr)
if type == "intensity":
cbar.ax.set_title(units, fontsize=10)
cbar.set_label("Precipitation intensity")
elif type == "depth":
cbar.ax.set_title(units, fontsize=10)
cbar.set_label("Precipitation depth")
else:
cbar.set_label("P(R > %.1f %s)" % (probthr, units))
if geodata is None or axis == "off":
axes = plt.gca()
axes.xaxis.set_ticks([])
axes.xaxis.set_ticklabels([])
axes.yaxis.set_ticks([])
axes.yaxis.set_ticklabels([])
return plt.gca()
def _plot_field(R, ax, type, units, colorscale, extent, origin=None):
R = R.copy()
# Get colormap and color levels
cmap, norm, clevs, clevsStr = get_colormap(type, units, colorscale)
# Plot precipitation field
# transparent where no precipitation or the probability is zero
if type in ["intensity", "depth"]:
if units in ['mm/h', 'mm']:
R[R < 0.1] = np.nan
elif units == 'dBZ':
R[R < 10] = np.nan
else:
R[R < 1e-3] = np.nan
vmin,vmax = [None, None] if type in ["intensity", "depth"] else [0.0, 1.0]
im = ax.imshow(R, cmap=cmap, norm=norm, extent=extent, interpolation='nearest',
vmin=vmin, vmax=vmax, origin=origin, zorder=1)
return im
def _plot_field_pcolormesh(X, Y, R, ax, type, units, colorscale):
R = R.copy()
# Get colormap and color levels
cmap, norm, clevs, clevsStr = get_colormap(type, units, colorscale)
# Plot precipitation field
# transparent where no precipitation or the probability is zero
if type in ["intensity", "depth"]:
if units in ['mm/h', 'mm']:
R[R < 0.1] = np.nan
elif units == 'dBZ':
R[R < 10] = np.nan
else:
R[R < 1e-3] = np.nan
vmin,vmax = [None, None] if type in ["intensity", "depth"] else [0.0, 1.0]
im = ax.pcolormesh(X, Y, R, cmap=cmap, norm=norm, vmin=vmin, vmax=vmax, zorder=1)
return im
def get_colormap(type, units='mm/h', colorscale='pysteps'):
"""Function to generate a colormap (cmap) and norm.
Parameters
----------
type : {'intensity', 'depth', 'prob'}, optional
Type of the map to plot: 'intensity' = precipitation intensity field,
'depth' = precipitation depth (accumulation) field,
'prob' = exceedance probability field.
units : {'mm/h', 'mm', 'dBZ'}, optional
Units of the input array. If type is 'prob', this specifies the unit of
the intensity threshold.
colorscale : {'pysteps', 'STEPS-BE', 'BOM-RF3'}, optional
Which colorscale to use. Applicable if units is 'mm/h', 'mm' or 'dBZ'.
Returns
-------
cmap : Colormap instance
colormap
norm : colors.Normalize object
Colors norm
clevs: list(float)
List of precipitation values defining the color limits.
clevsStr: list(str)
List of precipitation values defining the color limits (with correct
number of decimals).
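
    Examples
    --------
    Illustrative call::

        cmap, norm, clevs, clevs_str = get_colormap("intensity", units="mm/h",
                                                    colorscale="pysteps")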
"""
if type in ["intensity", "depth"]:
# Get list of colors
color_list,clevs,clevsStr = _get_colorlist(units, colorscale)
cmap = colors.LinearSegmentedColormap.from_list("cmap", color_list, len(clevs)-1)
if colorscale == 'BOM-RF3':
cmap.set_over('black',1)
if colorscale == 'pysteps':
cmap.set_over('darkred',1)
if colorscale == 'STEPS-BE':
cmap.set_over('black',1)
norm = colors.BoundaryNorm(clevs, cmap.N)
return cmap, norm, clevs, clevsStr
elif type == "prob":
cmap = plt.get_cmap("OrRd", 10)
return cmap, colors.Normalize(vmin=0, vmax=1), None, None
else:
return cm.jet, colors.Normalize(), None, None
def _get_colorlist(units='mm/h', colorscale='pysteps'):
"""
Function to get a list of colors to generate the colormap.
Parameters
----------
units : str
Units of the input array (mm/h, mm or dBZ)
colorscale : str
Which colorscale to use (BOM-RF3, pysteps, STEPS-BE)
Returns
-------
color_list : list(str)
List of color strings.
clevs : list(float)
List of precipitation values defining the color limits.
clevsStr : list(str)
List of precipitation values defining the color limits
(with correct number of decimals).
"""
if colorscale == "BOM-RF3":
color_list = np.array([(255, 255, 255), # 0.0
(245, 245, 255), # 0.2
(180, 180, 255), # 0.5
(120, 120, 255), # 1.5
(20, 20, 255), # 2.5
(0, 216, 195), # 4.0
(0, 150, 144), # 6.0
(0, 102, 102), # 10
(255, 255, 0), # 15
(255, 200, 0), # 20
(255, 150, 0), # 30
(255, 100, 0), # 40
(255, 0, 0), # 50
(200, 0, 0), # 60
(120, 0, 0), # 75
(40, 0, 0)]) # > 100
color_list = color_list/255.
if units == 'mm/h':
clevs = [0.,0.2, 0.5, 1.5, 2.5, 4, 6, 10, 15, 20, 30, 40, 50, 60, 75,
100, 150]
elif units == "mm":
clevs = [0.,0.2, 0.5, 1.5, 2.5, 4, 5, 7, 10, 15, 20, 25, 30, 35, 40,
45, 50]
else:
raise ValueError('Wrong units in get_colorlist: %s' % units)
elif colorscale == 'pysteps':
pinkHex = '#%02x%02x%02x' % (232, 215, 242)
redgreyHex = '#%02x%02x%02x' % (156, 126, 148)
color_list = [redgreyHex, "#640064","#AF00AF","#DC00DC","#3232C8","#0064FF","#009696","#00C832",
"#64FF00","#96FF00","#C8FF00","#FFFF00","#FFC800","#FFA000","#FF7D00","#E11900"]
if units in ['mm/h', 'mm']:
clevs= [0.08,0.16,0.25,0.40,0.63,1,1.6,2.5,4,6.3,10,16,25,40,63,100,160]
elif units == 'dBZ':
clevs = np.arange(10,65,5)
else:
raise ValueError('Wrong units in get_colorlist: %s' % units)
elif colorscale == 'STEPS-BE':
color_list = ['cyan','deepskyblue','dodgerblue','blue','chartreuse','limegreen','green','darkgreen','yellow','gold','orange','red','magenta','darkmagenta']
if units in ['mm/h', 'mm']:
clevs = [0.1,0.25,0.4,0.63,1,1.6,2.5,4,6.3,10,16,25,40,63,100]
elif units == 'dBZ':
clevs = np.arange(10,65,5)
else:
raise ValueError('Wrong units in get_colorlist: %s' % units)
else:
print('Invalid colorscale', colorscale)
raise ValueError("Invalid colorscale " + colorscale)
    # Generate color level strings with the correct number of decimal places
    clevsStr = _dynamic_formatting_floats(clevs)
return color_list, clevs, clevsStr
def _dynamic_formatting_floats(floatArray, colorscale='pysteps'):
"""
Function to format the floats defining the class limits of the colorbar.
"""
floatArray = np.array(floatArray, dtype=float)
labels = []
for label in floatArray:
if label >= 0.1 and label < 1:
if colorscale == 'pysteps':
formatting = ',.2f'
else:
formatting = ',.1f'
elif label >= 0.01 and label < 0.1:
formatting = ',.2f'
elif label >= 0.001 and label < 0.01:
formatting = ',.3f'
elif label >= 0.0001 and label < 0.001:
formatting = ',.4f'
elif label >= 1 and label.is_integer():
formatting = 'i'
else:
formatting = ',.1f'
if formatting != 'i':
labels.append(format(label, formatting))
else:
labels.append(str(int(label)))
return labels
| 40.788177 | 164 | 0.521316 |
344df2dff22922dc53e8a801b6bd0658cb94f3e0
| 7,548 |
py
|
Python
|
src/snowflake/connector/json_result.py
|
666Chao666/snowflake-connector-python
|
81a10e522fcf19d45580c79bc066b7a4eab25485
|
[
"Apache-2.0"
] | null | null | null |
src/snowflake/connector/json_result.py
|
666Chao666/snowflake-connector-python
|
81a10e522fcf19d45580c79bc066b7a4eab25485
|
[
"Apache-2.0"
] | 14 |
2021-01-26T06:53:10.000Z
|
2022-03-14T11:16:54.000Z
|
src/snowflake/connector/json_result.py
|
666Chao666/snowflake-connector-python
|
81a10e522fcf19d45580c79bc066b7a4eab25485
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
from logging import getLogger
from .constants import FIELD_ID_TO_NAME
from .errorcode import ER_FAILED_TO_CONVERT_ROW_TO_PYTHON_TYPE
from .errors import Error, InterfaceError
from .telemetry import TelemetryField
from .time_util import get_time_millis
logger = getLogger(__name__)
class JsonResult:
def __init__(self, raw_response, cursor):
self._reset()
self._cursor = cursor
self._connection = cursor.connection
self._init_from_meta(raw_response)
def _init_from_meta(self, data):
self._total_row_index = -1 # last fetched number of rows
self._chunk_index = 0
self._chunk_count = 0
self._current_chunk_row = iter(data.get('rowset'))
self._current_chunk_row_count = len(data.get('rowset'))
self._column_converter = []
self._column_idx_to_name = {}
for idx, column in enumerate(data['rowtype']):
self._column_idx_to_name[idx] = column['name']
self._column_converter.append(
self._connection.converter.to_python_method(
column['type'].upper(), column))
if 'chunks' in data:
chunks = data['chunks']
self._chunk_count = len(chunks)
logger.debug('chunk size=%s', self._chunk_count)
# prepare the downloader for further fetch
qrmk = data['qrmk'] if 'qrmk' in data else None
chunk_headers = None
if 'chunkHeaders' in data:
chunk_headers = {}
for header_key, header_value in data[
'chunkHeaders'].items():
chunk_headers[header_key] = header_value
logger.debug(
'added chunk header: key=%s, value=%s',
header_key,
header_value)
logger.debug('qrmk=%s', qrmk)
self._chunk_downloader = self._connection._chunk_downloader_class(
chunks, self._connection, self._cursor, qrmk, chunk_headers,
query_result_format='json',
prefetch_threads=self._connection.client_prefetch_threads)
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
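        # Serve rows from the inlined first rowset, then pull additional chunks
        # from the chunk downloader one at a time until every chunk is consumed.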
is_done = False
try:
row = None
self.total_row_index += 1
try:
row = next(self._current_chunk_row)
except StopIteration:
if self._chunk_index < self._chunk_count:
logger.debug(
"chunk index: %s, chunk_count: %s",
self._chunk_index, self._chunk_count)
next_chunk = self._chunk_downloader.next_chunk()
self._current_chunk_row_count = next_chunk.row_count
self._current_chunk_row = next_chunk.result_data
self._chunk_index += 1
try:
row = next(self._current_chunk_row)
except StopIteration:
is_done = True
raise IndexError
else:
if self._chunk_count > 0 and \
self._chunk_downloader is not None:
self._chunk_downloader.terminate()
self._cursor._log_telemetry_job_data(
TelemetryField.TIME_DOWNLOADING_CHUNKS,
self._chunk_downloader._total_millis_downloading_chunks)
self._cursor._log_telemetry_job_data(
TelemetryField.TIME_PARSING_CHUNKS,
self._chunk_downloader._total_millis_parsing_chunks)
self._chunk_downloader = None
self._chunk_count = 0
self._current_chunk_row = iter(())
is_done = True
if is_done:
raise StopIteration
return self._row_to_python(row) if row is not None else None
except IndexError:
# returns None if the iteration is completed so that iter() stops
return None
finally:
if is_done and self._cursor._first_chunk_time:
logger.info("fetching data done")
time_consume_last_result = get_time_millis() - self._cursor._first_chunk_time
self._cursor._log_telemetry_job_data(
TelemetryField.TIME_CONSUME_LAST_RESULT,
time_consume_last_result)
def _row_to_python(self, row):
"""Converts data in row if required.
NOTE: surprisingly using idx+1 is faster than enumerate here. Also
removing generator improved performance even better.
"""
idx = 0
for col in row:
conv = self._column_converter[idx]
try:
row[idx] = col if conv is None or col is None else conv(col)
except Exception as e:
col_desc = self._cursor.description[idx]
msg = 'Failed to convert: ' \
'field {name}: {type}::{value}, Error: ' \
'{error}'.format(
name=col_desc[0],
type=FIELD_ID_TO_NAME[col_desc[1]],
value=col,
error=e)
logger.exception(msg)
Error.errorhandler_wrapper(
self._connection, self._cursor, InterfaceError, {
'msg': msg,
'errno': ER_FAILED_TO_CONVERT_ROW_TO_PYTHON_TYPE,
})
idx += 1
return tuple(row)
def _reset(self):
self.total_row_index = -1 # last fetched number of rows
self._current_chunk_row_count = 0
self._current_chunk_row = iter(())
self._chunk_index = 0
if hasattr(self, '_chunk_count') and self._chunk_count > 0 and \
self._chunk_downloader is not None:
self._chunk_downloader.terminate()
self._chunk_count = 0
self._chunk_downloader = None
class DictJsonResult(JsonResult):
def __init__(self, raw_response, cursor):
JsonResult.__init__(self, raw_response, cursor)
def _row_to_python(self, row):
# see the base class
res = {}
idx = 0
for col in row:
col_name = self._column_idx_to_name[idx]
conv = self._column_converter[idx]
try:
res[col_name] = col if conv is None or col is None else conv(col)
except Exception as e:
col_desc = self._cursor.description[idx]
msg = 'Failed to convert: ' \
'field {name}: {type}::{value}, Error: ' \
'{error}'.format(
name=col_desc[0],
type=FIELD_ID_TO_NAME[col_desc[1]],
value=col,
error=e
)
logger.exception(msg)
Error.errorhandler_wrapper(
self._connection, self._cursor, InterfaceError, {
'msg': msg,
'errno': ER_FAILED_TO_CONVERT_ROW_TO_PYTHON_TYPE,
})
idx += 1
return res
| 38.314721 | 93 | 0.54213 |
4cad8d1390c13b0df7f0e741490ef75a752939cc
| 22,267 |
py
|
Python
|
salt/states/saltmod.py
|
minaguib/salt
|
2a1066eee18928d1d86a126f69c6d70a86b83cca
|
[
"Apache-2.0"
] | null | null | null |
salt/states/saltmod.py
|
minaguib/salt
|
2a1066eee18928d1d86a126f69c6d70a86b83cca
|
[
"Apache-2.0"
] | null | null | null |
salt/states/saltmod.py
|
minaguib/salt
|
2a1066eee18928d1d86a126f69c6d70a86b83cca
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Control the Salt command interface
==================================
This state is intended for use from the Salt Master. It provides access to
sending commands down to minions as well as access to executing master-side
modules. These state functions wrap Salt's :ref:`Python API <python-api>`.
    .. versionadded:: 2016.11.0
Support for masterless minions was added to the ``salt.state`` function,
so they can run orchestration sls files. This is particularly useful when
the rendering of a state is dependent on the execution of another state.
Orchestration will render and execute each orchestration block
independently, while honoring requisites to ensure the states are applied
in the correct order.
.. seealso:: More Orchestrate documentation
* :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
* :py:func:`The Orchestrate runner <salt.runners.state.orchestrate>`
'''
from __future__ import absolute_import
# Import python libs
import fnmatch
import logging
import time
# Import salt libs
import salt.syspaths
import salt.utils
import salt.utils.event
import salt.output  # used below to format per-minion failure output
import salt.ext.six as six
from salt.ext.six import string_types
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'salt'
def __virtual__():
'''
Named salt
'''
return __virtualname__
def _fire_args(tag_data):
try:
salt.utils.event.fire_args(__opts__,
__orchestration_jid__,
tag_data,
'run')
except NameError:
log.debug(
'Unable to fire args event due to missing __orchestration_jid__'
)
def state(name,
tgt,
ssh=False,
tgt_type='glob',
expr_form=None,
ret='',
highstate=None,
sls=None,
top=None,
saltenv=None,
test=False,
pillar=None,
expect_minions=False,
fail_minions=None,
allow_fail=0,
concurrent=False,
timeout=None,
batch=None,
queue=False,
orchestration_jid=None):
'''
Invoke a state run on a given target
name
An arbitrary name used to track the state execution
tgt
The target specification for the state run.
        .. versionadded:: 2016.11.0
Masterless support: When running on a masterless minion, the ``tgt``
is ignored and will always be the local minion.
tgt_type
The target type to resolve, defaults to ``glob``
expr_form
.. deprecated:: Nitrogen
Use tgt_type instead
ret
Optionally set a single or a list of returners to use
highstate
        Defaults to None. If set to True, the target systems will ignore any
        sls references specified in the sls option and call state.highstate
        on the targeted minions.
top
        Should be the name of a top file. If set, state.top is called with this
        top file instead of state.sls.
sls
A group of sls files to execute. This can be defined as a single string
containing a single sls file, or a list of sls files
test
Pass ``test=true`` through to the state function
pillar
Pass the ``pillar`` kwarg through to the state function
saltenv
The default salt environment to pull sls files from
ssh
Set to `True` to use the ssh client instead of the standard salt client
roster
In the event of using salt-ssh, a roster system can be set
expect_minions
An optional boolean for failing if some minions do not respond
fail_minions
An optional list of targeted minions where failure is an option
allow_fail
Pass in the number of minions to allow for failure before setting
the result of the execution to False
concurrent
Allow multiple state runs to occur at once.
WARNING: This flag is potentially dangerous. It is designed
        for use when multiple state runs can safely be run at the same time.
        Do not use this flag for performance optimization.
queue
Pass ``queue=true`` through to the state function
batch
Execute the command :ref:`in batches <targeting-batch>`. E.g.: ``10%``.
.. versionadded:: 2016.3.0
Examples:
Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on target
minions:
.. code-block:: yaml
webservers:
salt.state:
- tgt: 'web*'
- sls:
- apache
- django
- core
- saltenv: prod
Run a full :py:func:`state.highstate <salt.state.highstate>` on target
    minions.
.. code-block:: yaml
databases:
salt.state:
- tgt: role:database
- tgt_type: grain
- highstate: True
'''
cmd_kw = {'arg': [], 'kwarg': {}, 'ret': ret, 'timeout': timeout}
state_ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
try:
allow_fail = int(allow_fail)
except ValueError:
state_ret['result'] = False
state_ret['comment'] = 'Passed invalid value for \'allow_fail\', must be an int'
return state_ret
# remember to remove the expr_form argument from this function when
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
cmd_kw['tgt_type'] = tgt_type
cmd_kw['ssh'] = ssh
cmd_kw['expect_minions'] = expect_minions
if highstate:
fun = 'state.highstate'
elif top:
fun = 'state.top'
cmd_kw['arg'].append(top)
elif sls:
fun = 'state.sls'
if isinstance(sls, list):
sls = ','.join(sls)
cmd_kw['arg'].append(sls)
else:
state_ret['comment'] = 'No highstate or sls specified, no execution made'
state_ret['result'] = False
return state_ret
if test or __opts__.get('test'):
cmd_kw['kwarg']['test'] = True
if pillar:
cmd_kw['kwarg']['pillar'] = pillar
cmd_kw['kwarg']['saltenv'] = __env__
cmd_kw['kwarg']['queue'] = queue
if isinstance(concurrent, bool):
cmd_kw['kwarg']['concurrent'] = concurrent
else:
state_ret['comment'] = ('Must pass in boolean for value of \'concurrent\'')
state_ret['result'] = False
return state_ret
if batch is not None:
cmd_kw['batch'] = str(batch)
masterless = __opts__['__role'] == 'minion' and \
__opts__['file_client'] == 'local'
if not masterless:
_fire_args({'type': 'state', 'tgt': tgt, 'name': name, 'args': cmd_kw})
cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
else:
if top:
cmd_kw['topfn'] = ''.join(cmd_kw.pop('arg'))
elif sls:
cmd_kw['mods'] = cmd_kw.pop('arg')
tmp_ret = __salt__[fun](**cmd_kw)
cmd_ret = {__opts__['id']: {
'ret': tmp_ret,
'out': tmp_ret.get('out', 'highstate') if
isinstance(tmp_ret, dict) else 'highstate'
}}
try:
state_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
except (StopIteration, KeyError):
pass
changes = {}
fail = set()
failures = {}
no_change = set()
if fail_minions is None:
fail_minions = ()
elif isinstance(fail_minions, string_types):
fail_minions = [minion.strip() for minion in fail_minions.split(',')]
elif not isinstance(fail_minions, list):
state_ret.setdefault('warnings', []).append(
'\'fail_minions\' needs to be a list or a comma separated '
'string. Ignored.'
)
fail_minions = ()
for minion, mdata in six.iteritems(cmd_ret):
if mdata.get('out', '') != 'highstate':
log.warning('Output from salt state not highstate')
m_ret = False
if 'return' in mdata and 'ret' not in mdata:
mdata['ret'] = mdata.pop('return')
m_state = True
if mdata.get('failed', False):
m_state = False
else:
try:
m_ret = mdata['ret']
except KeyError:
m_state = False
if m_state:
m_state = salt.utils.check_state_result(m_ret)
if not m_state:
if minion not in fail_minions:
fail.add(minion)
failures[minion] = m_ret or 'Minion did not respond'
continue
try:
for state_item in six.itervalues(m_ret):
if 'changes' in state_item and state_item['changes']:
changes[minion] = m_ret
break
else:
no_change.add(minion)
except AttributeError:
log.error("m_ret did not have changes %s %s", type(m_ret), m_ret)
no_change.add(minion)
if changes:
state_ret['changes'] = {'out': 'highstate', 'ret': changes}
if len(fail) > allow_fail:
state_ret['result'] = False
state_ret['comment'] = 'Run failed on minions: {0}'.format(', '.join(fail))
else:
state_ret['comment'] = 'States ran successfully.'
if changes:
state_ret['comment'] += ' Updating {0}.'.format(', '.join(changes))
if no_change:
state_ret['comment'] += ' No changes made to {0}.'.format(', '.join(no_change))
if failures:
state_ret['comment'] += '\nFailures:\n'
for minion, failure in six.iteritems(failures):
state_ret['comment'] += '\n'.join(
(' ' * 4 + l)
for l in salt.output.out_format(
{minion: failure},
'highstate',
__opts__,
).splitlines()
)
state_ret['comment'] += '\n'
if test or __opts__.get('test'):
if state_ret['changes'] and state_ret['result'] is True:
# Test mode with changes is the only case where result should ever be none
state_ret['result'] = None
return state_ret
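# Illustrative sketch (added for documentation; not part of the original
# module): an orchestration SLS that tolerates partial failure by combining
# ``allow_fail`` and ``fail_minions``. The target and sls names below are
# placeholders.
#
#   patch_webservers:
#     salt.state:
#       - tgt: 'web*'
#       - sls:
#         - apache
#       - allow_fail: 2             # tolerate up to two failing minions
#       - fail_minions: web-canary  # failures on this minion never count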
def function(
name,
tgt,
ssh=False,
tgt_type='glob',
expr_form=None,
ret='',
expect_minions=False,
fail_minions=None,
fail_function=None,
arg=None,
kwarg=None,
timeout=None,
batch=None):
'''
Execute a single module function on a remote minion via salt or salt-ssh
name
The name of the function to run, aka cmd.run or pkg.install
tgt
The target specification, aka '*' for all minions
tgt_type
The target type, defaults to ``glob``
expr_form
.. deprecated:: Nitrogen
Use tgt_type instead
arg
The list of arguments to pass into the function
kwarg
The dict (not a list) of keyword arguments to pass into the function
ret
Optionally set a single or a list of returners to use
expect_minions
An optional boolean for failing if some minions do not respond
fail_minions
An optional list of targeted minions where failure is an option
fail_function
An optional string that points to a salt module that returns True or False
based on the returned data dict for individual minions
ssh
Set to `True` to use the ssh client instead of the standard salt client
'''
func_ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if kwarg is None:
kwarg = {}
if isinstance(arg, str):
func_ret['warnings'] = ['Please specify \'arg\' as a list, not a string. '
'Modifying in place, but please update SLS file '
'to remove this warning.']
arg = arg.split()
cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout}
# remember to remove the expr_form argument from this function when
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
if batch is not None:
cmd_kw['batch'] = str(batch)
cmd_kw['tgt_type'] = tgt_type
cmd_kw['ssh'] = ssh
cmd_kw['expect_minions'] = expect_minions
cmd_kw['_cmd_meta'] = True
fun = name
if __opts__['test'] is True:
func_ret['comment'] = (
'Function {0} will be executed on target {1} as test={2}'
).format(fun, tgt, str(False))
func_ret['result'] = None
return func_ret
try:
_fire_args({'type': 'function', 'tgt': tgt, 'name': name, 'args': cmd_kw})
cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
except Exception as exc:
func_ret['result'] = False
func_ret['comment'] = str(exc)
return func_ret
try:
func_ret['__jid__'] = cmd_ret[next(iter(cmd_ret))]['jid']
except (StopIteration, KeyError):
pass
changes = {}
fail = set()
failures = {}
if fail_minions is None:
fail_minions = ()
elif isinstance(fail_minions, string_types):
fail_minions = [minion.strip() for minion in fail_minions.split(',')]
elif not isinstance(fail_minions, list):
func_ret.setdefault('warnings', []).append(
'\'fail_minions\' needs to be a list or a comma separated '
'string. Ignored.'
)
fail_minions = ()
for minion, mdata in six.iteritems(cmd_ret):
m_ret = False
if mdata.get('retcode'):
func_ret['result'] = False
fail.add(minion)
if mdata.get('failed', False):
m_func = False
else:
if 'return' in mdata and 'ret' not in mdata:
mdata['ret'] = mdata.pop('return')
m_ret = mdata['ret']
m_func = (not fail_function and True) or __salt__[fail_function](m_ret)
if not m_func:
if minion not in fail_minions:
fail.add(minion)
failures[minion] = m_ret and m_ret or 'Minion did not respond'
continue
changes[minion] = m_ret
if not cmd_ret:
func_ret['result'] = False
        func_ret['comment'] = 'No minions responded'
else:
if changes:
func_ret['changes'] = {'out': 'highstate', 'ret': changes}
if fail:
func_ret['result'] = False
func_ret['comment'] = 'Running function {0} failed on minions: {1}'.format(name, ', '.join(fail))
else:
func_ret['comment'] = 'Function ran successfully.'
if changes:
func_ret['comment'] += ' Function {0} ran on {1}.'.format(name, ', '.join(changes))
if failures:
func_ret['comment'] += '\nFailures:\n'
for minion, failure in six.iteritems(failures):
func_ret['comment'] += '\n'.join(
(' ' * 4 + l)
for l in salt.output.out_format(
{minion: failure},
'highstate',
__opts__,
).splitlines()
)
func_ret['comment'] += '\n'
return func_ret
def wait_for_event(
name,
id_list,
event_id='id',
timeout=300,
node='master'):
'''
Watch Salt's event bus and block until a condition is met
.. versionadded:: 2014.7.0
name
An event tag to watch for; supports Reactor-style globbing.
id_list
A list of event identifiers to watch for -- usually the minion ID. Each
time an event tag is matched the event data is inspected for
``event_id``, if found it is removed from ``id_list``. When ``id_list``
is empty this function returns success.
event_id : id
The name of a key in the event data. Default is ``id`` for the minion
ID, another common value is ``name`` for use with orchestrating
salt-cloud events.
timeout : 300
The maximum time in seconds to wait before failing.
The following example blocks until all the listed minions complete a
restart and reconnect to the Salt master:
.. code-block:: yaml
reboot_all_minions:
salt.function:
- name: system.reboot
- tgt: '*'
wait_for_reboots:
salt.wait_for_event:
- name: salt/minion/*/start
- id_list:
- jerry
- stuart
- dave
- phil
- kevin
- mike
- require:
- salt: reboot_all_minions
'''
ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}
if __opts__.get('test'):
ret['comment'] = \
'Orchestration would wait for event \'{0}\''.format(name)
ret['result'] = None
return ret
sevent = salt.utils.event.get_event(
node,
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=True)
del_counter = 0
starttime = time.time()
timelimit = starttime + timeout
while True:
event = sevent.get_event(full=True)
is_timedout = time.time() > timelimit
if event is None and not is_timedout:
log.trace("wait_for_event: No event data; waiting.")
continue
elif event is None and is_timedout:
ret['comment'] = 'Timeout value reached.'
return ret
if fnmatch.fnmatch(event['tag'], name):
val = event['data'].get(event_id)
if val is not None:
try:
val_idx = id_list.index(val)
except ValueError:
log.trace("wait_for_event: Event identifier '{0}' not in "
"id_list; skipping.".format(event_id))
else:
del id_list[val_idx]
del_counter += 1
minions_seen = ret['changes'].setdefault('minions_seen', [])
minions_seen.append(val)
log.debug("wait_for_event: Event identifier '{0}' removed "
"from id_list; {1} items remaining."
.format(val, len(id_list)))
else:
log.trace("wait_for_event: Event identifier '{0}' not in event "
"'{1}'; skipping.".format(event_id, event['tag']))
else:
log.debug("wait_for_event: Skipping unmatched event '{0}'"
.format(event['tag']))
if len(id_list) == 0:
ret['result'] = True
ret['comment'] = 'All events seen in {0} seconds.'.format(
time.time() - starttime)
return ret
if is_timedout:
ret['comment'] = 'Timeout value reached.'
return ret
def runner(name, **kwargs):
'''
Execute a runner module on the master
.. versionadded:: 2014.7.0
name
The name of the function to run
kwargs
Any keyword arguments to pass to the runner function
.. code-block:: yaml
run-manage-up:
salt.runner:
- name: manage.up
'''
try:
jid = __orchestration_jid__
except NameError:
log.debug(
'Unable to fire args event due to missing __orchestration_jid__'
)
jid = None
out = __salt__['saltutil.runner'](name,
__orchestration_jid__=jid,
__env__=__env__,
**kwargs)
runner_return = out.get('return')
if 'success' in out and not out['success']:
ret = {
'name': name,
'result': False,
'changes': {},
'comment': runner_return if runner_return else "Runner function '{0}' failed without comment.".format(name)
}
else:
ret = {
'name': name,
'result': True,
'changes': runner_return if runner_return else {},
'comment': "Runner function '{0}' executed.".format(name)
}
ret['__orchestration__'] = True
if 'jid' in out:
ret['__jid__'] = out['jid']
return ret
def wheel(name, **kwargs):
'''
Execute a wheel module on the master
.. versionadded:: 2014.7.0
name
The name of the function to run
kwargs
Any keyword arguments to pass to the wheel function
.. code-block:: yaml
accept_minion_key:
salt.wheel:
- name: key.accept
- match: frank
'''
ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
try:
jid = __orchestration_jid__
except NameError:
log.debug(
'Unable to fire args event due to missing __orchestration_jid__'
)
jid = None
out = __salt__['saltutil.wheel'](name,
__orchestration_jid__=jid,
__env__=__env__,
**kwargs)
ret['result'] = True
ret['comment'] = "Wheel function '{0}' executed.".format(name)
ret['__orchestration__'] = True
if 'jid' in out:
ret['__jid__'] = out['jid']
runner_return = out.get('return')
if runner_return:
ret['changes'] = runner_return
return ret
| 30.670799 | 119 | 0.555037 |
873ec0bad66391bafa20243995238752578b66fa
| 263 |
py
|
Python
|
aclark/db/migrations/0022_remove_project_user_hours.py
|
aclark4life/aclark-net-1
|
e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea
|
[
"MIT"
] | null | null | null |
aclark/db/migrations/0022_remove_project_user_hours.py
|
aclark4life/aclark-net-1
|
e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea
|
[
"MIT"
] | null | null | null |
aclark/db/migrations/0022_remove_project_user_hours.py
|
aclark4life/aclark-net-1
|
e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.9 on 2019-07-02 20:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("db", "0021_project_user_hours")]
operations = [migrations.RemoveField(model_name="project", name="user_hours")]
| 23.909091 | 82 | 0.737643 |
2b0bb60cb4296b4d430050b8022b60feb646ce0e
| 4,945 |
py
|
Python
|
project/target/node-modules/webjars/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py
|
bet-gram/betgram-webapp-backend
|
c82824e2117a81e582a4b9707849e6b7f064be59
|
[
"Apache-2.0"
] | 2,494 |
2015-02-11T04:34:13.000Z
|
2022-03-31T14:21:47.000Z
|
project/target/node-modules/webjars/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py
|
bet-gram/betgram-webapp-backend
|
c82824e2117a81e582a4b9707849e6b7f064be59
|
[
"Apache-2.0"
] | 1,432 |
2017-06-21T04:08:48.000Z
|
2020-08-25T16:21:15.000Z
|
project/target/node-modules/webjars/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py
|
bet-gram/betgram-webapp-backend
|
c82824e2117a81e582a4b9707849e6b7f064be59
|
[
"Apache-2.0"
] | 442 |
2015-02-12T13:45:46.000Z
|
2022-03-21T05:28:05.000Z
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
try:
xml_string = xml_string.encode(encoding)
except Exception:
xml_string = unicode(xml_string, 'latin-1').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
    '\n': '
',
    '\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
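# --- Illustrative usage sketch (added for documentation; not part of gyp). ---
# Runs under Python 2, matching the iteritems/reduce/unicode usage above.
if __name__ == '__main__':
  # A specification is a nested list: [name, {attributes}, children...].
  spec = ['project', {'name': 'demo'},
          ['file', 'a.cc'],
          ['file', 'b & c.cc']]   # '&' is escaped by _XmlEscape
  print(XmlToString(spec, pretty=True))
  # Expected output (roughly):
  #   <?xml version="1.0" encoding="utf-8"?>
  #   <project name="demo">
  #     <file>a.cc</file>
  #     <file>b & c.cc</file>
  #   </project>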
| 30.337423 | 80 | 0.651972 |
fcb0f94bed86d2a845dd6d243a355704e88a3577
| 3,745 |
py
|
Python
|
src/aegis/modules/interpreter.py
|
martinbagic/aegis-exercise
|
2bef0828dea456f2f66c9288daf095e4dceca40c
|
[
"MIT"
] | null | null | null |
src/aegis/modules/interpreter.py
|
martinbagic/aegis-exercise
|
2bef0828dea456f2f66c9288daf095e4dceca40c
|
[
"MIT"
] | null | null | null |
src/aegis/modules/interpreter.py
|
martinbagic/aegis-exercise
|
2bef0828dea456f2f66c9288daf095e4dceca40c
|
[
"MIT"
] | 1 |
2021-08-24T15:31:30.000Z
|
2021-08-24T15:31:30.000Z
|
import numpy as np
from aegis.panconfiguration import pan
class Interpreter:
"""Interpreter of genomic loci
Transforms bool array into an array of numbers.
These numbers can be loosely understood as gene activity.
"""
exp_base = 0.5 # Important for _exp
binary_exp_base = 0.98 # Important for _binary_exp
def __init__(self, BITS_PER_LOCUS):
# Parameters for the binary interpreter
self.binary_weights = 2 ** np.arange(BITS_PER_LOCUS)[::-1]
self.binary_max = self.binary_weights.sum()
# Parameters for the binary switch interpreter
self.binary_switch_weights = self.binary_weights.copy()
self.binary_switch_weights[-1] = 0 # Switch bit does not add to locus value
self.binary_switch_max = self.binary_switch_weights.sum()
def __call__(self, loci, interpreter_kind):
"""Exposed method"""
interpreter = getattr(self, f"_{interpreter_kind}")
loci = self._diploid_to_haploid(loci)
interpretome = interpreter(loci)
return interpretome
def _diploid_to_haploid(self, loci):
"""Merge two arrays encoding two chromatids into one array.
The two chromatids contribute equally to bits, so that 1+1->1, 0+0->0 and 1+0->0.5 (as well as 0+1->0.5).
Arguments:
loci: A bool numpy array with shape (population size, ploidy, gstruc.length, BITS_PER_LOCUS)
Returns:
A bool numpy array with shape (population size, gstruc.length, BITS_PER_LOCUS)
"""
return loci.mean(1)
def _binary(self, loci):
"""Interpret locus as a binary number and normalize.
High resolution (can produce 2^bits_per_locus different numbers).
Position-dependent.
"""
return loci.dot(self.binary_weights) / self.binary_max
def _switch(self, loci):
"""Return 0 if all bits are 0; 1 if all bits are 1; 0 or 1 randomly otherwise.
Low resolution (can produce 2 different numbers).
Position-independent.
"""
sums = loci.mean(2)
rand_values = pan.rng.random(loci.shape[:-1]) < 0.5
return np.select(
[sums == 0, (sums > 0) & (sums < 1), sums == 1], [0, rand_values, 1]
)
def _binary_switch(self, loci):
"""Interpret first n-1 bits as a binary number if the last bit is 1.
High resolution (can produce 2^(bits_per_locus-1) different numbers).
Position-dependent.
"""
where_on = loci[:, :, -1] == 1 # Loci which are turned on
values = np.zeros(loci.shape[:-1], float) # Initialize output array with zeros
values[where_on] = (
loci[where_on].dot(self.binary_switch_weights) / self.binary_switch_max
) # If the locus is turned on, make the value in the output array be the binary value
return values
def _uniform(self, loci):
"""Return normalized sum of bits.
Medium resolution (can produce bits_per_locus+1 different numbers).
Position-independent.
"""
return loci.sum(-1) / loci.shape[-1]
def _exp(self, loci):
"""Return base^total_number_of_zeros.
Medium resolution (can produce bits_per_locus+1 different numbers).
Suitable for generating very small numbers.
Position-independent.
"""
return self.exp_base ** np.sum(1 - loci, axis=2)
def _binary_exp(self, loci):
"""Return base^binary_value_of_locus
High resolution (can produce 2^bits_per_locus different numbers).
Suitable for generating very small numbers.
Position-dependent.
"""
binary = self._binary(loci)
return self.binary_exp_base ** binary
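# --- Illustrative sketch (added for documentation; not part of AEGIS). ---
if __name__ == "__main__":
    # One individual, diploid (ploidy=2), one locus, 4 bits per locus.
    interpreter = Interpreter(BITS_PER_LOCUS=4)
    loci = np.array(
        [[[[1, 0, 1, 1]],   # chromatid 1 -> binary 1011
          [[1, 0, 1, 1]]]]  # chromatid 2 -> identical, so the merge keeps 1011
    )
    # "binary" interpretation: 1011 -> 11, normalized by the maximum 1111 -> 15.
    print(interpreter(loci, "binary"))  # ~[[0.7333]]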
| 35 | 113 | 0.640587 |
9e59441515791ed6639a8b7b763d6ed29bf93ff2
| 3,817 |
py
|
Python
|
eclipse-mosquitto/test/broker/04-retain-check-source-persist.py
|
HenriqueBuzin/mosquitto-eclipse-mqtt
|
00468923fcf70eefdf2c707b6ba9bdd4f859faf2
|
[
"Unlicense"
] | 2 |
2021-04-20T14:28:59.000Z
|
2021-05-06T07:46:53.000Z
|
eclipse-mosquitto/test/broker/04-retain-check-source-persist.py
|
HenriqueBuzin/mosquitto-eclipse-mqtt
|
00468923fcf70eefdf2c707b6ba9bdd4f859faf2
|
[
"Unlicense"
] | null | null | null |
eclipse-mosquitto/test/broker/04-retain-check-source-persist.py
|
HenriqueBuzin/mosquitto-eclipse-mqtt
|
00468923fcf70eefdf2c707b6ba9bdd4f859faf2
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# Test for CVE-2018-12546, with the broker being stopped to write the persistence file.
from mosq_test_helper import *
import signal
def write_config(filename, port, per_listener):
with open(filename, 'w') as f:
f.write("per_listener_settings %s\n" % (per_listener))
f.write("check_retain_source true\n")
f.write("port %d\n" % (port))
f.write("allow_anonymous true\n")
f.write("acl_file %s\n" % (filename.replace('.conf', '.acl')))
f.write("persistence true\n")
f.write("persistence_file %s\n" % (filename.replace('.conf', '.db')))
def write_acl_1(filename, username):
with open(filename, 'w') as f:
if username is not None:
f.write('user %s\n' % (username))
f.write('topic readwrite test/topic\n')
def write_acl_2(filename, username):
with open(filename, 'w') as f:
if username is not None:
f.write('user %s\n' % (username))
f.write('topic read test/topic\n')
def do_test(proto_ver, per_listener, username):
conf_file = os.path.basename(__file__).replace('.py', '.conf')
write_config(conf_file, port, per_listener)
persistence_file = os.path.basename(__file__).replace('.py', '.db')
try:
os.remove(persistence_file)
except OSError:
pass
acl_file = os.path.basename(__file__).replace('.py', '.acl')
write_acl_1(acl_file, username)
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("retain-check", keepalive=keepalive, username=username, proto_ver=proto_ver)
connack_packet = mosq_test.gen_connack(rc=0, proto_ver=proto_ver)
mid = 1
publish_packet = mosq_test.gen_publish("test/topic", qos=0, payload="retained message", retain=True, proto_ver=proto_ver)
subscribe_packet = mosq_test.gen_subscribe(mid, "test/topic", 0, proto_ver=proto_ver)
suback_packet = mosq_test.gen_suback(mid, 0, proto_ver=proto_ver)
broker = mosq_test.start_broker(filename=os.path.basename(__file__), use_conf=True, port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=port)
sock.send(publish_packet)
sock.close()
sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=port)
mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback 1")
mosq_test.expect_packet(sock, "publish", publish_packet)
sock.close()
# Remove "write" ability
write_acl_2(acl_file, username)
broker.terminate()
broker.wait()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), use_conf=True, port=port)
sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=port)
mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback 2")
# If we receive the retained message here, it is a failure.
mosq_test.do_ping(sock)
rc = 0
sock.close()
except mosq_test.TestError:
pass
finally:
broker.terminate()
broker.wait()
os.remove(conf_file)
os.remove(acl_file)
os.remove(persistence_file)
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
exit(rc)
port = mosq_test.get_port()
do_test(proto_ver=4, per_listener="true", username=None)
do_test(proto_ver=4, per_listener="true", username="test")
do_test(proto_ver=4, per_listener="false", username=None)
do_test(proto_ver=4, per_listener="false", username="test")
do_test(proto_ver=5, per_listener="true", username=None)
do_test(proto_ver=5, per_listener="true", username="test")
do_test(proto_ver=5, per_listener="false", username=None)
do_test(proto_ver=5, per_listener="false", username="test")
| 36.009434 | 125 | 0.6754 |
76f99d00d0ec990c1786ff6e286fa3ee5f83a437
| 13,181 |
py
|
Python
|
tensorflow/python/saved_model/builder.py
|
jianbaishi/tensorflow-learn
|
59354802b773ca8e1be20cb1d04bf9abb03cf1ad
|
[
"Apache-2.0"
] | 1 |
2017-03-24T12:08:25.000Z
|
2017-03-24T12:08:25.000Z
|
tensorflow/python/saved_model/builder.py
|
jianbaishi/tensorflow-learn
|
59354802b773ca8e1be20cb1d04bf9abb03cf1ad
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/saved_model/builder.py
|
jianbaishi/tensorflow-learn
|
59354802b773ca8e1be20cb1d04bf9abb03cf1ad
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel builder.
Builds a SavedModel that can be saved to storage, is language neutral, and
enables systems to produce, consume, or transform TensorFlow Models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.any_pb2 import Any
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
class SavedModelBuilder(object):
"""Builds the `SavedModel` protocol buffer and saves variables and assets.
The `SavedModelBuilder` class provides functionality to build a `SavedModel`
protocol buffer. Specifically, this allows multiple meta graphs to be saved as
part of a single language-neutral `SavedModel`, while sharing variables and
assets.
To build a SavedModel, the first meta graph must be saved with variables.
Subsequent meta graphs will simply be saved with their graph definitions. If
assets need to be saved and written or copied to disk, they must be provided
as part of the first meta graph to be saved. Subsequent meta graphs can
provide a subset of the initial assets to be added to the SavedModel
definition.
Each meta graph added to the SavedModel must be annotated with tags. The tags
provide a means to identify the specific meta graph to load and restore, along
with the shared set of variables and assets.
Typical usage for the `SavedModelBuilder`:
```python
...
builder = saved_model_builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph_and_variables(sess,
["foo-tag"],
signature_def_map=foo_signatures,
asset_collection=foo_assets)
...
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph(["bar-tag", "baz-tag"])
...
builder.save()
```
"""
def __init__(self, export_dir):
self._saved_model = saved_model_pb2.SavedModel()
self._saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
self._export_dir = export_dir
if not file_io.file_exists(export_dir):
file_io.recursive_create_dir(self._export_dir)
# Boolean to track whether variables and assets corresponding to the
# SavedModel have been saved. Specifically, the first meta graph to be added
# MUST use the add_meta_graph_and_variables() API. Subsequent add operations
# on the SavedModel MUST use the add_meta_graph() API which does not save
# weights.
self._has_saved_variables = False
def _asset_path_from_tensor(self, path_tensor):
"""Returns the filepath value stored in constant `path_tensor`.
Args:
path_tensor: Tensor of a file-path.
Returns:
The string value i.e. path of the tensor, if valid.
Raises:
TypeError if tensor does not match expected op type, dtype or value.
"""
if not isinstance(path_tensor, ops.Tensor):
raise TypeError("Asset path tensor must be a Tensor.")
if path_tensor.op.type != "Const":
raise TypeError("Asset path tensor must be of type constant.")
if path_tensor.dtype != dtypes.string:
raise TypeError("Asset path tensor must be of dtype string.")
str_values = path_tensor.op.get_attr("value").string_val
if len(str_values) != 1:
raise TypeError("Asset path tensor must be a scalar.")
return str_values[0]
def _add_asset_to_collection(self, asset_filename, asset_tensor):
"""Builds an asset proto and adds it to the asset collection of the graph.
Args:
asset_filename: The filename of the asset to be added.
asset_tensor: The asset tensor used to populate the tensor info of the
asset proto.
"""
asset_proto = meta_graph_pb2.AssetFileDef()
asset_proto.filename = asset_filename
asset_proto.tensor_info.name = asset_tensor.name
asset_any_proto = Any()
asset_any_proto.Pack(asset_proto)
ops.add_to_collection(constants.ASSETS_KEY, asset_any_proto)
def _save_and_write_assets(self, assets_collection_to_add=None):
"""Saves asset to the meta graph and writes asset files to disk.
Args:
assets_collection_to_add: The collection where the asset paths are setup.
"""
asset_source_filepath_list = self._save_assets(assets_collection_to_add)
# Return if there are no assets to write.
    if len(asset_source_filepath_list) == 0:
tf_logging.info("No assets to write.")
return
assets_destination_dir = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY))
if not file_io.file_exists(assets_destination_dir):
file_io.recursive_create_dir(assets_destination_dir)
# Copy each asset from source path to destination path.
for asset_source_filepath in asset_source_filepath_list:
asset_source_filename = os.path.basename(asset_source_filepath)
asset_destination_filepath = os.path.join(
compat.as_bytes(assets_destination_dir),
compat.as_bytes(asset_source_filename))
file_io.copy(
asset_source_filepath, asset_destination_filepath, overwrite=True)
tf_logging.info("Assets written to: %s", assets_destination_dir)
def _save_assets(self, assets_collection_to_add=None):
"""Saves assets to the meta graph.
Args:
assets_collection_to_add: The collection where the asset paths are setup.
Returns:
The list of filepaths to the assets in the assets collection.
Raises:
ValueError: Indicating an invalid filepath tensor.
"""
asset_source_filepath_list = []
if assets_collection_to_add is None:
tf_logging.info("No assets to save.")
return asset_source_filepath_list
# Iterate over the supplied asset collection, build the `AssetFile` proto
# and add them to the collection with key `constants.ASSETS_KEY`, in the
# graph.
for asset_tensor in assets_collection_to_add:
asset_source_filepath = self._asset_path_from_tensor(asset_tensor)
if not asset_source_filepath:
raise ValueError("Invalid asset filepath tensor %s" % asset_tensor)
asset_source_filename = os.path.basename(asset_source_filepath)
# Build `AssetFile` proto and add it to the asset collection in the graph.
self._add_asset_to_collection(asset_source_filename, asset_tensor)
asset_source_filepath_list.append(asset_source_filepath)
tf_logging.info("Assets added to graph.")
return asset_source_filepath_list
def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):
"""Tags the meta graph def and adds it to the SavedModel.
Tags the meta graph def with the supplied tags, adds signature defs to it if
provided and appends the meta graph def to the SavedModel proto.
Args:
meta_graph_def: The meta graph def to add to the SavedModel.
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
"""
for tag in tags:
meta_graph_def.meta_info_def.tags.append(tag)
if signature_def_map is not None:
for key in signature_def_map:
meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])
proto_meta_graph_def = self._saved_model.meta_graphs.add()
proto_meta_graph_def.CopyFrom(meta_graph_def)
def add_meta_graph(self, tags, signature_def_map=None,
assets_collection=None):
"""Adds the current meta graph to the SavedModel.
Creates a Saver in the current scope and uses the Saver to export the meta
graph def. Invoking this API requires the `add_meta_graph_and_variables()`
API to have been invoked before.
Args:
tags: The set of tags to annotate the meta graph def with.
signature_def_map: The map of signature defs to be added to the meta graph
def.
assets_collection: Assets collection to be saved with SavedModel. Note
that this collection should be a subset of the assets saved as part of
the first meta graph in the SavedModel.
Raises:
AssertionError: If the variables for the SavedModel have not been saved
yet.
"""
if not self._has_saved_variables:
raise AssertionError(
"Variables and assets have not been saved yet. "
"Please invoke `add_meta_graph_and_variables()` first.")
# Save asset files, if any.
self._save_assets(assets_collection)
saver = tf_saver.Saver(variables.all_variables())
meta_graph_def = saver.export_meta_graph()
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
def add_meta_graph_and_variables(self,
sess,
tags,
signature_def_map=None,
assets_collection=None):
"""Adds the current meta graph to the SavedModel and saves variables.
Creates a Saver to save the variables from the provided session. Exports the
corresponding meta graph def. This function assumes that the variables to be
saved have been initialized. For a given `SavedModelBuilder`, this API must
be called exactly once and for the first meta graph to save. For subsequent
meta graph defs to be added, the `add_meta_graph()` API must be used.
Args:
sess: The TensorFlow session from which to save the meta graph and
variables.
tags: The set of tags with which to save the meta graph.
signature_def_map: The map of signature def map to add to the meta graph
def.
assets_collection: Assets collection to be saved with SavedModel.
"""
if self._has_saved_variables:
raise AssertionError("Variables and assets have already been saved. "
"Please invoke `add_meta_graph()` instead.")
# Save asset files and write them to disk, if any.
self._save_and_write_assets(assets_collection)
# Create the variables sub-directory, if it does not exist.
variables_dir = os.path.join(
compat.as_text(self._export_dir),
compat.as_text(constants.VARIABLES_DIRECTORY))
if not file_io.file_exists(variables_dir):
file_io.recursive_create_dir(variables_dir)
variables_path = os.path.join(
compat.as_text(variables_dir),
compat.as_text(constants.VARIABLES_FILENAME))
# Save the variables and export meta graph def.
saver = tf_saver.Saver(variables.all_variables())
saver.save(sess, variables_path, write_meta_graph=False)
meta_graph_def = saver.export_meta_graph()
# Tag the meta graph def and add it to the SavedModel.
self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)
# Mark this instance of SavedModel as having saved variables, such that
# subsequent attempts to save variables will fail.
self._has_saved_variables = True
def save(self, as_text=False):
"""Writes a `SavedModel` protocol buffer to disk.
The function writes the SavedModel protocol buffer to the export directory
in serialized format.
Args:
as_text: Writes the SavedModel protocol buffer in text format to disk.
Returns:
The path to which the SavedModel protocol buffer was written.
"""
if not file_io.file_exists(self._export_dir):
file_io.recursive_create_dir(self._export_dir)
if as_text:
path = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
file_io.write_string_to_file(path, str(self._saved_model))
else:
path = os.path.join(
compat.as_bytes(self._export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(path, self._saved_model.SerializeToString())
tf_logging.info("SavedModel written to: %s", path)
return path
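  # Illustrative note (added for documentation; not part of the original
  # module): after add_meta_graph_and_variables() followed by save(), the
  # export directory is expected to look roughly like
  #
  #   export_dir/
  #     saved_model.pb      (or saved_model.pbtxt when as_text=True)
  #     variables/          (checkpoint files written by the Saver)
  #     assets/             (present only if an assets_collection was supplied)
  #
  # The exact file and directory names come from
  # tensorflow.python.saved_model.constants.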
| 38.767647 | 80 | 0.718534 |
cfc32e398bd430a29ed3a1e25457f822c9b39d7f
| 10,804 |
py
|
Python
|
coredis/nodemanager.py
|
dynalz/coredis
|
54c95a323897a9742bf30ceff67141d9a1cfc97a
|
[
"MIT"
] | null | null | null |
coredis/nodemanager.py
|
dynalz/coredis
|
54c95a323897a9742bf30ceff67141d9a1cfc97a
|
[
"MIT"
] | null | null | null |
coredis/nodemanager.py
|
dynalz/coredis
|
54c95a323897a9742bf30ceff67141d9a1cfc97a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import random
from coredis.exceptions import ConnectionError, RedisClusterException
from coredis.utils import b, hash_slot
class NodeManager:
"""
TODO: document
"""
RedisClusterHashSlots = 16384
def __init__(
self,
startup_nodes=None,
reinitialize_steps=None,
skip_full_coverage_check=False,
nodemanager_follow_cluster=False,
**connection_kwargs,
):
"""
:skip_full_coverage_check:
Skips the check of cluster-require-full-coverage config, useful for clusters
without the CONFIG command (like aws)
:nodemanager_follow_cluster:
The node manager will during initialization try the last set of nodes that
it was operating on. This will allow the client to drift along side the cluster
if the cluster nodes move around a slot.
"""
self.connection_kwargs = connection_kwargs
self.nodes = {}
self.slots = {}
self.startup_nodes = [] if startup_nodes is None else startup_nodes
self.orig_startup_nodes = self.startup_nodes[:]
self.reinitialize_counter = 0
self.reinitialize_steps = reinitialize_steps or 25
self._skip_full_coverage_check = skip_full_coverage_check
self.nodemanager_follow_cluster = nodemanager_follow_cluster
if not self.startup_nodes:
raise RedisClusterException("No startup nodes provided")
def encode(self, value):
"""Returns a bytestring representation of the value"""
if isinstance(value, bytes):
return value
elif isinstance(value, int):
value = b(str(value))
elif isinstance(value, float):
value = b(repr(value))
elif not isinstance(value, str):
value = str(value)
if isinstance(value, str):
value = value.encode()
return value
def keyslot(self, key):
"""Calculates keyslot for a given key"""
key = self.encode(key)
return hash_slot(key)
def node_from_slot(self, slot):
for node in self.slots[slot]:
if node["server_type"] == "master":
return node
def all_nodes(self):
for node in self.nodes.values():
yield node
def all_masters(self):
for node in self.nodes.values():
if node["server_type"] == "master":
yield node
def random_startup_node(self):
return random.choice(self.startup_nodes)
def random_startup_node_iter(self):
"""A generator that returns a random startup nodes"""
while True:
yield random.choice(self.startup_nodes)
def random_node(self):
return random.choice(list(self.nodes.values()))
def get_redis_link(self, host, port):
from coredis.client import StrictRedis
allowed_keys = (
"password",
"stream_timeout",
"connect_timeout",
"retry_on_timeout",
"ssl_context",
"parser_class",
"reader_read_size",
"loop",
)
connection_kwargs = {
k: v for k, v in self.connection_kwargs.items() if k in allowed_keys
}
return StrictRedis(
host=host, port=port, decode_responses=True, **connection_kwargs
)
async def initialize(self):
"""
Initializes the slots cache by asking all startup nodes what the
current cluster configuration is.
        TODO: Currently the last node queried has the final say about how the configuration is set up.
        Maybe it should stop trying after it has correctly covered all slots, or when one node is
        reached and it could execute the CLUSTER SLOTS command.
"""
nodes_cache = {}
tmp_slots = {}
all_slots_covered = False
disagreements = []
startup_nodes_reachable = False
nodes = self.orig_startup_nodes
# With this option the client will attempt to connect to any of the previous set of nodes
# instead of the original set of nodes
if self.nodemanager_follow_cluster:
nodes = self.startup_nodes
for node in nodes:
try:
r = self.get_redis_link(host=node["host"], port=node["port"])
cluster_slots = await r.cluster_slots()
startup_nodes_reachable = True
except ConnectionError:
continue
except Exception:
raise RedisClusterException(
f'ERROR sending "cluster slots" command to redis server: {node}'
)
all_slots_covered = True
# If there's only one server in the cluster, its ``host`` is ''
# Fix it to the host in startup_nodes
if len(cluster_slots) == 1 and len(self.startup_nodes) == 1:
single_node_slots = cluster_slots.get(
(0, self.RedisClusterHashSlots - 1)
)[0]
if len(single_node_slots["host"]) == 0:
single_node_slots["host"] = self.startup_nodes[0]["host"]
single_node_slots["server_type"] = "master"
# No need to decode response because StrictRedis should handle that for us...
for min_slot, max_slot in cluster_slots:
nodes = cluster_slots.get((min_slot, max_slot))
master_node, slave_nodes = nodes[0], nodes[1:]
if master_node["host"] == "":
master_node["host"] = node["host"]
self.set_node_name(master_node)
nodes_cache[master_node["name"]] = master_node
for i in range(min_slot, max_slot + 1):
if i not in tmp_slots:
tmp_slots[i] = [master_node]
for slave_node in slave_nodes:
self.set_node_name(slave_node)
nodes_cache[slave_node["name"]] = slave_node
tmp_slots[i].append(slave_node)
else:
# Validate that 2 nodes want to use the same slot cache setup
if tmp_slots[i][0]["name"] != node["name"]:
disagreements.append(
"{0} vs {1} on slot: {2}".format(
tmp_slots[i][0]["name"], node["name"], i
),
)
if len(disagreements) > 5:
raise RedisClusterException(
(
"startup_nodes could not agree on a valid slots cache."
f" {', '.join(disagreements)}"
)
)
self.populate_startup_nodes()
self.refresh_table_asap = False
if self._skip_full_coverage_check:
need_full_slots_coverage = False
else:
need_full_slots_coverage = await self.cluster_require_full_coverage(
nodes_cache
)
# Validate if all slots are covered or if we should try next startup node
for i in range(0, self.RedisClusterHashSlots):
if i not in tmp_slots and need_full_slots_coverage:
all_slots_covered = False
if all_slots_covered:
# All slots are covered and application can continue to execute
break
if not startup_nodes_reachable:
raise RedisClusterException(
"Redis Cluster cannot be connected. "
"Please provide at least one reachable node."
)
if not all_slots_covered:
raise RedisClusterException(
"Not all slots are covered after query all startup_nodes. "
"{0} of {1} covered...".format(
len(tmp_slots), self.RedisClusterHashSlots
)
)
# Set the tmp variables to the real variables
self.slots = tmp_slots
self.nodes = nodes_cache
self.reinitialize_counter = 0
async def increment_reinitialize_counter(self, ct=1):
        # Bump the counter ``ct`` times; a re-initialization is triggered
        # every ``reinitialize_steps`` increments.
        for _ in range(ct):
self.reinitialize_counter += 1
if self.reinitialize_counter % self.reinitialize_steps == 0:
await self.initialize()
async def cluster_require_full_coverage(self, nodes_cache):
"""
If exists 'cluster-require-full-coverage no' config on redis servers,
then even all slots are not covered, cluster still will be able to
respond
"""
nodes = nodes_cache or self.nodes
async def node_require_full_coverage(node):
r_node = self.get_redis_link(host=node["host"], port=node["port"])
node_config = await r_node.config_get("cluster-require-full-coverage")
return "yes" in node_config.values()
# at least one node should have cluster-require-full-coverage yes
for node in nodes.values():
if await node_require_full_coverage(node):
return True
return False
def set_node_name(self, n):
"""
Formats the name for the given node object
        # TODO: This should not be constructed this way. It should update the name of the node in
the node cache dict
"""
if "name" not in n:
n["name"] = "{0}:{1}".format(n["host"], n["port"])
def set_node(self, host, port, server_type=None):
"""Updates data for a node"""
node_name = "{0}:{1}".format(host, port)
node = {
"host": host,
"port": port,
"name": node_name,
"server_type": server_type,
}
self.nodes[node_name] = node
return node
def populate_startup_nodes(self):
"""
Do something with all startup nodes and filters out any duplicates
"""
for item in self.startup_nodes:
self.set_node_name(item)
for n in self.nodes.values():
if n not in self.startup_nodes:
self.startup_nodes.append(n)
# freeze it so we can set() it
uniq = {frozenset(node.items()) for node in self.startup_nodes}
# then thaw it back out into a list of dicts
self.startup_nodes = [dict(node) for node in uniq]
async def reset(self):
"""Drops all node data and start over from startup_nodes"""
await self.initialize()
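# --- Illustrative sketch (added for documentation; not part of coredis). ---
if __name__ == "__main__":
    # keyslot() is pure CRC16-based hashing, so no cluster connection is needed.
    nm = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}])
    # On a standard Redis cluster, ``CLUSTER KEYSLOT foo`` returns the same slot.
    print(nm.keyslot("foo"))  # expected: 12182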
| 36.5 | 98 | 0.562662 |
ee62c91ad9698c740f5562519bb325b1b1e0c76b
| 11,343 |
py
|
Python
|
pandas/tests/indexing/test_ix.py
|
ajspera/pandas
|
f38020f33052ea9029b410d7fae79bc8f249c0ac
|
[
"BSD-3-Clause"
] | 5 |
2019-07-26T15:22:41.000Z
|
2021-09-28T09:22:17.000Z
|
pandas/tests/indexing/test_ix.py
|
ajspera/pandas
|
f38020f33052ea9029b410d7fae79bc8f249c0ac
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/indexing/test_ix.py
|
ajspera/pandas
|
f38020f33052ea9029b410d7fae79bc8f249c0ac
|
[
"BSD-3-Clause"
] | 3 |
2019-07-26T10:47:23.000Z
|
2020-08-10T12:40:32.000Z
|
""" test indexing with ix """
from warnings import catch_warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_scalar
import pandas as pd
from pandas import DataFrame, Series, option_context
from pandas.util import testing as tm
def test_ix_deprecation():
# GH 15114
df = DataFrame({"A": [1, 2, 3]})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=True):
df.ix[1, "A"]
@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
class TestIX:
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({"a": [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], "a"] = -expected.ix[[0, 1, 2], "a"]
with catch_warnings(record=True):
df["a"].ix[[0, 1, 2]] = -df["a"].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2]})
with catch_warnings(record=True):
df["a"].ix[[0, 1, 2]] = -df["a"].ix[[0, 1, 2]].astype("float64") + 0.5
expected = DataFrame({"a": [0.5, -0.5, -1.5], "b": [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame(
{
"delta": [1174, 904, 161],
"elapsed": [7673, 9277, 1470],
"timestamp": [1413840976, 1413842580, 1413760580],
}
)
expected = DataFrame(
{
"delta": [1174, 904, 161],
"elapsed": [7673, 9277, 1470],
"timestamp": pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit="s"
),
}
)
df2 = df.copy()
df2["timestamp"] = pd.to_datetime(df["timestamp"], unit="s")
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, "timestamp"] = pd.to_datetime(df["timestamp"], unit="s")
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df["timestamp"], unit="s")
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
assert result == expected
else:
assert expected.equals(result)
# failure cases for .loc, but these work for .ix
df = DataFrame(np.random.randn(5, 4), columns=list("ABCD"))
for key in [
slice(1, 3),
tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]]),
]:
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
tm.makeTimedeltaIndex,
]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
msg = (
r"cannot do slice indexing"
r" on {klass} with these indexers \[(0|1)\] of"
r" {kind}".format(klass=type(df.index), kind=str(int))
)
with pytest.raises(TypeError, match=msg):
df.loc[key]
df = DataFrame(
np.random.randn(5, 4),
columns=list("ABCD"),
index=pd.date_range("2012-01-01", periods=5),
)
for key in [
"2012-01-03",
"2012-01-31",
slice("2012-01-03", "2012-01-03"),
slice("2012-01-03", "2012-01-04"),
slice("2012-01-03", "2012-01-06", 2),
slice("2012-01-03", "2012-01-31"),
tuple([[True, True, True, False, True]]),
]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
with pytest.raises(KeyError, match=r"^'2012-01-31'$"):
df.loc[key]
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list("abde"))
result1 = s["a":"c"]
with catch_warnings(record=True):
result2 = s.ix["a":"c"]
result3 = s.loc["a":"c"]
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({"one": [1, 2, 3, np.nan, np.nan], "two": [1, 2, 3, 4, 5]})
df.loc[df["one"] > 1, "two"] = -df["two"]
expected = DataFrame(
{
"one": {0: 1.0, 1: 2.0, 2: 3.0, 3: np.nan, 4: np.nan},
"two": {0: 1, 1: -2, 2: -3, 3: 4, 4: 5},
}
)
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self, float_frame):
# GH #1142
df = float_frame
df["foo"] = "bar"
orig = df.loc[:, "B"].copy()
df.loc[:, "B"] = df.loc[:, "B"] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({"x": np.arange(10), "y": np.arange(10, 20), "z": "bar"})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.loc[indexer, "y"] = v
assert expected.loc[indexer, "y"] == v
df.loc[df.x % 2 == 0, "y"] = df.loc[df.x % 2 == 0, "y"] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({"a": [1, 2, 3], "b": [0, 1, 2]})
df.loc[[0, 2], "b"] = [100, -100]
expected = DataFrame({"a": [1, 2, 3], "b": [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"a": list(range(4))})
df["b"] = np.nan
df.loc[[1, 3], "b"] = [100, -100]
expected = DataFrame({"a": [0, 1, 2, 3], "b": [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context("chained_assignment", None):
df = DataFrame({"a": list(range(4))})
df["b"] = np.nan
df["b"].loc[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(
np.arange(16).reshape((4, 4)),
columns=["a", "b", 8, "c"],
index=["e", 7, "f", "g"],
)
with catch_warnings(record=True):
assert df.ix["e", 8] == 2
assert df.loc["e", 8] == 2
with catch_warnings(record=True):
df.ix["e", 8] = 42
assert df.ix["e", 8] == 42
assert df.loc["e", 8] == 42
df.loc["e", 8] = 45
with catch_warnings(record=True):
assert df.ix["e", 8] == 45
assert df.loc["e", 8] == 45
def test_ix_slicing_strings(self):
# see gh-3836
data = {
"Classification": ["SA EQUITY CFD", "bbb", "SA EQUITY", "SA SSF", "aaa"],
"Random": [1, 2, 3, 4, 5],
"X": ["correct", "wrong", "correct", "correct", "wrong"],
}
df = DataFrame(data)
x = df[~df.Classification.isin(["SA EQUITY CFD", "SA EQUITY", "SA SSF"])]
with catch_warnings(record=True):
df.ix[x.index, "X"] = df["Classification"]
expected = DataFrame(
{
"Classification": {
0: "SA EQUITY CFD",
1: "bbb",
2: "SA EQUITY",
3: "SA SSF",
4: "aaa",
},
"Random": {0: 1, 1: 2, 2: 3, 3: 4, 4: 5},
"X": {0: "correct", 1: "bbb", 2: "correct", 3: "correct", 4: "aaa"},
}
) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_ix_setitem_out_of_bounds_axis_0(self):
df = DataFrame(
np.random.randn(2, 5),
index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)],
)
with catch_warnings(record=True):
msg = "cannot set by positional indexing with enlargement"
with pytest.raises(ValueError, match=msg):
df.ix[2, 0] = 100
def test_ix_setitem_out_of_bounds_axis_1(self):
df = DataFrame(
np.random.randn(5, 2),
index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)],
)
with catch_warnings(record=True):
msg = "cannot set by positional indexing with enlargement"
with pytest.raises(ValueError, match=msg):
df.ix[0, 2] = 100
def test_ix_empty_list_indexer_is_ok(self):
with catch_warnings(record=True):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(
df.ix[:, []],
df.iloc[:, :0],
check_index_type=True,
check_column_type=True,
)
# horizontal empty
tm.assert_frame_equal(
df.ix[[], :],
df.iloc[:0, :],
check_index_type=True,
check_column_type=True,
)
# horizontal empty
tm.assert_frame_equal(
df.ix[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
def test_ix_duplicate_returns_series(self):
df = DataFrame(
np.random.randn(3, 3), index=[0.1, 0.2, 0.2], columns=list("abc")
)
with catch_warnings(record=True):
r = df.ix[0.2, "a"]
e = df.loc[0.2, "a"]
tm.assert_series_equal(r, e)
| 32.783237 | 88 | 0.489377 |
8795236df7220d002925835262768192f2c908f6
| 1,767 |
py
|
Python
|
script/github-buildstatus-notifier.py
|
ydemetriades/cfstep-github-buildstatus
|
42a9f8d09b0f8833a701be93aff376a0d6c173ae
|
[
"MIT"
] | null | null | null |
script/github-buildstatus-notifier.py
|
ydemetriades/cfstep-github-buildstatus
|
42a9f8d09b0f8833a701be93aff376a0d6c173ae
|
[
"MIT"
] | 5 |
2020-01-26T11:55:03.000Z
|
2020-02-01T20:48:51.000Z
|
script/github-buildstatus-notifier.py
|
ydemetriades/cfstep-github-buildstatus
|
42a9f8d09b0f8833a701be93aff376a0d6c173ae
|
[
"MIT"
] | 1 |
2020-01-23T15:20:15.000Z
|
2020-01-23T15:20:15.000Z
|
#!/usr/bin/env python
import os
import requests
gh_url = os.getenv('GH_BSN_URL', 'https://api.github.com')
repo_owner = os.getenv('CF_REPO_OWNER')
repo_name = os.getenv('CF_REPO_NAME')
repo_auth_user = os.getenv('GH_BSN_REPO_AUTH_USER', repo_owner)
repo_token = os.getenv('GH_BSN_REPO_AUTH_TOKEN')
if repo_owner is None:
print("Repository Owner Environment Variable [CF_REPO_OWNER] is not defined.")
exit(2)
if repo_name is None:
print("Repository Owner Environment Variable [CF_REPO_OWNER] is not defined.")
exit(2)
if repo_token is None:
print("Authentication User Token Environment Variable [GH_BSN_REPO_AUTH_TOKEN] is not defined.")
exit(2)
cf_build_id = os.getenv('CF_BUILD_ID')
cf_status = os.getenv('CF_BUILD_STATUS', 'pending') # 'error', 'failure', 'pending', 'success'
cf_revision = os.getenv('CF_REVISION')
cf_build_url = os.getenv('CF_BUILD_URL')
description = os.getenv('GH_BSN_BUILD_DESCRIPTION', 'Build [{}]'.format(cf_build_id))
context = os.getenv('GH_BSN_BUILD_CONTEXT', 'codefresh-ci')
print('Will Attempt to update build status of commit [{}] to [{}] '.format(cf_revision, cf_status))
data = {
'state': cf_status,
'target_url': cf_build_url,
'description': description,
'context': context
}
# Construct URL
api_url = ('%(url)s/repos/%(owner)s/%(name)s/statuses/%(revision)s'
% {'url': gh_url,
'owner': repo_owner,
'name': repo_name,
'revision': cf_revision})
print('Sending request to:')
print(api_url)
print('with body')
print(data)
# Post build status to Github
response = requests.post(api_url, auth=(repo_auth_user, repo_token), json=data)
print('Response:')
print(response)
print(response.text)
if response:
exit(0)
else:
exit(1)
| 28.967213 | 100 | 0.703452 |
183f11b89009a4c3d8ea131a17a57373ccba0e8d
| 11,114 |
py
|
Python
|
tcex/threat_intelligence/mappings/indicator/indicator.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | null | null | null |
tcex/threat_intelligence/mappings/indicator/indicator.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | null | null | null |
tcex/threat_intelligence/mappings/indicator/indicator.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | null | null | null |
"""ThreatConnect TI Indicator"""
# standard library
import json
from urllib.parse import quote, unquote
from ..mappings import Mappings
# import local modules for dynamic reference
module = __import__(__name__)
def custom_indicator_class_factory(
indicator_type, entity_type, branch_type, base_class, value_fields
):
"""Build dynamic Custom Indicator Class."""
@staticmethod
def _metadata_map_1():
"""Map field data."""
metadata_map = base_class._metadata_map()
for value in value_fields:
manipulated_value = value.lower().replace(' ', '_')
if manipulated_value not in metadata_map.keys():
metadata_map[manipulated_value] = value
return metadata_map
def init(self, tcex, **kwargs): # pylint: disable=possibly-unused-variable
"""Init method for Custom Indicator Types with one value"""
base_class.__init__(
self,
tcex,
sub_type=indicator_type,
api_entity=entity_type,
api_branch=branch_type,
**kwargs,
)
res = {v: k for k, v in self._metadata_map().items()}
values = []
for field in value_fields:
value = kwargs.pop(res.get(field), kwargs.pop(field, ''))
value = quote(self.fully_decode_uri(value), safe='')
values.append(value)
if len(values) == 1:
self.unique_id = kwargs.get('unique_id', values[0])
elif len(values) == 2:
self.unique_id = kwargs.get('unique_id', self.build_summary(values[0], values[1]))
elif len(values) == 3:
self.unique_id = kwargs.get(
'unique_id', self.build_summary(values[0], values[1], values[2])
)
def _set_unique_id(self, json_request):
"""Set the unique ID.
Args:
json_request (dict): The JSON data for the request.
"""
values = []
for field in value_fields:
value = json_request.get(field, '')
values.append(quote(self.fully_decode_uri(value), safe=''))
if len(values) == 1:
self.unique_id = values[0]
elif len(values) == 2:
self.unique_id = self.build_summary(values[0], values[1])
        elif len(values) == 3:
self.unique_id = self.build_summary(values[0], values[1], values[2])
def can_create(self): # pylint: disable=unused-argument,possibly-unused-variable
"""Determine if the required data that the API endpoint is expecting is present."""
valid_create = True
for field in value_fields:
if not field:
valid_create = False
return valid_create
class_name = indicator_type.replace(' ', '')
init_method = locals()['init']
set_unique_id_method = locals()['_set_unique_id']
can_create_method = locals()['can_create']
_metadata_map = locals()['_metadata_map_1']
new_class = type(
str(class_name),
(base_class,),
{
'__init__': init_method,
'_set_unique_id': set_unique_id_method,
'can_create': can_create_method,
'_metadata_map': _metadata_map,
},
)
return new_class
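# Illustrative sketch only: how this factory is typically invoked. The indicator name,
# entity/branch names, field list and the `tcex` instance below are assumptions for the
# example, not values taken from this module.
#
#     AdversaryAlias = custom_indicator_class_factory(
#         'Adversary Alias',        # indicator_type
#         'adversaryAlias',         # entity_type (api_entity)
#         'adversaryAliases',       # branch_type (api_branch)
#         Indicator,                # base_class (defined below)
#         ['Alias'],                # value_fields
#     )
#     alias = AdversaryAlias(tcex, Alias='suspicious-alias')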
class Indicator(Mappings):
"""Unique API calls for Indicator API Endpoints"""
def __init__(self, tcex, **kwargs):
"""Initialize Class Properties."""
super().__init__(
tcex,
'Indicator',
'indicators',
kwargs.pop('sub_type', None),
kwargs.pop('api_entity', 'indicator'),
kwargs.pop('api_branch', None),
kwargs.pop('owner', None),
)
for arg, value in kwargs.items():
self.add_key_value(arg, value)
@property
def as_entity(self):
"""Return the entity representation of the Indicator."""
return {
'type': self.api_sub_type,
'value': unquote(self.unique_id),
'id': self._data.get('id'),
}
@staticmethod
def is_indicator():
"""Return true if object type is an indicator."""
return True
@property
def owner(self):
"""Return the owner."""
return self._owner
def can_create(self):
"""Overridden by other indicator classes."""
return True
@staticmethod
def _metadata_map():
"""Map snake case fields to camel case fields."""
return {
'date_added': 'dateAdded',
'dns_active': 'dnsActive',
'last_modified': 'lastModified',
'private_flag': 'privateFlag',
'whois_active': 'whoisActive',
'key_name': 'Key Name',
'value_type': 'Value Type',
'value_name': 'Value Name',
'block': 'Block',
'mutex': 'Mutex',
'as_number': 'AS Number',
'hostname': 'hostName',
}
def add_key_value(self, key, value):
"""Convert the value and adds it as a data field.
Args:
key:
value:
"""
key = self._metadata_map().get(key, key)
if key in ['dateAdded', 'lastModified']:
self._data[key] = self._utils.datetime.format_datetime(
value, date_format='%Y-%m-%dT%H:%M:%SZ'
)
elif key == 'confidence':
self._data[key] = int(value)
elif key == 'rating':
self._data[key] = float(value)
elif key == 'unique_id':
self._unique_id = quote(self.fully_decode_uri(value), safe='')
else:
self._data[key] = value
def status(self, status=None, cal_status=None):
"""Update the Indicators status
Args:
status: Valid values to set to active are ['active', '2', '1' ] while
['inactive', '-2', '-1', 0] will set it to inactive
cal_status: Valid values to set to locked are ['locked', 'lock', '1' ] while
['unlock', 'unlocked', '0'] will set it to inactive
Returns:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
if not status and not cal_status:
return None
request_data = {}
if status:
status = str(status)
if status.lower() in ['active', '1']:
request_data['active'] = 1
elif status.lower() in ['inactive', '0']:
request_data['active'] = 0
if cal_status:
cal_status = str(cal_status)
if cal_status.lower() in ['locked', 'lock', '1']:
request_data['activeLocked'] = 1
elif cal_status.lower() in ['unlock', 'unlocked', '0']:
request_data['activeLocked'] = 0
return self.tc_requests.update(
self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner
)
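    # Usage sketch (argument values assumed for illustration): calling
    #   indicator.status(status='inactive', cal_status='locked')
    # sends {'active': 0, 'activeLocked': 1} to the update endpoint above.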
def rating(self, value):
"""Update the Indicators rating
Args:
value:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
request_data = {'rating': value}
return self.tc_requests.update(
self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner
)
def confidence(self, value):
"""Update the Indicators confidence
Args:
value:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
request_data = {'confidence': value}
return self.tc_requests.update(
self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner
)
def owners(self):
"""Return owners"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.owners(
self.api_type, self.api_branch, self.unique_id, owner=self.owner
)
def add_observers(self, count, date_observed):
"""Add a Indicator Observation
Args:
count:
date_observed:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
data = {
'count': count,
'dateObserved': self._utils.datetime.format_datetime(
date_observed, date_format='%Y-%m-%dT%H:%M:%SZ'
),
}
return self.tc_requests.add_observations(
self.api_type, self.api_branch, self.unique_id, data, owner=self.owner
)
def observation_count(self):
"""Get the indicators observation count.
Returns:
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.observation_count(
self.api_type, self.api_branch, self.unique_id, owner=self.owner
)
def add_false_positive(self):
"""Add a Indicator FalsePositive."""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.add_false_positive(
self.api_type, self.api_branch, self.unique_id, owner=self.owner
)
def observations(self):
"""Return indicator observation data.
Returns:
[type]: [description]
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.observations(
self.api_type, self.api_branch, self.unique_id, owner=self.owner
)
def deleted(self, deleted_since=None, filters=None, params=None):
"""Return deleted indicators from TC REST API.
Args:
deleted_since ([type]): [description]
filters ([type], optional): [description]. Defaults to None.
params ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
return self.tc_requests.deleted(
self.api_type,
self.api_branch,
deleted_since=deleted_since,
owner=self.owner,
filters=filters,
params=params,
)
@staticmethod
def build_summary(val1=None, val2=None, val3=None):
"""Construct an indicator summary given va1, va2, val3.
Args:
val1 (str, optional): Indicator value. Defaults to None.
val2 (str, optional): Indicator value. Defaults to None.
val3 (str, optional): Indicator value. Defaults to None.
Returns:
            str: <space><colon><space> delimited indicator summary.
"""
summary = []
if val1 is not None:
summary.append(val1)
if val2 is not None:
summary.append(val2)
if val3 is not None:
summary.append(val3)
if not summary:
return None
return ' : '.join(summary)
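    # For example (illustrative values only), build_summary('badguy.com', '10.1.1.1')
    # returns 'badguy.com : 10.1.1.1'.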
def __str__(self):
"""Return string representation of object"""
return json.dumps(self._data, indent=4)
| 32.402332 | 94 | 0.563703 |
a42ea6740457c18e314999da085523c6271f7e78
| 1,953 |
py
|
Python
|
5.0-clases-y-objetos/src/sobrecarga.py
|
zehemz/clases-python-101
|
633cb5f0cbc85e64e242514f0394754a5bed0513
|
[
"Apache-2.0"
] | null | null | null |
5.0-clases-y-objetos/src/sobrecarga.py
|
zehemz/clases-python-101
|
633cb5f0cbc85e64e242514f0394754a5bed0513
|
[
"Apache-2.0"
] | null | null | null |
5.0-clases-y-objetos/src/sobrecarga.py
|
zehemz/clases-python-101
|
633cb5f0cbc85e64e242514f0394754a5bed0513
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3.5
# -*- coding: utf-8 -*-
class Vehiculo(object):
def tipo(self):
print("Dos ruedas")
class Material(object):
def tipo(self):
print("plástico")
class Moto(Vehiculo, Material):
def modelo(self):
print("Modelo 1")
super(Moto, self).tipo()
super().tipo()
Material.tipo(self)
Vehiculo.tipo(self)
class Bicicleta(Material, Vehiculo):
def modelo(self):
print("Modelo 2")
super(Bicicleta, self).tipo()
super().tipo()
Material.tipo(self)
Vehiculo.tipo(self)
print("---------- Prioridad Clases ------------")
print(Moto.__mro__)
print(Bicicleta.__mro__)
print("---------- para objeto 1 ------------")
objeto1 = Moto()
objeto1.modelo()
objeto1.tipo()
print("---------- para objeto 2 ------------")
objeto2 = Bicicleta()
objeto2.modelo()
'''
Note: In Python 3, the syntax is simplified from super(ChildClass, self).__init__() to
super().__init__().
Note: In Python 3 we can call a parent-class method from inside the child class by
naming the parent class we want to inherit from directly, with the method to use in
dot notation:
Example: Vehiculo.tipo(self)
Mixing both kinds of calls in the same code can cause serious errors, especially when
working with __init__, so the two variants should NOT be mixed within one code base.
'''
''' Example of incorrect usage: '''
# class Vehiculo(object):
# def __init__(self):
# print("Dos ruedas")
# super(Vehiculo,self).__init__()
# class Material(object):
# def __init__(self):
# print("plástico")
# super(Material,self).__init__()
# class Moto(Vehiculo, Material):
# def __init__(self):
# print("Modelo 1")
# Vehiculo.__init__(self)
# Material.__init__(self)
#
# objeto1 = Moto()
'''
This is fixed by having Moto call a single cooperative parent constructor:
super().__init__()
'''
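''' Corrected sketch (illustrative): every class delegates exactly once through the MRO. '''
# class Vehiculo(object):
#     def __init__(self):
#         print("Dos ruedas")
#         super().__init__()
# class Material(object):
#     def __init__(self):
#         print("plástico")
#         super().__init__()
# class Moto(Vehiculo, Material):
#     def __init__(self):
#         print("Modelo 1")
#         super().__init__()
#
# objeto1 = Moto()  # prints: Modelo 1, Dos ruedas, plástico (one call per class, in MRO order)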
| 26.753425 | 88 | 0.635945 |
68363f7c08f8dd8587fc5acdc00f52b4d9474d8a
| 155 |
py
|
Python
|
src/python/phonetics/__init__.py
|
riasat97/Countrynameproject
|
71720b452348b52110438155c5cad39539ed59c4
|
[
"MIT"
] | null | null | null |
src/python/phonetics/__init__.py
|
riasat97/Countrynameproject
|
71720b452348b52110438155c5cad39539ed59c4
|
[
"MIT"
] | null | null | null |
src/python/phonetics/__init__.py
|
riasat97/Countrynameproject
|
71720b452348b52110438155c5cad39539ed59c4
|
[
"MIT"
] | null | null | null |
from .soundex import soundex # NOQA
from .metaphone import metaphone # NOQA
from .metaphone import dmetaphone # NOQA
from .nysiis import nysiis # NOQA
| 31 | 41 | 0.767742 |
189c17e3f65d55e361da44bd479740b66f2ee490
| 1,166 |
py
|
Python
|
app/api/rest/modules/alphabet.py
|
dolfinus/cryptonite
|
b90c24d008f4af78f8ed00fc9a30fea4628a443a
|
[
"MIT"
] | null | null | null |
app/api/rest/modules/alphabet.py
|
dolfinus/cryptonite
|
b90c24d008f4af78f8ed00fc9a30fea4628a443a
|
[
"MIT"
] | 4 |
2020-03-18T12:04:12.000Z
|
2020-07-07T19:32:23.000Z
|
app/api/rest/modules/alphabet.py
|
dolfinus/cryptonite
|
b90c24d008f4af78f8ed00fc9a30fea4628a443a
|
[
"MIT"
] | null | null | null |
class Alphabet:
lang = None
alphabet = None
def __init__(self, lang):
self.lang = lang
self.digits = '0123456789'
if lang == 'en':
self.alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if lang == 'en25':
self.alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
if lang == 'ru33':
self.alphabet = 'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'
if lang == 'ru':
self.alphabet = 'АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'
self.lowercase = self.alphabet.lower()
self.len = len(self.alphabet)
def get_pos(self, char):
if not char in self.alphabet:
return -1
return self.alphabet.find(char)
@staticmethod
def mod(num, base):
sign = 1 if num >= 0 else -1
num = num % base
if sign < 0:
num += base
return num % base
def mod_pos(self, pos):
return self.mod(pos, len(self.alphabet))
def get_char(self, pos):
return self.alphabet[self.mod_pos(pos)]
def shift(self, char, shift):
if not char in self.alphabet:
return ''
if str(shift) in self.alphabet:
shift = self.get_pos(shift)
result = self.get_char(self.get_pos(char) + int(shift))
return result
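# Minimal usage sketch (example values are assumptions, not part of the class above):
# shift() behaves like a Caesar shift over the selected alphabet and wraps via mod_pos().
if __name__ == '__main__':
    en = Alphabet('en')
    assert en.shift('A', 3) == 'D'    # plain integer shift
    assert en.shift('Z', 1) == 'A'    # wraps around the 26-letter alphabet
    assert en.shift('B', 'C') == 'D'  # a letter shift is first converted to its position (C -> 2)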
| 25.911111 | 59 | 0.629503 |
32582d251b2522f2adc4163d8d35feaef3ba5fdf
| 204 |
py
|
Python
|
automatewithpython/shutil/shutilco.py
|
Coalemus/Python-Projects
|
4b0e0c12a2fdcfbaf491df5715885c61f44bdb1c
|
[
"MIT"
] | null | null | null |
automatewithpython/shutil/shutilco.py
|
Coalemus/Python-Projects
|
4b0e0c12a2fdcfbaf491df5715885c61f44bdb1c
|
[
"MIT"
] | null | null | null |
automatewithpython/shutil/shutilco.py
|
Coalemus/Python-Projects
|
4b0e0c12a2fdcfbaf491df5715885c61f44bdb1c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copy a file into a folder, then copy a file under a new name.
import shutil, os
os.chdir("C:\\")
shutil.copy("C:\\spam.txt", "C:\\delicious") # Source, Destination
shutil.copy("eggst.txt", "C:\\delicious\\eggs2.txt")
| 17 | 67 | 0.647059 |
29bd17c812582c01062767e746eee501b5122c2a
| 467 |
py
|
Python
|
alipay/aop/api/response/AntMerchantExpandIndirectIsvModifyResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213 |
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AntMerchantExpandIndirectIsvModifyResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29 |
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AntMerchantExpandIndirectIsvModifyResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59 |
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AntMerchantExpandIndirectIsvModifyResponse(AlipayResponse):
def __init__(self):
super(AntMerchantExpandIndirectIsvModifyResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AntMerchantExpandIndirectIsvModifyResponse, self).parse_response_content(response_content)
| 29.1875 | 115 | 0.792291 |
7ff8c60e2cb5d8a50004cfffeccf66fdd5ea7565
| 26,565 |
py
|
Python
|
app/__init__.py
|
cds-snc/notification-admin
|
d4056798bf889ad29893667bbb67ead2f8e466e4
|
[
"MIT"
] | 16 |
2019-11-05T21:35:49.000Z
|
2022-01-12T15:00:32.000Z
|
app/__init__.py
|
cds-snc/notification-admin
|
d4056798bf889ad29893667bbb67ead2f8e466e4
|
[
"MIT"
] | 509 |
2019-07-11T22:03:19.000Z
|
2022-03-30T15:19:26.000Z
|
app/__init__.py
|
cds-snc/notification-admin
|
d4056798bf889ad29893667bbb67ead2f8e466e4
|
[
"MIT"
] | 8 |
2020-02-21T20:19:29.000Z
|
2022-03-31T14:17:02.000Z
|
import itertools
import os
import re
import urllib
from datetime import datetime, timedelta, timezone
from functools import partial
from numbers import Number
from time import monotonic
from urllib.parse import urljoin
import timeago
from flask import (
Markup,
current_app,
flash,
g,
make_response,
render_template,
request,
session,
url_for,
)
from flask._compat import string_types
from flask.globals import _lookup_req_object, _request_ctx_stack # type: ignore
from flask_babel import Babel, _
from flask_login import LoginManager, current_user
from flask_wtf import CSRFProtect
from flask_wtf.csrf import CSRFError
from itsdangerous import BadSignature
from notifications_python_client.errors import HTTPError
from notifications_utils import formatters, logging, request_helper
from notifications_utils.formatters import formatted_list
from notifications_utils.recipients import (
InvalidPhoneError,
format_phone_number_human_readable,
validate_phone_number,
)
from notifications_utils.sanitise_text import SanitiseASCII
from notifications_utils.timezones import utc_string_to_aware_gmt_datetime
from werkzeug.exceptions import HTTPException as WerkzeugHTTPException
from werkzeug.exceptions import abort
from werkzeug.local import LocalProxy
from app import proxy_fix
from app.asset_fingerprinter import asset_fingerprinter
from app.commands import setup_commands
from app.config import configs
from app.extensions import (
antivirus_client,
cache,
redis_client,
statsd_client,
zendesk_client,
)
from app.models.organisation import Organisation
from app.models.service import Service
from app.models.user import AnonymousUser, User
from app.navigation import (
AdminNavigation,
HeaderNavigation,
MainNavigation,
OrgNavigation,
)
from app.notify_client.api_key_api_client import api_key_api_client
from app.notify_client.billing_api_client import billing_api_client
from app.notify_client.complaint_api_client import complaint_api_client
from app.notify_client.email_branding_client import email_branding_client
from app.notify_client.events_api_client import events_api_client
from app.notify_client.inbound_number_client import inbound_number_client
from app.notify_client.invite_api_client import invite_api_client
from app.notify_client.job_api_client import job_api_client
from app.notify_client.letter_branding_client import letter_branding_client
from app.notify_client.letter_jobs_client import letter_jobs_client
from app.notify_client.notification_api_client import notification_api_client
from app.notify_client.org_invite_api_client import org_invite_api_client
from app.notify_client.organisations_api_client import organisations_client
from app.notify_client.platform_stats_api_client import platform_stats_api_client
from app.notify_client.provider_client import provider_client
from app.notify_client.service_api_client import service_api_client
from app.notify_client.status_api_client import status_api_client
from app.notify_client.template_api_prefill_client import template_api_prefill_client
from app.notify_client.template_folder_api_client import template_folder_api_client
from app.notify_client.template_statistics_api_client import template_statistics_client
from app.notify_client.user_api_client import user_api_client
from app.utils import documentation_url, id_safe
login_manager = LoginManager()
csrf = CSRFProtect()
# The current service attached to the request stack.
def _get_current_service():
return _lookup_req_object("service")
current_service = LocalProxy(_get_current_service)
# The current organisation attached to the request stack.
current_organisation = LocalProxy(partial(_lookup_req_object, "organisation"))
navigation = {
"header_navigation": HeaderNavigation(),
"admin_navigation": AdminNavigation(),
"org_navigation": OrgNavigation(),
"main_navigation": MainNavigation(),
}
def get_current_locale(application):
requestLang = request.accept_languages.best_match(application.config["LANGUAGES"])
if requestLang is None:
requestLang = "en"
if request.args.get("lang") and request.args.get("lang") in ["en", "fr"]:
lang = request.args.get("lang")
else:
lang = session.get("userlang", requestLang)
session["userlang"] = lang
return lang
def create_app(application):
setup_commands(application)
notify_environment = os.environ["NOTIFY_ENVIRONMENT"]
application.config.from_object(configs[notify_environment])
asset_fingerprinter._cdn_domain = application.config["ASSET_DOMAIN"]
asset_fingerprinter._asset_root = urljoin(application.config["ADMIN_BASE_URL"], application.config["ASSET_PATH"])
application.config["BABEL_DEFAULT_LOCALE"] = "en"
babel = Babel(application)
@babel.localeselector
def get_locale():
return get_current_locale(application)
init_app(application)
for client in (
# Gubbins
csrf,
login_manager,
proxy_fix,
request_helper,
cache,
# API clients
api_key_api_client,
billing_api_client,
complaint_api_client,
email_branding_client,
events_api_client,
inbound_number_client,
invite_api_client,
job_api_client,
letter_branding_client,
letter_jobs_client,
notification_api_client,
org_invite_api_client,
organisations_client,
platform_stats_api_client,
provider_client,
service_api_client,
status_api_client,
template_folder_api_client,
template_statistics_client,
template_api_prefill_client,
user_api_client,
# External API clients
antivirus_client,
statsd_client,
zendesk_client,
redis_client,
):
client.init_app(application)
logging.init_app(application, statsd_client)
# Log a warning message if Redis is not enabled
if not application.config["REDIS_ENABLED"]:
application.logger.warning(
"Redis is not enabled. Some features may not be supported. "
"If you want to enable Redis, look at REDIS_* config variables."
)
login_manager.login_view = "main.sign_in"
login_manager.login_message_category = "default"
login_manager.session_protection = None
login_manager.anonymous_user = AnonymousUser
# make sure we handle unicode correctly
redis_client.redis_store.decode_responses = True
from app.main import main as main_blueprint
application.register_blueprint(main_blueprint)
from .status import status as status_blueprint
application.register_blueprint(status_blueprint)
add_template_filters(application)
register_errorhandlers(application)
setup_event_handlers()
def init_app(application):
application.after_request(useful_headers_after_request)
application.after_request(save_service_or_org_after_request)
application.before_request(load_service_before_request)
application.before_request(load_organisation_before_request)
application.before_request(request_helper.check_proxy_header_before_request)
@application.before_request
def make_session_permanent():
# this is dumb. You'd think, given that there's `config['PERMANENT_SESSION_LIFETIME']`, that you'd enable
# permanent sessions in the config too - but no, you have to declare it for each request.
# https://stackoverflow.com/questions/34118093/flask-permanent-session-where-to-define-them
# session.permanent is also, helpfully, a way of saying that the session isn't permanent - in that, it will
# expire on its own, as opposed to being controlled by the browser's session. Because session is a proxy, it's
# only accessible from within a request context, so we need to set this before every request :rolls_eyes:
session.permanent = True
@application.context_processor
def _attach_current_service():
return {"current_service": current_service}
@application.context_processor
def _attach_current_organisation():
return {"current_org": current_organisation}
@application.context_processor
def _attach_current_user():
return {"current_user": current_user}
@application.context_processor
def _nav_selected():
return navigation
@application.before_request
def record_start_time():
g.start = monotonic()
g.endpoint = request.endpoint
@application.context_processor
def inject_global_template_variables():
return {
"header_colour": application.config["HEADER_COLOUR"],
"asset_url": asset_fingerprinter.get_url,
"asset_s3_url": asset_fingerprinter.get_s3_url,
"current_lang": get_current_locale(application),
"admin_base_url": application.config["ADMIN_BASE_URL"],
"sending_domain": application.config["SENDING_DOMAIN"],
"documentation_url": documentation_url,
}
def convert_to_boolean(value):
if isinstance(value, string_types):
if value.lower() in ["t", "true", "on", "yes", "1"]:
return True
elif value.lower() in ["f", "false", "off", "no", "0"]:
return False
return value
def linkable_name(value):
return urllib.parse.quote_plus(value)
def format_number(number):
lang = get_current_locale(current_app)
if lang == "fr":
# Spaces as separators
return "{:,}".format(number).replace(",", "\xa0") # \xa0: nbsp
return "{:,}".format(number) # Commas as separators
def format_datetime(date):
return "{} at {}".format(format_date(date), format_time(date))
def format_datetime_24h(date):
return "{} at {}".format(
format_date(date),
format_time_24h(date),
)
def format_datetime_normal(date):
return "{} at {}".format(format_date_normal(date), format_time(date))
def format_datetime_short(date):
return "{} at {}".format(format_date_short(date), format_time(date))
def format_datetime_relative(date):
return "{} at {}".format(get_human_day(date), format_time(date))
def format_datetime_numeric(date):
return "{} {}".format(
format_date_numeric(date),
format_time_24h(date),
)
def format_date_numeric(date):
return utc_string_to_aware_gmt_datetime(date).strftime("%Y-%m-%d")
def format_time_24h(date):
return utc_string_to_aware_gmt_datetime(date).strftime("%H:%M")
def get_human_day(time):
    # Subtract 1 minute so 00:00 becomes ‘midnight today’ instead of ‘midnight tomorrow’
date = (utc_string_to_aware_gmt_datetime(time) - timedelta(minutes=1)).date()
if date == (datetime.utcnow() + timedelta(days=1)).date():
return "tomorrow"
if date == datetime.utcnow().date():
return "today"
if date == (datetime.utcnow() - timedelta(days=1)).date():
return "yesterday"
return _format_datetime_short(date)
def format_time(date):
return (
{"12:00AM": "Midnight", "12:00PM": "Midday"}
.get(
utc_string_to_aware_gmt_datetime(date).strftime("%-I:%M%p"),
utc_string_to_aware_gmt_datetime(date).strftime("%-I:%M%p"),
)
.lower()
)
def format_date(date):
return utc_string_to_aware_gmt_datetime(date).strftime("%A %d %B %Y")
def format_date_normal(date):
return utc_string_to_aware_gmt_datetime(date).strftime("%d %B %Y").lstrip("0")
def format_date_short(date):
return _format_datetime_short(utc_string_to_aware_gmt_datetime(date))
def _format_datetime_short(datetime):
return datetime.strftime("%d %B").lstrip("0")
def format_delta(_date):
lang = get_current_locale(current_app)
date = utc_string_to_aware_gmt_datetime(_date)
now = datetime.now(timezone.utc)
return timeago.format(date, now, lang)
def translate_preview_template(_template_str):
def translate_brackets(x):
match, word = x.group(0), x.group(1)
return {
"From": _("From"),
"To": _("To"),
"Subject": _("Subject"),
"Reply to": _("Reply to"),
"From:": _("From:"),
"To:": _("To:"),
"phone number": _("phone number"),
"email address": _("email address"),
"hidden": _("hidden"),
}.get(word, match)
# This regex finds words inside []
template_str = re.sub(r"\[([^]]*)\]", translate_brackets, _template_str)
return Markup(template_str)
def format_thousands(value):
if isinstance(value, Number):
return "{:,.0f}".format(float(value))
if value is None:
return ""
return value
def valid_phone_number(phone_number):
try:
validate_phone_number(phone_number)
return True
except InvalidPhoneError:
return False
def format_notification_type(notification_type):
return {"email": "Email", "sms": "SMS", "letter": "Letter"}[notification_type]
def format_notification_status(status, template_type, provider_response=None):
if provider_response:
return _(provider_response)
return {
"email": {
"failed": _("Failed"),
"technical-failure": _("Technical failure"),
"temporary-failure": _("Inbox not accepting messages right now"),
"permanent-failure": _("Email address does not exist"),
"delivered": _("Delivered"),
"sending": _("Sending"),
"created": _("Sending"),
"sent": _("Delivered"),
},
"sms": {
"failed": _("Failed"),
"technical-failure": _("Technical failure"),
"temporary-failure": _("Phone number not accepting messages right now"),
"permanent-failure": _("Phone number does not exist"),
"delivered": _("Delivered"),
"sending": _("Sending"),
"created": _("Sending"),
"pending": _("Sending"),
"sent": _("Sent"),
},
"letter": {
"failed": "",
"technical-failure": "Technical failure",
"temporary-failure": "",
"permanent-failure": "",
"delivered": "",
"received": "",
"accepted": "",
"sending": "",
"created": "",
"sent": "",
"pending-virus-check": "",
"virus-scan-failed": "Virus detected",
"returned-letter": "",
"cancelled": "",
"validation-failed": "Validation failed",
},
}[template_type].get(status, status)
def format_notification_status_as_time(status, created, updated):
return dict.fromkeys(
{"created", "pending", "sending"},
" " + _("since") + ' <span class="local-datetime-short">{}</span>'.format(created),
).get(status, '<span class="local-datetime-short">{}</span>'.format(updated))
def format_notification_status_as_field_status(status, notification_type):
return (
{
"letter": {
"failed": "error",
"technical-failure": "error",
"temporary-failure": "error",
"permanent-failure": "error",
"delivered": None,
"sent": None,
"sending": None,
"created": None,
"accepted": None,
"pending-virus-check": None,
"virus-scan-failed": "error",
"returned-letter": None,
"cancelled": "error",
}
}
.get(
notification_type,
{
"failed": "error",
"technical-failure": "error",
"temporary-failure": "error",
"permanent-failure": "error",
"delivered": None,
"sent": None,
"sending": "default",
"created": "default",
"pending": "default",
},
)
.get(status, "error")
)
def format_notification_status_as_url(status, notification_type):
url = partial(url_for, "main.messages_status")
if status not in {
"technical-failure",
"temporary-failure",
"permanent-failure",
}:
return None
return {
"email": url(_anchor="email-statuses"),
"sms": url(_anchor="sms-statuses"),
}.get(notification_type)
def get_and_n_more_text(number_of_addresses):
"number_of_addresses could be email addresses or sms sending numbers"
number_of_hidden_addresses = number_of_addresses - 1
if number_of_hidden_addresses < 1:
# This should never happen - this function is not
# called in this case.
return _("…and 0 more")
if number_of_hidden_addresses == 1:
return _("…and 1 more")
if number_of_hidden_addresses > 1:
return _("…and {} more").format(number_of_hidden_addresses)
def nl2br(value):
return formatters.nl2br(value) if value else ""
@login_manager.user_loader
def load_user(user_id):
return User.from_id(user_id)
def load_service_before_request():
if "/static/" in request.url:
_request_ctx_stack.top.service = None
_request_ctx_stack.top.organisation = None # added to init None to ensure request context has None or something
return
if _request_ctx_stack.top is not None:
_request_ctx_stack.top.service = None
_request_ctx_stack.top.organisation = None # added to init None to ensure request context has None or something
if request.view_args:
service_id = request.view_args.get("service_id", session.get("service_id"))
else:
service_id = session.get("service_id")
if service_id:
try:
_request_ctx_stack.top.service = Service(service_api_client.get_service(service_id)["data"])
except HTTPError as exc:
# if service id isn't real, then 404 rather than 500ing later because we expect service to be set
if exc.status_code == 404:
abort(404)
else:
raise
def load_organisation_before_request():
if "/static/" in request.url:
_request_ctx_stack.top.organisation = None
return
if _request_ctx_stack.top is not None:
_request_ctx_stack.top.organisation = None
if request.view_args:
org_id = request.view_args.get("org_id")
if org_id:
try:
_request_ctx_stack.top.organisation = Organisation.from_id(org_id)
except HTTPError as exc:
# if org id isn't real, then 404 rather than 500ing later because we expect org to be set
if exc.status_code == 404:
abort(404)
else:
raise
def save_service_or_org_after_request(response):
# Only save the current session if the request is 200
service_id = request.view_args.get("service_id", None) if request.view_args else None
organisation_id = request.view_args.get("org_id", None) if request.view_args else None
if response.status_code == 200:
if service_id:
session["service_id"] = service_id
session["organisation_id"] = None
elif organisation_id:
session["service_id"] = None
session["organisation_id"] = organisation_id
return response
# https://www.owasp.org/index.php/List_of_useful_HTTP_headers
def useful_headers_after_request(response):
response.headers.add("X-Frame-Options", "deny")
response.headers.add("X-Content-Type-Options", "nosniff")
response.headers.add("X-XSS-Protection", "1; mode=block")
response.headers.add("Permissions-Policy", "interest-cohort=()")
response.headers.add(
"Content-Security-Policy",
(
"default-src 'self' {asset_domain} 'unsafe-inline';"
"script-src 'self' {asset_domain} *.google-analytics.com *.googletagmanager.com 'unsafe-inline' 'unsafe-eval' data:;"
"connect-src 'self' *.google-analytics.com;"
"object-src 'self';"
"style-src 'self' *.googleapis.com 'unsafe-inline';"
"font-src 'self' {asset_domain} *.googleapis.com *.gstatic.com data:;"
"img-src 'self' {asset_domain} *.google-analytics.com *.notifications.service.gov.uk data:;" # noqa: E501
"frame-src 'self' www.youtube.com;".format(
asset_domain=current_app.config["ASSET_DOMAIN"],
)
),
)
if "Cache-Control" in response.headers:
del response.headers["Cache-Control"]
# Cache static assets (CSS, JS, images) for a long time
# as they have unique hashes thanks to the asset
# fingerprinter
if asset_fingerprinter.is_static_asset(request.url):
response.headers.add("Cache-Control", "public, max-age=31536000, immutable")
else:
response.headers.add("Cache-Control", "no-store, no-cache, private, must-revalidate")
for key, value in response.headers:
response.headers[key] = SanitiseASCII.encode(value)
return response
def register_errorhandlers(application): # noqa (C901 too complex)
def _error_response(error_code):
resp = make_response(render_template("error/{0}.html".format(error_code)), error_code)
return useful_headers_after_request(resp)
@application.errorhandler(HTTPError)
def render_http_error(error):
application.logger.warning(
"API {} failed with status {} message {}".format(
error.response.url if error.response else "unknown",
error.status_code,
error.message,
)
)
error_code = error.status_code
if error_code == 400:
# all incoming 400 errors from the API are wrapped for translation
# Need to make sure all of them have translations in the csv files
if isinstance(error.message, str):
msg = [_(error.message)]
else:
msg = list(itertools.chain(_(error.message[x]) for x in error.message.keys()))
resp = make_response(render_template("error/400.html", message=msg))
return useful_headers_after_request(resp)
elif error_code not in [401, 404, 403, 410]:
# probably a 500 or 503
application.logger.exception(
"API {} failed with status {} message {}".format(
error.response.url if error.response else "unknown",
error.status_code,
error.message,
)
)
error_code = 500
return _error_response(error_code)
@application.errorhandler(400)
def handle_400(error):
return _error_response(400)
@application.errorhandler(410)
def handle_gone(error):
return _error_response(410)
@application.errorhandler(413)
def handle_payload_too_large(error):
return _error_response(413)
@application.errorhandler(404)
def handle_not_found(error):
return _error_response(404)
@application.errorhandler(403)
def handle_not_authorized(error):
return _error_response(403)
@application.errorhandler(401)
def handle_no_permissions(error):
return _error_response(401)
@application.errorhandler(BadSignature)
def handle_bad_token(error):
# if someone has a malformed token
flash(_("There’s something wrong with the link you’ve used."))
return _error_response(404)
@application.errorhandler(CSRFError)
def handle_csrf(reason):
application.logger.warning("csrf.error_message: {}".format(reason))
if "user_id" not in session:
application.logger.warning("csrf.session_expired: Redirecting user to log in page")
return application.login_manager.unauthorized()
application.logger.warning(
"csrf.invalid_token: Aborting request, user_id: {user_id}",
extra={"user_id": session["user_id"]},
)
resp = make_response(
render_template(
"error/400.html",
message=["Something went wrong, please go back and try again."],
),
400,
)
return useful_headers_after_request(resp)
@application.errorhandler(405)
def handle_405(error):
resp = make_response(
render_template(
"error/400.html",
message=["Something went wrong, please go back and try again."],
),
405,
)
return useful_headers_after_request(resp)
@application.errorhandler(WerkzeugHTTPException)
def handle_http_error(error):
if error.code == 301:
# PermanentRedirect exception
return error
return _error_response(error.code)
@application.errorhandler(500)
@application.errorhandler(Exception)
def handle_bad_request(error):
current_app.logger.exception(error)
# We want the Flask in browser stacktrace
if current_app.config.get("DEBUG", None):
raise error
if "Detected newline in header value" in str(error):
return _error_response(400)
else:
return _error_response(500)
def setup_event_handlers():
from flask_login import user_logged_in
from app.event_handlers import on_user_logged_in
user_logged_in.connect(on_user_logged_in)
def add_template_filters(application):
for fn in [
format_number,
format_datetime,
format_datetime_24h,
format_datetime_normal,
format_datetime_short,
format_time,
valid_phone_number,
linkable_name,
format_date,
format_date_normal,
format_date_short,
format_datetime_relative,
format_delta,
translate_preview_template,
format_notification_status,
format_notification_type,
format_notification_status_as_time,
format_notification_status_as_field_status,
format_notification_status_as_url,
formatted_list,
get_and_n_more_text,
nl2br,
format_phone_number_human_readable,
format_thousands,
id_safe,
]:
application.add_template_filter(fn)
| 33.626582 | 129 | 0.662451 |
c706e60049968d64ab4b96bcd3ba89000ff0b580
| 6,042 |
py
|
Python
|
chainer/functions/math/tensordot.py
|
dydo0316/test2
|
a9982a8b426dd07eb1ec4e7695a7bc546ecc6063
|
[
"MIT"
] | null | null | null |
chainer/functions/math/tensordot.py
|
dydo0316/test2
|
a9982a8b426dd07eb1ec4e7695a7bc546ecc6063
|
[
"MIT"
] | null | null | null |
chainer/functions/math/tensordot.py
|
dydo0316/test2
|
a9982a8b426dd07eb1ec4e7695a7bc546ecc6063
|
[
"MIT"
] | null | null | null |
import numpy
import six
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import collections_abc
from chainer.utils import type_check
def _tensordot(a, b, a_axes, b_axes, c_axes=None):
a_col_ndim = len(a_axes[1])
b_row_ndim = len(b_axes[0])
if a_col_ndim != b_row_ndim:
raise ValueError('axes count mismatch')
if a.ndim < a_col_ndim or b.ndim < b_row_ndim:
raise ValueError('dimension of input tensors must be '
'greater equal to dot-axes count ({})'
.format(a_col_ndim))
for a_axis, b_axis in zip(a_axes[1], b_axes[0]):
if a.shape[a_axis] != b.shape[b_axis]:
raise ValueError('shape mismatch')
xp = cuda.get_array_module(a)
y = xp.tensordot(a, b, axes=(tuple(a_axes[1]), tuple(b_axes[0])))
if c_axes is not None:
a_row_ndim = len(a_axes[0])
b_col_ndim = len(b_axes[1])
c_row_ndim = len(c_axes[0])
c_col_ndim = len(c_axes[1])
if a_row_ndim != c_row_ndim:
raise ValueError('axes count mismatch')
if b_col_ndim != c_col_ndim:
raise ValueError('axes count mismatch')
trans = [None for i in six.moves.range(y.ndim)]
table_a = [1 if i in a_axes[0] else 0 for i in six.moves.range(a.ndim)]
table_a = numpy.cumsum(table_a) - 1
for i, c_axis in enumerate(c_axes[0]):
trans[c_axis] = table_a[a_axes[0][i]]
table_b = [1 if i in b_axes[1] else 0 for i in six.moves.range(b.ndim)]
table_b = numpy.cumsum(table_b) - 1
for i, c_axis in enumerate(c_axes[1]):
trans[c_axis] = table_b[b_axes[1][i]] + len(a_axes[0])
for i, c_axis in enumerate(trans):
if i != c_axis:
y = xp.transpose(y, trans)
break
return y
class TensorDot(function_node.FunctionNode):
def __init__(self, axes=2, a_axes=None, b_axes=None, c_axes=None,
dtype=None):
self.axes = axes
self.a_axes = a_axes
self.b_axes = b_axes
self.c_axes = c_axes
self.dtype = dtype
if isinstance(axes, collections_abc.Sequence):
if len(axes) != 2:
raise ValueError('axes must be a pair of sequence of integers '
'when it is a list or tuple.')
elif isinstance(axes, int):
pass
else:
raise TypeError('axes must be a pair of sequence of integers or '
'an integer')
def check_type_forward(self, in_types):
type_check.argname(in_types, ('a', 'b'))
a_type, b_type = in_types
type_check.expect(
a_type.dtype.kind == 'f',
b_type.dtype.kind == 'f',
)
def forward(self, inputs):
self.retain_inputs((0, 1))
a, b = inputs
if self.a_axes is None or self.b_axes is None:
a_axes = [[], []] # 0:row axes, 1:col axes
b_axes = [[], []] # 0:row axes, 1:col axes
axes = self.axes
if isinstance(axes, collections_abc.Sequence):
a_axes[1], b_axes[0] = axes
if numpy.isscalar(a_axes[1]):
a_axes[1] = a_axes[1],
if numpy.isscalar(b_axes[0]):
b_axes[0] = b_axes[0],
else:
a_axes[1] = six.moves.range(a.ndim - axes, a.ndim)
b_axes[0] = six.moves.range(axes)
a_range = six.moves.range(a.ndim)
a_axes[0] = [i for i in a_range if i not in a_axes[1]]
b_range = six.moves.range(b.ndim)
b_axes[1] = [i for i in b_range if i not in b_axes[0]]
self.a_axes = a_axes
self.b_axes = b_axes
c = _tensordot(a, b, self.a_axes, self.b_axes, self.c_axes)
if self.c_axes is None:
c_axes = [[], []] # 0:row axes, 1:col axes
c_row_ndim = len(self.a_axes[0])
c_col_ndim = len(self.b_axes[1])
c_axes[0] = six.moves.range(c_row_ndim)
c_axes[1] = six.moves.range(c_row_ndim, c_row_ndim + c_col_ndim)
self.c_axes = c_axes
return utils.force_array(c, self.dtype),
def backward(self, indexes, grad_outputs):
a, b = self.get_retained_inputs()
gc, = grad_outputs
ga = None
if 0 in indexes:
ga, = TensorDot(a_axes=self.c_axes,
b_axes=[self.b_axes[1], self.b_axes[0]],
c_axes=self.a_axes,
dtype=a.dtype).apply((gc, b))
gb = None
if 1 in indexes:
gb, = TensorDot(a_axes=[self.a_axes[1], self.a_axes[0]],
b_axes=self.c_axes,
c_axes=self.b_axes,
dtype=b.dtype).apply((a, gc))
return ga, gb
def tensordot(a, b, axes=2):
"""Returns the tensor dot product of two arrays along specified axes.
This is equivalent to compute dot product along the specified axes which
are treated as one axis by reshaping.
Args:
a (Variable): The first argument.
b (Variable): The second argument.
axes:
- If it is an integer, then ``axes`` axes at the last of ``a`` and
the first of ``b`` are used.
- If it is a pair of sequences of integers, then these two
sequences specify the list of axes for ``a`` and ``b``. The
corresponding axes are paired for sum-product.
Returns:
~chainer.Variable: The tensor dot product of ``a`` and ``b`` along the
axes specified by ``axes``.
.. admonition:: Example
>>> a = np.random.rand(5, 3, 2)
>>> b = np.random.rand(3, 2, 4)
>>> c = F.tensordot(a, b, axes=2)
>>> c.shape
(5, 4)
.. seealso:: :func:`numpy.tensordot`
"""
return TensorDot(axes=axes).apply((a, b))[0]
| 35.127907 | 79 | 0.547501 |
7087471a4d256da42baaccebac4d0e173667d875
| 4,613 |
py
|
Python
|
tests/thumbnail_tests/utils.py
|
Resmin/sorl-thumbnail
|
caea1969ecdd0d7de2456b8dbdcdfca60412358f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/thumbnail_tests/utils.py
|
Resmin/sorl-thumbnail
|
caea1969ecdd0d7de2456b8dbdcdfca60412358f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/thumbnail_tests/utils.py
|
Resmin/sorl-thumbnail
|
caea1969ecdd0d7de2456b8dbdcdfca60412358f
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import os
import shutil
import unittest
import logging
from contextlib import contextmanager
from subprocess import check_output
from PIL import Image
from django.test.signals import setting_changed
from django.conf import UserSettingsHolder
from sorl.thumbnail.conf import settings
from sorl.thumbnail.helpers import get_module_class
from sorl.thumbnail.images import ImageFile
from sorl.thumbnail.log import ThumbnailLogHandler
from .models import Item
from .storage import MockLoggingHandler
DATA_DIR = os.path.join(settings.MEDIA_ROOT, 'data')
handler = ThumbnailLogHandler()
handler.setLevel(logging.ERROR)
logging.getLogger('sorl.thumbnail').addHandler(handler)
@contextmanager
def same_open_fd_count(testcase):
num_opened_fd_before = get_open_fds_count()
yield
num_opened_fd_after = get_open_fds_count()
testcase.assertEqual(
num_opened_fd_before, num_opened_fd_after,
'Open descriptors count changed, was %s, now %s' % (num_opened_fd_before,
num_opened_fd_after)
)
def get_open_fds_count():
"""Return the number of open file descriptors for current process
.. warning: will only work on UNIX-like os-es.
"""
pid = os.getpid()
procs = check_output(["lsof", '-w', '-Ff', "-p", str(pid)])
nprocs = len(
[s for s in procs.decode('utf-8').split('\n') if s and s[0] == 'f' and s[1:].isdigit()]
)
return nprocs
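# Illustrative sketch (hypothetical test body): the context manager above is meant to wrap
# code that must not leak file descriptors. UNIX-only, since get_open_fds_count() shells
# out to lsof.
#
#     class NoFdLeakTestCase(unittest.TestCase):
#         def test_does_not_leak_fds(self):
#             with same_open_fd_count(self):
#                 open(os.devnull).close()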
class override_custom_settings(object):
"""
settings overrider context manager.
https://github.com/django/django/blob/1.6.2/django/test/utils.py#L209-L268
"""
def __init__(self, settings_obj, **kwargs):
self.settings = settings_obj
self.options = kwargs
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def enable(self):
override = UserSettingsHolder(self.settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = self.settings._wrapped
self.settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=self.settings._wrapped.__class__,
setting=key, value=new_value, enter=True)
def disable(self):
self.settings._wrapped = self.wrapped
del self.wrapped
for key in self.options:
new_value = getattr(self.settings, key, None)
setting_changed.send(sender=self.settings._wrapped.__class__,
setting=key, value=new_value, enter=False)
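# Usage sketch (THUMBNAIL_DEBUG is an assumed example key), mirroring
# django.test.utils.override_settings but applied to sorl-thumbnail's settings object:
#
#     with override_custom_settings(settings, THUMBNAIL_DEBUG=True):
#         ...  # code here sees the overridden value; it is restored on exit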
class FakeFile(object):
"""
Used to test the _get_format method.
"""
def __init__(self, name):
self.name = name
class BaseTestCase(unittest.TestCase):
IMAGE_DIMENSIONS = [(500, 500), (100, 100), (200, 100), ]
BACKEND = None
ENGINE = None
KVSTORE = None
def create_image(self, name, dim):
"""
Creates an image and prepends the MEDIA ROOT path.
:param name: e.g. 500x500.jpg
:param dim: a dimension tuple e.g. (500, 500)
"""
filename = os.path.join(settings.MEDIA_ROOT, name)
im = Image.new('L', dim)
im.save(filename)
return Item.objects.get_or_create(image=name)
def setUp(self):
self.BACKEND = get_module_class(settings.THUMBNAIL_BACKEND)()
self.ENGINE = get_module_class(settings.THUMBNAIL_ENGINE)()
self.KVSTORE = get_module_class(settings.THUMBNAIL_KVSTORE)()
if not os.path.exists(settings.MEDIA_ROOT):
os.makedirs(settings.MEDIA_ROOT)
shutil.copytree(settings.DATA_ROOT, DATA_DIR)
for dimension in self.IMAGE_DIMENSIONS:
name = '%sx%s.jpg' % dimension
self.create_image(name, dimension)
def tearDown(self):
shutil.rmtree(settings.MEDIA_ROOT)
class BaseStorageTestCase(unittest.TestCase):
image = None
name = None
def setUp(self):
os.makedirs(settings.MEDIA_ROOT)
filename = os.path.join(settings.MEDIA_ROOT, self.name)
Image.new('L', (100, 100)).save(filename)
self.image = ImageFile(self.name)
logger = logging.getLogger('slog')
logger.setLevel(logging.DEBUG)
handler = MockLoggingHandler(level=logging.DEBUG)
logger.addHandler(handler)
self.log = handler.messages['debug']
def tearDown(self):
shutil.rmtree(settings.MEDIA_ROOT)
| 30.959732 | 95 | 0.660525 |
704b2657023bb17bb56383a04c7d84d4532bc5e1
| 253 |
py
|
Python
|
configs/faster_rcnn_hpc/faster_rcnn_r50_fpn_1x_tct_moco_lr10.py
|
zhaoyang97/mmdetection
|
93ce0e7b735ad1ed2e7d856ef80e3aa598cb47e5
|
[
"Apache-2.0"
] | null | null | null |
configs/faster_rcnn_hpc/faster_rcnn_r50_fpn_1x_tct_moco_lr10.py
|
zhaoyang97/mmdetection
|
93ce0e7b735ad1ed2e7d856ef80e3aa598cb47e5
|
[
"Apache-2.0"
] | null | null | null |
configs/faster_rcnn_hpc/faster_rcnn_r50_fpn_1x_tct_moco_lr10.py
|
zhaoyang97/mmdetection
|
93ce0e7b735ad1ed2e7d856ef80e3aa598cb47e5
|
[
"Apache-2.0"
] | null | null | null |
_base_ = ['./faster_rcnn_r50_fpn_1x_tct.py']
model = dict(
# pretrained='open-mmlab://detectron2/resnet50_caffe',
pretrained='icode/moco_v2_800ep_pretrain_rename.pth')
optimizer = dict(type='SGD', lr=0.02/20, momentum=0.9, weight_decay=0.0001)
| 36.142857 | 75 | 0.73913 |
f992c5f8589e66c70d1436359403fd910b66b1f9
| 1,800 |
py
|
Python
|
meraki/api/mx_inbound_firewall.py
|
fsandberg/dashboard-api-python
|
c01ff038643a39bd12660d2719375eeb05c7ba24
|
[
"MIT"
] | null | null | null |
meraki/api/mx_inbound_firewall.py
|
fsandberg/dashboard-api-python
|
c01ff038643a39bd12660d2719375eeb05c7ba24
|
[
"MIT"
] | null | null | null |
meraki/api/mx_inbound_firewall.py
|
fsandberg/dashboard-api-python
|
c01ff038643a39bd12660d2719375eeb05c7ba24
|
[
"MIT"
] | null | null | null |
class MXInboundFirewall(object):
def __init__(self, session):
super(MXInboundFirewall, self).__init__()
self._session = session
def getNetworkApplianceFirewallInboundFirewallRules(self, networkId: str):
"""
**Return the inbound firewall rules for an MX network**
https://developer.cisco.com/docs/meraki-api-v0/#!get-network-appliance-firewall-inbound-firewall-rules
- networkId (string)
"""
metadata = {
'tags': ['MX inbound firewall'],
'operation': 'getNetworkApplianceFirewallInboundFirewallRules',
}
resource = f'/networks/{networkId}/appliance/firewall/inboundFirewallRules'
return self._session.get(metadata, resource)
def updateNetworkApplianceFirewallInboundFirewallRules(self, networkId: str, **kwargs):
"""
**Update the inbound firewall rules of an MX network**
https://developer.cisco.com/docs/meraki-api-v0/#!update-network-appliance-firewall-inbound-firewall-rules
- networkId (string)
- rules (array): An ordered array of the firewall rules (not including the default rule)
- syslogDefaultRule (boolean): Log the special default rule (boolean value - enable only if you've configured a syslog server) (optional)
"""
kwargs.update(locals())
metadata = {
'tags': ['MX inbound firewall'],
'operation': 'updateNetworkApplianceFirewallInboundFirewallRules',
}
resource = f'/networks/{networkId}/appliance/firewall/inboundFirewallRules'
body_params = ['rules', 'syslogDefaultRule']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.put(metadata, resource, payload)
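# --- Hedged usage sketch (editor addition, not part of the SDK file above) ---
# The wrapper only needs an object exposing get()/put() the way _session is
# used above; _EchoSession and the network id 'N_123' are invented for this
# sketch, and Python 3.6+ is assumed (the resource paths are f-strings).
if __name__ == '__main__':
    class _EchoSession(object):
        def get(self, metadata, resource):
            return ('GET', resource)

        def put(self, metadata, resource, payload):
            return ('PUT', resource, payload)

    fw = MXInboundFirewall(_EchoSession())
    print(fw.getNetworkApplianceFirewallInboundFirewallRules('N_123'))
    print(fw.updateNetworkApplianceFirewallInboundFirewallRules(
        'N_123', rules=[], syslogDefaultRule=False))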
| 40 | 145 | 0.655556 |
fb61b7491608973267199175e119b9307878510c
| 1,843 |
py
|
Python
|
aldryn_newsblog/tests/test_i18n.py
|
compoundpartners/js-articles
|
2b48ed805f44c77fee9fdc6eae0b711147d9b77c
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_newsblog/tests/test_i18n.py
|
compoundpartners/js-articles
|
2b48ed805f44c77fee9fdc6eae0b711147d9b77c
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_newsblog/tests/test_i18n.py
|
compoundpartners/js-articles
|
2b48ed805f44c77fee9fdc6eae0b711147d9b77c
|
[
"BSD-3-Clause"
] | 1 |
2018-12-10T10:42:42.000Z
|
2018-12-10T10:42:42.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.core.urlresolvers import NoReverseMatch
except ImportError:
# Django 2.0
from django.urls import NoReverseMatch
from django.utils.translation import override
from . import NewsBlogTestCase
class TestI18N(NewsBlogTestCase):
def test_absolute_url_fallback(self):
# Create an EN article
with override('en'):
article = self.create_article(
title='God Save the Queen!', slug='god-save-queen')
# Add a DE translation
article.create_translation('de',
title='Einigkeit und Recht und Freiheit!',
slug='einigkeit-und-recht-und-freiheit')
# Reload for good measure
article = self.reload(article)
self.assertEquals(article.get_absolute_url(language='en'),
'/en/page/god-save-queen/')
# Test that we can request the other defined language too
self.assertEquals(article.get_absolute_url(language='de'),
'/de/page/einigkeit-und-recht-und-freiheit/')
        # Now, let's request a language that the article has not yet been
        # translated into, but that has fallbacks defined; we should get EN
self.assertEquals(article.get_absolute_url(language='fr'),
'/en/page/god-save-queen/')
# With settings changed to 'redirect_on_fallback': False, test again.
with self.settings(CMS_LANGUAGES=self.NO_REDIRECT_CMS_SETTINGS):
self.assertEquals(article.get_absolute_url(language='fr'),
'/fr/page/god-save-queen/')
# Now, let's request a language that has a fallback defined, but it is
# not available either (should raise NoReverseMatch)
with self.assertRaises(NoReverseMatch):
article.get_absolute_url(language='it')
| 36.86 | 80 | 0.66522 |
6843ff0221dffea4ddf8da8ad5b728662f2c5caa
| 961 |
py
|
Python
|
server/src/tests/samples/annotatedVar5.py
|
adafruit/pyright
|
cdb883014bbb7392c04c4d4c65aa1c95aa7fa7ab
|
[
"MIT"
] | 1 |
2019-09-14T06:02:16.000Z
|
2019-09-14T06:02:16.000Z
|
server/src/tests/samples/annotatedVar5.py
|
adafruit/pyright
|
cdb883014bbb7392c04c4d4c65aa1c95aa7fa7ab
|
[
"MIT"
] | 4 |
2021-03-11T07:03:36.000Z
|
2021-10-06T22:27:25.000Z
|
server/src/tests/samples/annotatedVar5.py
|
acidburn0zzz/pyright
|
e41cc1bd7634ebfb25e8c71d5f065236581e0bff
|
[
"MIT"
] | null | null | null |
# This sample tests type annotations for instance variables.
class ClassC(object):
def __init__(self):
# This should generate an error.
self.inst_var1 = 3
@property
def prop1(self):
return 1
@prop1.setter
def prop1(self, val):
pass
def foo(self):
# This should generate an error because the assigned
# type doesn't match the declared type.
self.inst_var1 = 3 # type: str
self.inst_var1: str = 'hello'
# This should generate an error because the declared
# type doesn't match the previously declared type.
self.inst_var1: int = 'hello'
# This should generate an error because the declared
# type doesn't match the previously declared type.
self.inst_var1 = 'hello' # type: int
self.prop1 = 3
class ClassE(ClassC):
def __init__(self):
# This should generate an error.
self.inst_var1 = 3
| 24.025 | 60 | 0.620187 |
7876cfa0064f3bb140057dabe013d817a85c7372
| 2,225 |
py
|
Python
|
tests/test_s_dual_thrust.py
|
terencehk/rqalpha
|
349e6a0a8e45449646acd6063cdec06df3bc1171
|
[
"Apache-2.0"
] | 5,263 |
2016-07-20T10:41:10.000Z
|
2022-03-29T08:24:34.000Z
|
tests/test_s_dual_thrust.py
|
terencehk/rqalpha
|
349e6a0a8e45449646acd6063cdec06df3bc1171
|
[
"Apache-2.0"
] | 572 |
2016-07-28T07:51:02.000Z
|
2022-02-09T15:28:03.000Z
|
tests/test_s_dual_thrust.py
|
terencehk/rqalpha
|
349e6a0a8e45449646acd6063cdec06df3bc1171
|
[
"Apache-2.0"
] | 1,769 |
2016-07-20T11:11:55.000Z
|
2022-03-31T10:11:38.000Z
|
def init(context):
context.s1 = '000905.XSHG'
subscribe(context.s1)
def handle_bar(context, bar_dict):
# stocknum = 50
his = history_bars(context.s1, 10, '1d', 'close')
# print(his)
if his[9] / his[8] < 0.97:
if len(context.portfolio.positions) > 0:
for stock in context.portfolio.positions.keys():
order_target_percent(stock, 0)
return
    # Allocate capital
# if len(context.portfolio.positions) < stocknum:
# Num = stocknum - len(context.portfolio.positions)
# Cash = context.portfolio.cash/Num
# else:
# Cash = context.portfolio.cash
# Buy
    # Determine the position currently held in this stock; buy only if there is no position and the conditions are met
position = context.portfolio.positions[context.s1].quantity
# print(position)
if position < 100:
High = history_bars(context.s1, 3, '1d', 'high')
Low = history_bars(context.s1, 3, '1d', 'low')
Close = history_bars(context.s1, 3, '1d', 'close')
Open = history_bars(context.s1, 3, '1d', 'open')
# logger.info(High)
HH = max(High[:2])
LC = min(Close[:2])
HC = max(Close[:2])
LL = min(Low[:2])
Openprice = Open[2]
# logger.info(HH)
# print(LC)
# print(HC)
# print(LL)
# print(Openprice)
        # Use the close of day n-1 as the current price
current_price = Close[2]
Range = max((HH - LC), (HC - LL))
K1 = 0.9
BuyLine = Openprice + K1 * Range
# print(BuyLine,'buyline')
if current_price > BuyLine:
order_target_percent(context.s1, 1)
hist = history_bars(context.s1, 3, '1d', 'close')
case1 = (1 - hist[2] / hist[0]) >= 0.06
case2 = hist[1] / hist[0] <= 0.92
if case1 or case2:
order_target_percent(context.s1, 0)
__config__ = {
"base": {
"start_date": "2013-01-01",
"end_date": "2015-12-29",
"frequency": "1d",
"matching_type": "current_bar",
"benchmark": "000300.XSHG",
"accounts": {
"stock": 1000000
}
},
"extra": {
"log_level": "error",
},
"mod": {
"sys_progress": {
"enabled": True,
"show": True,
},
},
}
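# --- Hedged sketch (editor addition, not part of the original strategy) ---
# Framework-free restatement of the Dual Thrust trigger used in handle_bar():
# Range = max(HH - LC, HC - LL) and BuyLine = Open + K1 * Range. The sample
# prices below are invented for illustration.
def dual_thrust_buy_line(highs, lows, closes, open_price, k1=0.9):
    hh = max(highs)
    lc = min(closes)
    hc = max(closes)
    ll = min(lows)
    price_range = max(hh - lc, hc - ll)
    return open_price + k1 * price_range


if __name__ == "__main__":
    # max(11.0 - 9.8, 10.5 - 9.0) = 1.5, so the trigger is 10.0 + 0.9 * 1.5 = 11.35
    print(dual_thrust_buy_line([10.0, 11.0], [9.0, 9.5], [9.8, 10.5], 10.0))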
| 25.872093 | 63 | 0.523146 |
4505630e0e0b0ce502fbd89137f7e1d39494681c
| 12,133 |
py
|
Python
|
stacker/actions/build.py
|
GoodRx/stacker
|
0cf1df67b4ae5aeda5845442c84905909101c238
|
[
"BSD-2-Clause"
] | 1 |
2021-11-06T17:01:01.000Z
|
2021-11-06T17:01:01.000Z
|
stacker/actions/build.py
|
GoodRx/stacker
|
0cf1df67b4ae5aeda5845442c84905909101c238
|
[
"BSD-2-Clause"
] | null | null | null |
stacker/actions/build.py
|
GoodRx/stacker
|
0cf1df67b4ae5aeda5845442c84905909101c238
|
[
"BSD-2-Clause"
] | 1 |
2021-11-06T17:00:53.000Z
|
2021-11-06T17:00:53.000Z
|
import logging
from .base import BaseAction
from .. import exceptions, util
from ..exceptions import StackDidNotChange
from ..plan import Plan
from ..status import (
NotSubmittedStatus,
NotUpdatedStatus,
DidNotChangeStatus,
SubmittedStatus,
CompleteStatus,
SUBMITTED
)
logger = logging.getLogger(__name__)
def should_update(stack):
"""Tests whether a stack should be submitted for updates to CF.
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be updated, return True.
"""
if stack.locked:
if not stack.force:
logger.debug("Stack %s locked and not in --force list. "
"Refusing to update.", stack.name)
return False
else:
logger.debug("Stack %s locked, but is in --force "
"list.", stack.name)
return True
def should_submit(stack):
"""Tests whether a stack should be submitted to CF for update/create
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be submitted, return True.
"""
if stack.enabled:
return True
logger.debug("Stack %s is not enabled. Skipping.", stack.name)
return False
def resolve_parameters(parameters, blueprint, context, provider):
"""Resolves parameters for a given blueprint.
Given a list of parameters, first discard any parameters that the
blueprint does not use. Then, if a parameter is a list of outputs
in the format of <stack_name>::<output_name>,... pull those output(s)
from the foreign stack(s).
Args:
parameters (dict): A dictionary of parameters provided by the
stack definition
blueprint (:class:`stacker.blueprint.base.Blueprint`): A Blueprint
object that is having the parameters applied to it.
context (:class:`stacker.context.Context`): The context object used
to get the FQN of stacks.
provider (:class:`stacker.providers.base.BaseProvider`): The provider
used for looking up stacks & their outputs.
Returns:
dict: The resolved parameters.
"""
params = {}
blueprint_params = blueprint.parameters
for k, v in parameters.items():
if k not in blueprint_params:
logger.debug("Template %s does not use parameter %s.",
blueprint.name, k)
continue
value = v
if isinstance(value, basestring) and "::" in value:
# Get from the Output(s) of another stack(s) in the stack_map
v_list = []
values = value.split(",")
for v in values:
v = v.strip()
stack_name, output = v.split("::")
stack_fqn = context.get_fqn(stack_name)
try:
v_list.append(
provider.get_output(stack_fqn, output))
except KeyError:
raise exceptions.OutputDoesNotExist(stack_fqn, v)
value = ",".join(v_list)
if value is None:
logger.debug("Got None value for parameter %s, not submitting it "
"to cloudformation, default value should be used.",
k)
continue
if isinstance(value, bool):
logger.debug("Converting parameter %s boolean \"%s\" to string.",
k, value)
value = str(value).lower()
params[k] = value
return params
class Action(BaseAction):
"""Responsible for building & coordinating CloudFormation stacks.
Generates the build plan based on stack dependencies (these dependencies
are determined automatically based on references to output values from
other stacks).
The plan can then either be printed out as an outline or executed. If
executed, each stack will get launched in order which entails:
- Pushing the generated CloudFormation template to S3 if it has changed
- Submitting either a build or update of the given stack to the
`Provider`.
- Stores the stack outputs for reference by other stacks.
"""
def _resolve_parameters(self, parameters, blueprint):
"""Resolves parameters for a given blueprint.
Given a list of parameters, first discard any parameters that the
blueprint does not use. Then, if a parameter is a list of outputs
in the format of <stack_name>::<output_name>,... pull those output(s)
from the foreign stack(s).
Args:
parameters (dict): A dictionary of parameters provided by the
stack definition
blueprint (:class:`stacker.blueprint.base.Blueprint`): A Blueprint
object that is having the parameters applied to it.
Returns:
dict: The resolved parameters.
"""
return resolve_parameters(parameters, blueprint, self.context,
self.provider)
def build_parameters(self, stack, provider_stack=None):
"""Builds the parameters for our stack
Args:
stack (:class:`cloudformation.stack`): A Cloudformation stack
provider_stack (:class:`stacker.providers.base.Provider`): An
optional Stacker provider object
Returns:
dict: The parameters for the given stack
"""
parameters = self._resolve_parameters(stack.cfn_parameters,
stack.blueprint)
required_params = [k for k, v in stack.blueprint.required_parameters]
parameters = self._handle_missing_parameters(parameters,
required_params,
provider_stack)
return [
{'ParameterKey': p[0],
'ParameterValue': str(p[1])} for p in parameters
]
def _build_stack_tags(self, stack):
"""Builds a common set of tags to attach to a stack"""
return [
{'Key': t[0], 'Value': t[1]} for t in self.context.tags.items()]
def _launch_stack(self, stack, **kwargs):
"""Handles the creating or updating of a stack in CloudFormation.
Also makes sure that we don't try to create or update a stack while
it is already updating or creating.
"""
if not should_submit(stack):
return NotSubmittedStatus()
try:
provider_stack = self.provider.get_stack(stack.fqn)
except exceptions.StackDoesNotExist:
provider_stack = None
old_status = kwargs.get("status")
if provider_stack and old_status == SUBMITTED:
logger.debug(
"Stack %s provider status: %s",
stack.fqn,
self.provider.get_stack_status(provider_stack),
)
if self.provider.is_stack_completed(provider_stack):
submit_reason = getattr(old_status, "reason", None)
return CompleteStatus(submit_reason)
elif self.provider.is_stack_in_progress(provider_stack):
logger.debug("Stack %s in progress.", stack.fqn)
return old_status
logger.debug("Resolving stack %s variables", stack.fqn)
stack.resolve_variables(self.context, self.provider)
logger.debug("Launching stack %s now.", stack.fqn)
template_url = self.s3_stack_push(stack.blueprint)
tags = self._build_stack_tags(stack)
parameters = self.build_parameters(stack, provider_stack)
new_status = None
if not provider_stack:
new_status = SubmittedStatus("creating new stack")
logger.debug("Creating new stack: %s", stack.fqn)
self.provider.create_stack(stack.fqn, template_url, parameters,
tags)
else:
if not should_update(stack):
return NotUpdatedStatus()
try:
new_status = SubmittedStatus("updating existing stack")
self.provider.update_stack(stack.fqn, template_url, parameters,
tags)
logger.debug("Updating existing stack: %s", stack.fqn)
except StackDidNotChange:
return DidNotChangeStatus()
return new_status
def _handle_missing_parameters(self, params, required_params,
existing_stack=None):
"""Handles any missing parameters.
If an existing_stack is provided, look up missing parameters there.
Args:
params (dict): key/value dictionary of stack definition parameters
required_params (list): A list of required parameter names.
existing_stack (dict): A dict representation of the stack. If
provided, will be searched for any missing parameters.
Returns:
list of tuples: The final list of key/value pairs returned as a
list of tuples.
Raises:
MissingParameterException: Raised if a required parameter is
still missing.
"""
missing_params = list(set(required_params) - set(params.keys()))
if existing_stack and 'Parameters' in existing_stack:
stack_params = {p['ParameterKey']: p['ParameterValue'] for p in
existing_stack['Parameters']}
for p in missing_params:
if p in stack_params:
value = stack_params[p]
logger.debug("Using parameter %s from existing stack: %s",
p, value)
params[p] = value
final_missing = list(set(required_params) - set(params.keys()))
if final_missing:
raise exceptions.MissingParameterException(final_missing)
return params.items()
def _generate_plan(self, tail=False):
plan_kwargs = {}
if tail:
plan_kwargs["watch_func"] = self.provider.tail_stack
plan = Plan(description="Create/Update stacks", **plan_kwargs)
stacks = self.context.get_stacks_dict()
dependencies = self._get_dependencies()
for stack_name in self.get_stack_execution_order(dependencies):
plan.add(
stacks[stack_name],
run_func=self._launch_stack,
requires=dependencies.get(stack_name),
)
return plan
def _get_dependencies(self):
dependencies = {}
for stack in self.context.get_stacks():
dependencies[stack.fqn] = stack.requires
return dependencies
def pre_run(self, outline=False, *args, **kwargs):
"""Any steps that need to be taken prior to running the action."""
pre_build = self.context.config.get("pre_build")
if not outline and pre_build:
util.handle_hooks("pre_build", pre_build, self.provider.region,
self.context)
def run(self, outline=False, tail=False, dump=False, *args, **kwargs):
"""Kicks off the build/update of the stacks in the stack_definitions.
This is the main entry point for the Builder.
"""
plan = self._generate_plan(tail=tail)
if not outline and not dump:
plan.outline(logging.DEBUG)
logger.debug("Launching stacks: %s", ", ".join(plan.keys()))
plan.execute()
else:
if outline:
plan.outline()
if dump:
plan.dump(dump)
def post_run(self, outline=False, *args, **kwargs):
"""Any steps that need to be taken after running the action."""
post_build = self.context.config.get("post_build")
if not outline and post_build:
util.handle_hooks("post_build", post_build, self.provider.region,
self.context)
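# --- Hedged sketch (editor addition, not part of the original module) ---
# Illustrates the "<stack_name>::<output_name>" parameter syntax resolved by
# resolve_parameters() above. The _Fake* stand-ins are invented here and only
# implement what resolve_parameters() touches; note the module itself targets
# Python 2 (it relies on `basestring`).
def _example_resolve_parameters():
    class _FakeBlueprint(object):
        name = "app"
        parameters = {"VpcId": None}

    class _FakeContext(object):
        def get_fqn(self, name):
            return "prod-" + name

    class _FakeProvider(object):
        def get_output(self, stack_fqn, output):
            # pretend the prod-vpc stack exported VpcId
            return "vpc-1234"

    # "vpc::VpcId" is replaced by the VpcId output of the prod-vpc stack;
    # "Unused" is dropped because the blueprint does not declare it.
    # Expected result: {"VpcId": "vpc-1234"}
    return resolve_parameters({"VpcId": "vpc::VpcId", "Unused": "x"},
                              _FakeBlueprint(), _FakeContext(), _FakeProvider())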
| 37.447531 | 79 | 0.594742 |
11e82fd8a9fc588a77f9719d347e05d8719785a6
| 1,856 |
py
|
Python
|
examples/slack/slack_operator_mode.py
|
doru1004/rayvens
|
da89f405586a06b50cc8bb6273d8582400fbca9c
|
[
"Apache-2.0"
] | 24 |
2021-06-18T21:38:04.000Z
|
2022-02-16T19:16:49.000Z
|
examples/slack/slack_operator_mode.py
|
doru1004/rayvens
|
da89f405586a06b50cc8bb6273d8582400fbca9c
|
[
"Apache-2.0"
] | 11 |
2021-06-22T14:36:27.000Z
|
2021-12-09T16:33:15.000Z
|
examples/slack/slack_operator_mode.py
|
doru1004/rayvens
|
da89f405586a06b50cc8bb6273d8582400fbca9c
|
[
"Apache-2.0"
] | 5 |
2021-06-18T22:03:55.000Z
|
2021-08-02T05:11:46.000Z
|
#
# Copyright IBM Corporation 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ray
import rayvens
import sys
# Send message to Slack sink using the kamel anywhere operator implementation.
# Command line arguments and validation:
if len(sys.argv) < 4:
print(f'usage: {sys.argv[0]} <slack_channel> <slack_webhook> <run_mode>')
sys.exit(1)
slack_channel = sys.argv[1]
slack_webhook = sys.argv[2]
run_mode = sys.argv[3]
if run_mode not in ['local', 'mixed', 'operator']:
raise RuntimeError(f'Invalid run mode provided: {run_mode}')
# Initialize ray either on the cluster or locally otherwise.
if run_mode == 'operator':
ray.init(address='auto')
else:
ray.init()
# Start rayvens in operator mode.
rayvens.init(mode=run_mode)
# Create stream.
stream = rayvens.Stream('slack')
# Event sink config.
sink_config = dict(kind='slack-sink',
route='/toslack',
channel=slack_channel,
webhook_url=slack_webhook)
# Add sink to stream.
sink = stream.add_sink(sink_config)
# Sends message to all sinks attached to this stream.
stream << f'Sending message to Slack sink in run mode {run_mode}.'
# Disconnect any sources or sinks attached to the stream 2 seconds after
# the stream is idle (i.e. no events were propagated by the stream).
stream.disconnect_all(after_idle_for=2)
| 30.933333 | 78 | 0.724138 |
fb3fcba2ed9cad66bcdb6583bfc0bb41d71613dc
| 6,625 |
py
|
Python
|
src/ccapi/model/model/metabolic/__init__.py
|
achillesrasquinha/CCPy
|
7a5b4fc008a9a0c90caee5d2d6a8c67393dcb822
|
[
"MIT"
] | 3 |
2019-12-30T23:13:22.000Z
|
2020-03-11T11:03:39.000Z
|
src/ccapi/model/model/metabolic/__init__.py
|
achillesrasquinha/CCPy
|
7a5b4fc008a9a0c90caee5d2d6a8c67393dcb822
|
[
"MIT"
] | 2 |
2019-12-20T20:01:01.000Z
|
2020-01-09T19:04:32.000Z
|
src/ccapi/model/model/metabolic/__init__.py
|
achillesrasquinha/CCPy
|
7a5b4fc008a9a0c90caee5d2d6a8c67393dcb822
|
[
"MIT"
] | 1 |
2019-12-30T23:13:46.000Z
|
2019-12-30T23:13:46.000Z
|
# imports - standard imports
import re
from os.path import abspath, join
# imports - module imports
from ccapi.model.model.version import ModelVersion
from ccapi.core.querylist import QueryList
from ccapi.core.mixins import JupyterHTMLViewMixin
from ccapi.template import render_template
from bpyutils.util.string import ellipsis
from bpyutils.util.system import makepath
from bpyutils.util.request import download_file
from ccapi.constant import CONSTRAINT_BASED_MODEL_EXPORT_TYPE
from ccapi.core.config import Configuration
# imports - constraint-based model imports
from ccapi.model.model.metabolic.metabolite import Metabolite
from ccapi.model.model.metabolic.gene import Gene
from ccapi.model.model.metabolic.reaction import Reaction
config = Configuration()
class ConstraintBasedModel(ModelVersion, JupyterHTMLViewMixin):
_REPR_ATTRIBUTES = [
dict({
"name": "number_of_metabolites",
"title": "Number of Metabolites",
"key": lambda x: len(x.metabolites)
})
]
def __init__(self, *args, **kwargs):
self.super = super(ConstraintBasedModel, self)
self.super.__init__(*args, **kwargs)
self._metabolites = QueryList()
self._reactions = QueryList()
def _repr_html_(self):
repr_ = render_template(join("metabolic", "model.html"), context = dict({
"id": self.id,
"version": self.version,
"name": self.name,
"memory_address": "0x0%x" % id(self),
"number_of_metabolites": len(self.metabolites),
"metabolites": ellipsis(", ".join([s.name for s in self.metabolites]), threshold = 500),
"number_of_reactions": len(self.reactions),
"reactions": ellipsis(", ".join([s.name for s in self.reactions]), threshold = 500)
}))
return repr_
@property
def metabolites(self):
metabolites = getattr(self, "_metabolites", QueryList())
return metabolites
@metabolites.setter
def metabolites(self, value):
if self.metabolites == value:
pass
elif not isinstance(value, (list, tuple, QueryList)):
raise TypeError("ID must be an integer.")
else:
self._metabolites = value
if not isinstance(value, QueryList):
raise TypeError("Components must be of type (list, tuple, QueryList).")
else:
for metabolite in value:
if not isinstance(metabolite, Metabolite):
raise TypeError("Element must be of type Metabolite.")
self._metabolites = value
def add_metabolite(self, metabolite):
if not isinstance(metabolite, Metabolite):
raise TypeError("Metabolite must be of type %s, found %s." %
(Metabolite, type(metabolite))
)
else:
if metabolite in self.metabolites:
raise ValueError("Metabolite already exists.")
else:
self.metabolites.append(metabolite)
def add_metabolites(self, *metabolites):
for metabolite in metabolites:
if not isinstance(metabolite, Metabolite):
raise TypeError("Metabolite must be of type %s, found %s." %
(Metabolite, type(Metabolite))
)
for metabolite in metabolites:
self.add_metabolite(metabolite)
@property
def reactions(self):
reactions = getattr(self, "_reactions", QueryList())
return reactions
@reactions.setter
def reactions(self, value):
if self.reactions == value:
pass
elif not isinstance(value, (list, tuple, QueryList)):
raise TypeError("ID must be an integer.")
else:
self._reactions = value
if not isinstance(value, QueryList):
raise TypeError("Components must be of type (list, tuple, QueryList).")
else:
for reaction in value:
if not isinstance(reaction, Reaction):
raise TypeError("Element must be of type Reaction.")
self._reactions = value
def add_reaction(self, reaction):
if not isinstance(reaction, Reaction):
raise TypeError("Reaction must be of type %s, found %s." %
(Reaction, type(reaction))
)
else:
if reaction in self.reactions:
raise ValueError("Reaction already exists.")
else:
self.reactions.append(reaction)
def add_reactions(self, *reactions):
for reaction in reactions:
if not isinstance(reaction, Reaction):
raise TypeError("Reaction must be of type %s, found %s." %
(Reaction, type(Reaction))
)
for reaction in reactions:
self.add_reaction(reaction)
def write(self, path = None, type = "sbml", **kwargs):
type_ = CONSTRAINT_BASED_MODEL_EXPORT_TYPE[type]["value"]
params = { "version": self.version, "type": type_ }
response = self.client.request("GET", "api/model/%s/export" % self.id,
params = params)
if not path:
header = response.headers["content-disposition"]
name = re.findall("filename=(.+)", header)[0]
path = abspath(name)
nchunk = kwargs.get("nchunk", config.max_chunk_download_bytes)
makepath(path)
path = download_file(response, path, chunk_size = nchunk)
return path
def to_json(self):
data = self.super.to_json()
data["id"] = str(self.version)
data["metabolites"] = [ ]
for metabolite in self.metabolites:
json = metabolite.to_json()
data["metabolites"].append(json)
data["reactions"] = [ ]
for reaction in self.reactions:
json = reaction.to_json()
data["reactions"].append(json)
data["genes"] = [ ]
return data
def analyse(self, type_ = "fba"):
model = self.to_json()
data = dict(type = "metabolic", model = model,
analysis = type_)
response = self.client.post("api/model/analyse", json = data)
content = response.json()
return content
| 35.427807 | 110 | 0.571774 |
3fbd629dbe7fc993bfaa03b9b0bfd2edf7245826
| 272 |
py
|
Python
|
checker/rfid/read.py
|
Rami87/Door-lock-raspbarrypi
|
181fed07e577c81a20450d5a32f147c9f7c8cd6f
|
[
"MIT"
] | null | null | null |
checker/rfid/read.py
|
Rami87/Door-lock-raspbarrypi
|
181fed07e577c81a20450d5a32f147c9f7c8cd6f
|
[
"MIT"
] | null | null | null |
checker/rfid/read.py
|
Rami87/Door-lock-raspbarrypi
|
181fed07e577c81a20450d5a32f147c9f7c8cd6f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import RPi.GPIO as GPIO
import SimpleMFRC522
import time
reader = SimpleMFRC522.SimpleMFRC522()
while(True):
try:
id, text = reader.read()
print(id)
print(text)
time.sleep(1)
finally:
GPIO.cleanup()
| 16 | 38 | 0.613971 |
57a80709df834486c76f7b9cbc73dbe1e3308cd0
| 1,628 |
py
|
Python
|
tests/basics/with1.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 13,648 |
2015-01-01T01:34:51.000Z
|
2022-03-31T16:19:53.000Z
|
tests/basics/with1.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 7,092 |
2015-01-01T07:59:11.000Z
|
2022-03-31T23:52:18.000Z
|
tests/basics/with1.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 4,942 |
2015-01-02T11:48:50.000Z
|
2022-03-31T19:57:10.000Z
|
class CtxMgr:
def __enter__(self):
print("__enter__")
return self
def __exit__(self, a, b, c):
print("__exit__", repr(a), repr(b))
with CtxMgr() as a:
print(isinstance(a, CtxMgr))
try:
with CtxMgr() as a:
raise ValueError
except ValueError:
print("ValueError")
class CtxMgr2:
def __enter__(self):
print("__enter__")
return self
def __exit__(self, a, b, c):
print("__exit__", repr(a), repr(b))
return True
try:
with CtxMgr2() as a:
raise ValueError
print("No ValueError2")
except ValueError:
print("ValueError2")
# These recursive try-finally tests are attempt to get some interpretation
# of last phrase in http://docs.python.org/3.4/library/dis.html#opcode-WITH_CLEANUP
# "If the stack represents an exception, and the function call returns a 'true'
# value, this information is "zapped" and replaced with a single WHY_SILENCED
# to prevent END_FINALLY from re-raising the exception. (But non-local gotos
# will still be resumed.)"
print("===")
with CtxMgr2() as a:
try:
try:
raise ValueError
print("No ValueError3")
finally:
print("finally1")
finally:
print("finally2")
print("===")
try:
try:
with CtxMgr2() as a:
try:
try:
raise ValueError
print("No ValueError3")
finally:
print("finally1")
finally:
print("finally2")
finally:
print("finally3")
finally:
print("finally4")
| 22.611111 | 83 | 0.580467 |
b6ecee56b03f118d4bc483b9c92853ab507def17
| 1,077 |
py
|
Python
|
networking_vpp/db/migration/alembic_migrations/versions/ocata/expand/51f8d5ee1a46_l3_plugin_for_vpp.py
|
fepan/networking-vpp
|
3032013dc78893ec1bf41537026649f4e934020c
|
[
"Apache-2.0"
] | 1 |
2021-07-17T07:50:03.000Z
|
2021-07-17T07:50:03.000Z
|
networking_vpp/db/migration/alembic_migrations/versions/ocata/expand/51f8d5ee1a46_l3_plugin_for_vpp.py
|
fepan/networking-vpp
|
3032013dc78893ec1bf41537026649f4e934020c
|
[
"Apache-2.0"
] | null | null | null |
networking_vpp/db/migration/alembic_migrations/versions/ocata/expand/51f8d5ee1a46_l3_plugin_for_vpp.py
|
fepan/networking-vpp
|
3032013dc78893ec1bf41537026649f4e934020c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Cisco Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""L3 plugin for VPP
Revision ID: 51f8d5ee1a46
Revises: 6a909ba3748c
Create Date: 2016-10-17 16:39:11.037544
"""
# revision identifiers, used by Alembic.
revision = '51f8d5ee1a46'
down_revision = '6a909ba3748c'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('vpp_router_vrfs',
sa.Column('router_id', sa.String(36), primary_key=True),
sa.Column('vrf_id', sa.Integer, nullable=False))
| 29.916667 | 78 | 0.707521 |
f946b076e325451875dd09690194581d9267cab1
| 7,741 |
py
|
Python
|
env/Lib/site-packages/plotly/graph_objs/sunburst/marker/_line.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 7 |
2022-01-16T12:28:16.000Z
|
2022-03-04T15:31:45.000Z
|
env/Lib/site-packages/plotly/graph_objs/sunburst/marker/_line.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 14 |
2021-10-20T23:33:47.000Z
|
2021-12-21T04:50:37.000Z
|
env/Lib/site-packages/plotly/graph_objs/sunburst/marker/_line.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 1 |
2021-11-29T22:55:05.000Z
|
2021-11-29T22:55:05.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "sunburst.marker"
_path_str = "sunburst.marker.line"
_valid_props = {"color", "colorsrc", "width", "widthsrc"}
# color
# -----
@property
def color(self):
"""
Sets the color of the line enclosing each sector. Defaults to
the `paper_bgcolor` value.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the line enclosing each sector.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the line enclosing each sector.
Defaults to the `paper_bgcolor` value.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
"""
def __init__(
self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sunburst.marker.Line`
color
Sets the color of the line enclosing each sector.
Defaults to the `paper_bgcolor` value.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the line enclosing each
sector.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sunburst.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.marker.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
_v = arg.pop("widthsrc", None)
_v = widthsrc if widthsrc is not None else _v
if _v is not None:
self["widthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
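# --- Hedged sketch (editor addition, not part of the generated file above) ---
# Constructs the generated Line class; assumes an installed plotly
# distribution compatible with this module (it supplies plotly.basedatatypes
# and the sunburst.marker.line validators). The property values are
# illustrative only.
if __name__ == "__main__":
    line = Line(color="#444", width=2)
    print(line.to_plotly_json())  # e.g. {'color': '#444', 'width': 2}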
| 32.800847 | 86 | 0.547733 |
0bada741fe05670f463d850df05c157ef508172d
| 1,526 |
py
|
Python
|
gallery/models.py
|
szsu-ryerson/ImageUpload
|
163cc99aaf309fd46084384d47b0bfe34e1b5c2d
|
[
"MIT"
] | null | null | null |
gallery/models.py
|
szsu-ryerson/ImageUpload
|
163cc99aaf309fd46084384d47b0bfe34e1b5c2d
|
[
"MIT"
] | null | null | null |
gallery/models.py
|
szsu-ryerson/ImageUpload
|
163cc99aaf309fd46084384d47b0bfe34e1b5c2d
|
[
"MIT"
] | 2 |
2019-06-23T00:27:58.000Z
|
2019-06-23T02:21:57.000Z
|
from gallery import app, db
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
db.create_all()
class LoginUser(db.Model, UserMixin):
__tablename__ = 'user'
email = db.Column(db.String, primary_key=True)
authenticated = db.Column(db.Boolean, default=False)
pw_hash = db.Column(db.String)
def is_active(self):
return True
def get_id(self):
return self.email
def is_authenticated(self):
return self.authenticated
def is_anonymous(self):
return False
def set_password(self, password):
self.pw_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.pw_hash, password)
class Appuser(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
images = db.relationship("Appimage", backref="appuser", lazy=True)
def __repr__(self):
return '<Appuser %r>' % self.email
def get_id(self):
return self.id
class Appimage(db.Model):
id = db.Column(db.Integer, primary_key=True)
URL = db.Column(db.String(120), unique=True, nullable=False)
appuser_id = db.Column(db.Integer, db.ForeignKey('appuser.id'))
def __repr__(self):
return '<Appimage %r: %s>' % (self.URL,self.appuser.email)
| 28.259259 | 73 | 0.663827 |
071fe37426022baeee5d74540fb0a90e6dd31178
| 382 |
py
|
Python
|
miamm/recipes/forms.py
|
kimond/miamm
|
b351ea22c1d48e1ff7012099dda1474e4658a617
|
[
"BSD-3-Clause"
] | 2 |
2015-01-27T15:03:58.000Z
|
2015-01-27T16:29:56.000Z
|
miamm/recipes/forms.py
|
kimond/miamm
|
b351ea22c1d48e1ff7012099dda1474e4658a617
|
[
"BSD-3-Clause"
] | null | null | null |
miamm/recipes/forms.py
|
kimond/miamm
|
b351ea22c1d48e1ff7012099dda1474e4658a617
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
from recipes.models import *
class RecipeIngredientForm(forms.ModelForm):
class Meta:
model = RecipeIngredient
fields = '__all__'
class StepForm(forms.ModelForm):
class Meta:
model = Step
fields = '__all__'
class RecipeForm(forms.ModelForm):
class Meta:
model = Recipe
fields = '__all__'
| 18.190476 | 44 | 0.643979 |
1a599615125892d97bbbac367a462d63ce00a44c
| 7,342 |
py
|
Python
|
src/BPL_hotspots/src/find_bpl_hotspots.py
|
space-isa/get-connected-2.0
|
b5487ab97689e347dd30caefd01885095f8e8dff
|
[
"MIT"
] | null | null | null |
src/BPL_hotspots/src/find_bpl_hotspots.py
|
space-isa/get-connected-2.0
|
b5487ab97689e347dd30caefd01885095f8e8dff
|
[
"MIT"
] | null | null | null |
src/BPL_hotspots/src/find_bpl_hotspots.py
|
space-isa/get-connected-2.0
|
b5487ab97689e347dd30caefd01885095f8e8dff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Tested with Python 3.8.6
#------------------------------------------------------------------------------
# find_bpl_hotspots.py
#------------------------------------------------------------------------------
# Author: Isabel J. Rodriguez
# 2021.01.23
#------------------------------------------------------------------------------
"""
Scrape data from the Bklyn Reach website and generate a csv file containing
relevant information from participating libraries in the BPL system.
INPUTS
------
NONE
Uses the existing Bklyn Reach url: https://www.bklynlibrary.org/reach/
OUTPUTS
-------
Output file:
"bpl_wifi.csv"
Data included:
LIBRARY
ADDRESS
WI-FI PROGRAM
AVAILABILITY
LIBRARY WEBSITE
"""
# Standard Python library imports
import csv
import sys
import time
# Companion scripts
from write_to_csv import write_to_csv
from exception_handler import exception_handler
from soupify_webpage import parse_html
# Geolocator
from geopy.geocoders import Nominatim
def pull_wifi_data():
# fetch html
bpl_reach_url= 'https://www.bklynlibrary.org/reach/'
webpage_soup = parse_html(bpl_reach_url)
# parse html content
containers = webpage_soup.findAll("div", {"class" : "panel-body"})
# containers[0] has all active participating libraries
# containers[1] libraries listed as having a program 'coming soon'
list_active = containers[0].ul.findAll("li")
return list_active
def geolocate_coordinates(street_address=None):
if street_address is not None:
try:
geolocator = Nominatim(user_agent="bpl_wifi")
location = geolocator.geocode(street_address)
print(location.address)
latitude = str(location.latitude)
longitude = str(location.longitude)
except AttributeError:
latitude = 'NaN'
longitude = 'NaN'
return latitude, longitude
def pull_address_data(url=None):
"""
Libraries with active extended wi-fi programs have their websites listed.
    Access websites and pull street address and zip code. If a street address
intersection is given e.g.,
"16 Brighton First Rd. at Brighton Beach Ave."
remove the intersection and return e.g., "16 Brighton First Rd."
"""
if url is not None:
webpage_soup = parse_html(url)
street_container = webpage_soup.findAll("div", {"class":"street-block"})
zip_container = webpage_soup.findAll("div", {"class":"addressfield-container-inline locality-block country-US"})
street_address = street_container[0].div.text
zip_code = zip_container[0].findAll("span", {"class":"postal-code"})[0].text
# clean address data
split_address = street_address.split()
stopwords = ['at', '(near', '(Near', '(at', '(@']
# remove street intersection
for stopword in stopwords:
if stopword in split_address:
street_address = split_address[:split_address.index(stopword)]
street_address = ' '.join(street_address)
else:
pass
        # addresses with street numbers spelled out decrease geolocation accuracy
# replace with number (e.g., first --> 1st)
# this is done on a case-by-case basis but could be generalized
if 'First' in street_address:
street_address = street_address.replace("First", "1st")
else:
pass
if 'Fourth' in street_address:
street_address = street_address.replace("Fourth", "4th")
# grab geolocation data
latitude, longitude = geolocate_coordinates(street_address=street_address + ', Brooklyn')
return street_address, zip_code, latitude, longitude
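# --- Hedged sketch (editor addition, not part of the original script) ---
# Standalone restatement of the address clean-up done in pull_address_data():
# drop the cross-street portion after a stopword and normalise spelled-out
# street numbers. The sample address in the doctest is taken from the
# docstring above.
def _clean_street_address(street_address):
    """
    >>> _clean_street_address("16 Brighton First Rd. at Brighton Beach Ave.")
    '16 Brighton 1st Rd.'
    """
    split_address = street_address.split()
    for stopword in ['at', '(near', '(Near', '(at', '(@']:
        if stopword in split_address:
            street_address = ' '.join(split_address[:split_address.index(stopword)])
            break
    return street_address.replace("First", "1st").replace("Fourth", "4th")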
def store_data(list_active):
"""
Create a dictionary to store information for Brooklyn Public
Libraries participating in the Bklyn Reach extended wi-fi program.
"""
# Bklyn Reach service details
wifi_range = '300 feet'
wifi_availability = '24/7'
wifi_program = 'Bklyn Reach'
city = 'Brooklyn'
state = 'New York'
# create a storage container for BPL data
bp_libraries = {list_active[i].text: {'STREET ADDRESS' : '',
'CITY' : city,
'STATE' : state,
'ZIP CODE' : '',
'LATITUDE' : '',
'LONGITUDE' : '',
'WI-FI PROGRAM': wifi_program,
'AVAILABILITY': wifi_availability,
'WI-FI RANGE' : wifi_range,
'LIBRARY WEBSITE': '' }
for i in range(len(list_active))}
print("Compiling data...")
for i in range (len(list_active)):
nested_dict = bp_libraries[list_active[i].text]
street_address, zip_code, latitude, longitude = pull_address_data(list_active[i].a["href"])
nested_dict['STREET ADDRESS'] = street_address
nested_dict['ZIP CODE'] = zip_code
nested_dict['LATITUDE'] = latitude
nested_dict['LONGITUDE'] = longitude
nested_dict['LIBRARY WEBSITE'] = list_active[i].a["href"]
return bp_libraries
def write_data_to_csv(bp_libraries,
output_filename=None,
output_folder=None):
"""
Pull data from storage dictionary into a list of lists,
and write to csv.
ARGUMENTS
---------
bp_libraries : dict
output_filename : str
e.g., "bpl_wifi.csv"
output_folder : str
RETURNS
-------
None
"""
output = []
# Order and sort data into output container
for key, val in bp_libraries.items():
output.append([key,
val['STREET ADDRESS'],
val['CITY'],
val['STATE'],
val['ZIP CODE'],
val['LATITUDE'],
val['LONGITUDE'],
val['WI-FI PROGRAM'],
val['AVAILABILITY'],
val['LIBRARY WEBSITE']])
output.sort(key=lambda header: header[0])
print("Compilation complete. Writing out to a csv file.")
write_to_csv(output_filename=output_filename,
output_folder=output_folder,
output=output)
@exception_handler
def main(output_filename=None):
"""
    Contains a pipeline that scrapes the Bklyn Reach webpage and
    writes the processed and sorted data to an output csv file.
ARGUMENTS
---------
output_filename : str
e.g., "wifi.csv"
RETURNS
-------
None
"""
list_active = pull_wifi_data()
bp_libraries = store_data(list_active)
write_data_to_csv(bp_libraries,
output_filename=output_filename,
output_folder=output_folder)
if __name__ == "__main__":
date = time.strftime("%m%d%Y")
output_folder = "../output/"
output_filename = "bpl_wifi_{}.csv".format(date)
main(output_filename)
| 32.343612 | 120 | 0.565786 |
e366a72a0bf8b91fbe3d3e4ad9a1215a5a3614be
| 2,688 |
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20200301/get_virtual_network_gateway_advertised_routes.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31 |
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20200301/get_virtual_network_gateway_advertised_routes.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231 |
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20200301/get_virtual_network_gateway_advertised_routes.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4 |
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayAdvertisedRoutesResult',
'AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult',
'get_virtual_network_gateway_advertised_routes',
]
@pulumi.output_type
class GetVirtualNetworkGatewayAdvertisedRoutesResult:
"""
List of virtual network gateway routes.
"""
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.GatewayRouteResponseResult']]:
"""
List of gateway routes.
"""
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(GetVirtualNetworkGatewayAdvertisedRoutesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayAdvertisedRoutesResult(
value=self.value)
def get_virtual_network_gateway_advertised_routes(peer: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult:
"""
List of virtual network gateway routes.
:param str peer: The IP address of the peer.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['peer'] = peer
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200301:getVirtualNetworkGatewayAdvertisedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayAdvertisedRoutesResult).value
return AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(
value=__ret__.value)
| 38.4 | 190 | 0.697917 |
622e414483684a0ef56f738ba607a731e1428ae2
| 445 |
py
|
Python
|
custom_types.py
|
Adman/messenger-stats
|
184c3a3c8915089a211d2b68a8913855558d5c66
|
[
"MIT"
] | 6 |
2018-02-04T18:57:37.000Z
|
2020-01-11T23:32:03.000Z
|
custom_types.py
|
franeklubi/messenger-stats
|
3d1eb401326e79a7a84c565c62d976f0954a7d5a
|
[
"MIT"
] | 7 |
2018-01-30T21:39:08.000Z
|
2020-09-28T04:35:41.000Z
|
custom_types.py
|
franeklubi/messenger-stats
|
3d1eb401326e79a7a84c565c62d976f0954a7d5a
|
[
"MIT"
] | 3 |
2019-11-02T22:58:16.000Z
|
2020-10-28T03:42:21.000Z
|
import datetime
from typing import NamedTuple, List
Message = NamedTuple('Message', [('sender', str), ('text', str), ('created_at', datetime.datetime)])
Participants = List[str]
Conversation = NamedTuple('Conversation', [('participants', Participants), ('messages', List[Message])])
NamedConversation = NamedTuple('NamedConversation',
[('name', str), ('participants', Participants), ('messages', List[Message])])
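# --- Hedged sketch (editor addition): constructing the types defined above ---
# The field values are made-up illustration data.
if __name__ == '__main__':
    msg = Message(sender='Alice', text='hi', created_at=datetime.datetime(2018, 1, 1, 12, 0))
    conv = NamedConversation(name='Example chat', participants=['Alice', 'Bob'], messages=[msg])
    print(conv.name, len(conv.messages))  # Example chat 1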
| 49.444444 | 108 | 0.674157 |
adf144b913ec184188bbfa16110d2ad8e09863b1
| 1,476 |
py
|
Python
|
assignment1/optimizer/_base_optimizer.py
|
simonanez/deep-learning-cs7643
|
2ccd3ca336e49abe83ba6516919314db72f9bb8c
|
[
"MIT"
] | 3 |
2022-01-16T14:46:57.000Z
|
2022-02-20T22:40:16.000Z
|
assignment1/optimizer/_base_optimizer.py
|
steven-rr/deep-learning-cs7643
|
2ccd3ca336e49abe83ba6516919314db72f9bb8c
|
[
"MIT"
] | null | null | null |
assignment1/optimizer/_base_optimizer.py
|
steven-rr/deep-learning-cs7643
|
2ccd3ca336e49abe83ba6516919314db72f9bb8c
|
[
"MIT"
] | 6 |
2021-09-29T11:42:37.000Z
|
2022-02-02T02:33:51.000Z
|
class _BaseOptimizer:
def __init__(self, learning_rate=1e-4, reg=1e-3):
self.learning_rate = learning_rate
self.reg = reg
def update(self, model):
pass
def apply_regularization(self, model):
'''
Apply L2 penalty to the model. Update the gradient dictionary in the model
:param model: The model with gradients
:return: None, but the gradient dictionary of the model should be updated
'''
#############################################################################
# TODO: #
# 1) Apply L2 penalty to model weights based on the regularization #
# coefficient #
#############################################################################
if len(model.weights) < 2:
model.gradients['W1'] = model.gradients['W1'] + self.reg * model.weights['W1']
else:
model.gradients['W1'] = model.gradients['W1'] + self.reg * model.weights['W1']
model.gradients['W2'] = model.gradients['W2'] + self.reg * model.weights['W2']
#############################################################################
# END OF YOUR CODE #
#############################################################################
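# --- Hedged sketch (editor addition, not part of the assignment file above) ---
# Minimal illustration of the L2 penalty applied in apply_regularization():
# grad <- grad + reg * weight. _TinyModel and its numbers are invented here.
if __name__ == '__main__':
    class _TinyModel:
        weights = {'W1': 2.0, 'W2': 4.0}
        gradients = {'W1': 0.5, 'W2': 0.25}

    model = _TinyModel()
    _BaseOptimizer(reg=1e-3).apply_regularization(model)
    print(model.gradients)  # {'W1': 0.502, 'W2': 0.254}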
| 50.896552 | 90 | 0.381436 |
f1cb69e9959973504930eeb3dab9f3129fb6d37c
| 12,974 |
py
|
Python
|
tfx/orchestration/kubeflow/v2/step_builder_test.py
|
TimoKerr/tfx
|
10d13d57eeac21514fed73118cb43464dada67f1
|
[
"Apache-2.0"
] | null | null | null |
tfx/orchestration/kubeflow/v2/step_builder_test.py
|
TimoKerr/tfx
|
10d13d57eeac21514fed73118cb43464dada67f1
|
[
"Apache-2.0"
] | null | null | null |
tfx/orchestration/kubeflow/v2/step_builder_test.py
|
TimoKerr/tfx
|
10d13d57eeac21514fed73118cb43464dada67f1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Kubeflow V2 step builder."""
from typing import Any, Dict
from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2
import tensorflow as tf
from tfx import components
from tfx.dsl.components.common import importer
from tfx.dsl.components.common import resolver
from tfx.dsl.input_resolution.strategies import latest_artifact_strategy
from tfx.dsl.input_resolution.strategies import latest_blessed_model_strategy
from tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component
from tfx.orchestration import data_types
from tfx.orchestration.kubeflow.v2 import step_builder
from tfx.orchestration.kubeflow.v2 import test_utils
from tfx.proto import example_gen_pb2
from tfx.types import channel
from tfx.types import channel_utils
from tfx.types import standard_artifacts
_TEST_CMDS = ('python', '-m', 'my_entrypoint.app_module')
class StepBuilderTest(tf.test.TestCase):
def _sole(self, d: Dict[Any, Any]) -> Any:
"""Asserts the dictionary has length 1 and returns the only value."""
self.assertLen(d, 1)
return list(d.values())[0]
def testBuildTask(self):
query = 'SELECT * FROM TABLE'
bq_example_gen = big_query_example_gen_component.BigQueryExampleGen(
query=query)
deployment_config = pipeline_pb2.PipelineDeploymentConfig()
component_defs = {}
my_builder = step_builder.StepBuilder(
node=bq_example_gen,
image='gcr.io/tensorflow/tfx:latest',
deployment_config=deployment_config,
component_defs=component_defs,
enable_cache=True)
actual_step_spec = self._sole(my_builder.build())
actual_component_def = self._sole(component_defs)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_bq_example_gen_component.pbtxt',
pipeline_pb2.ComponentSpec()), actual_component_def)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_bq_example_gen_task.pbtxt',
pipeline_pb2.PipelineTaskSpec()), actual_step_spec)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_bq_example_gen_executor.pbtxt',
pipeline_pb2.PipelineDeploymentConfig()), deployment_config)
def testBuildContainerTask(self):
task = test_utils.DummyProducerComponent(
output1=channel_utils.as_channel([standard_artifacts.Model()]),
param1='value1',
)
deployment_config = pipeline_pb2.PipelineDeploymentConfig()
component_defs = {}
my_builder = step_builder.StepBuilder(
node=task,
image='gcr.io/tensorflow/tfx:latest', # Note this has no effect here.
deployment_config=deployment_config,
component_defs=component_defs)
actual_step_spec = self._sole(my_builder.build())
actual_component_def = self._sole(component_defs)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_dummy_container_spec_component.pbtxt',
pipeline_pb2.ComponentSpec()), actual_component_def)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_dummy_container_spec_task.pbtxt',
pipeline_pb2.PipelineTaskSpec()), actual_step_spec)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_dummy_container_spec_executor.pbtxt',
pipeline_pb2.PipelineDeploymentConfig()), deployment_config)
def testBuildContainerTask2(self):
task = test_utils.dummy_producer_component(
output1=channel_utils.as_channel([standard_artifacts.Model()]),
param1='value1',
)
deployment_config = pipeline_pb2.PipelineDeploymentConfig()
component_defs = {}
my_builder = step_builder.StepBuilder(
node=task,
image='gcr.io/tensorflow/tfx:latest',
deployment_config=deployment_config,
component_defs=component_defs)
actual_step_spec = self._sole(my_builder.build())
actual_component_def = self._sole(component_defs)
# Same as in testBuildContainerTask
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_dummy_container_spec_component.pbtxt',
pipeline_pb2.ComponentSpec()), actual_component_def)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_dummy_container_spec_task.pbtxt',
pipeline_pb2.PipelineTaskSpec()), actual_step_spec)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_dummy_container_spec_executor.pbtxt',
pipeline_pb2.PipelineDeploymentConfig()), deployment_config)
def testBuildFileBasedExampleGen(self):
beam_pipeline_args = ['runner=DataflowRunner']
example_gen = components.CsvExampleGen(input_base='path/to/data/root')
deployment_config = pipeline_pb2.PipelineDeploymentConfig()
component_defs = {}
my_builder = step_builder.StepBuilder(
node=example_gen,
image='gcr.io/tensorflow/tfx:latest',
image_cmds=_TEST_CMDS,
beam_pipeline_args=beam_pipeline_args,
deployment_config=deployment_config,
component_defs=component_defs)
actual_step_spec = self._sole(my_builder.build())
actual_component_def = self._sole(component_defs)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_csv_example_gen_component.pbtxt',
pipeline_pb2.ComponentSpec()), actual_component_def)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_csv_example_gen_task.pbtxt',
pipeline_pb2.PipelineTaskSpec()), actual_step_spec)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_csv_example_gen_executor.pbtxt',
pipeline_pb2.PipelineDeploymentConfig()), deployment_config)
def testBuildFileBasedExampleGenWithInputConfig(self):
input_config = example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='train', pattern='*train.tfr'),
example_gen_pb2.Input.Split(name='eval', pattern='*test.tfr')
])
example_gen = components.ImportExampleGen(
input_base='path/to/data/root', input_config=input_config)
deployment_config = pipeline_pb2.PipelineDeploymentConfig()
component_defs = {}
my_builder = step_builder.StepBuilder(
node=example_gen,
image='gcr.io/tensorflow/tfx:latest',
deployment_config=deployment_config,
component_defs=component_defs)
actual_step_spec = self._sole(my_builder.build())
actual_component_def = self._sole(component_defs)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_import_example_gen_component.pbtxt',
pipeline_pb2.ComponentSpec()), actual_component_def)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_import_example_gen_task.pbtxt',
pipeline_pb2.PipelineTaskSpec()), actual_step_spec)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_import_example_gen_executor.pbtxt',
pipeline_pb2.PipelineDeploymentConfig()), deployment_config)
def testBuildImporter(self):
impt = importer.Importer(
source_uri='m/y/u/r/i',
properties={
'split_names': '["train", "eval"]',
},
custom_properties={
'str_custom_property': 'abc',
'int_custom_property': 123,
},
artifact_type=standard_artifacts.Examples).with_id('my_importer')
deployment_config = pipeline_pb2.PipelineDeploymentConfig()
component_defs = {}
my_builder = step_builder.StepBuilder(
node=impt,
deployment_config=deployment_config,
component_defs=component_defs)
actual_step_spec = self._sole(my_builder.build())
actual_component_def = self._sole(component_defs)
self.assertProtoEquals(
test_utils.get_proto_from_test_data('expected_importer_component.pbtxt',
pipeline_pb2.ComponentSpec()),
actual_component_def)
self.assertProtoEquals(
test_utils.get_proto_from_test_data('expected_importer_task.pbtxt',
pipeline_pb2.PipelineTaskSpec()),
actual_step_spec)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_importer_executor.pbtxt',
pipeline_pb2.PipelineDeploymentConfig()), deployment_config)
def testBuildLatestBlessedModelStrategySucceed(self):
latest_blessed_resolver = resolver.Resolver(
strategy_class=latest_blessed_model_strategy.LatestBlessedModelStrategy,
model=channel.Channel(type=standard_artifacts.Model),
model_blessing=channel.Channel(
type=standard_artifacts.ModelBlessing)).with_id('my_resolver2')
test_pipeline_info = data_types.PipelineInfo(
pipeline_name='test-pipeline', pipeline_root='gs://path/to/my/root')
deployment_config = pipeline_pb2.PipelineDeploymentConfig()
component_defs = {}
my_builder = step_builder.StepBuilder(
node=latest_blessed_resolver,
deployment_config=deployment_config,
pipeline_info=test_pipeline_info,
component_defs=component_defs)
actual_step_specs = my_builder.build()
model_blessing_resolver_id = 'my_resolver2-model-blessing-resolver'
model_resolver_id = 'my_resolver2-model-resolver'
self.assertSameElements(actual_step_specs.keys(),
[model_blessing_resolver_id, model_resolver_id])
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_latest_blessed_model_resolver_component_1.pbtxt',
pipeline_pb2.ComponentSpec()),
component_defs[model_blessing_resolver_id])
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_latest_blessed_model_resolver_task_1.pbtxt',
pipeline_pb2.PipelineTaskSpec()),
actual_step_specs[model_blessing_resolver_id])
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_latest_blessed_model_resolver_component_2.pbtxt',
pipeline_pb2.ComponentSpec()), component_defs[model_resolver_id])
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_latest_blessed_model_resolver_task_2.pbtxt',
pipeline_pb2.PipelineTaskSpec()),
actual_step_specs[model_resolver_id])
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_latest_blessed_model_resolver_executor.pbtxt',
pipeline_pb2.PipelineDeploymentConfig()), deployment_config)
def testBuildLatestArtifactResolverSucceed(self):
latest_model_resolver = resolver.Resolver(
strategy_class=latest_artifact_strategy.LatestArtifactStrategy,
model=channel.Channel(type=standard_artifacts.Model),
examples=channel.Channel(
type=standard_artifacts.Examples)).with_id('my_resolver')
deployment_config = pipeline_pb2.PipelineDeploymentConfig()
component_defs = {}
test_pipeline_info = data_types.PipelineInfo(
pipeline_name='test-pipeline', pipeline_root='gs://path/to/my/root')
my_builder = step_builder.StepBuilder(
node=latest_model_resolver,
deployment_config=deployment_config,
pipeline_info=test_pipeline_info,
component_defs=component_defs)
actual_step_spec = self._sole(my_builder.build())
actual_component_def = self._sole(component_defs)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_latest_artifact_resolver_component.pbtxt',
pipeline_pb2.ComponentSpec()), actual_component_def)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_latest_artifact_resolver_task.pbtxt',
pipeline_pb2.PipelineTaskSpec()), actual_step_spec)
self.assertProtoEquals(
test_utils.get_proto_from_test_data(
'expected_latest_artifact_resolver_executor.pbtxt',
pipeline_pb2.PipelineDeploymentConfig()), deployment_config)
if __name__ == '__main__':
tf.test.main()
| 42.537705 | 106 | 0.729459 |
ff36f8ab0b6e6408a5d3aee7e5759437afaacd16
| 4,753 |
py
|
Python
|
rtpy/system_and_configuration.py
|
amitron72/rtpy
|
d5b71dbb6b5cb431a53e07e380624249f59218bf
|
[
"Apache-2.0"
] | 14 |
2019-05-21T08:52:31.000Z
|
2021-05-31T00:23:36.000Z
|
rtpy/system_and_configuration.py
|
amitron72/rtpy
|
d5b71dbb6b5cb431a53e07e380624249f59218bf
|
[
"Apache-2.0"
] | 3 |
2019-10-25T11:42:25.000Z
|
2020-11-16T15:34:02.000Z
|
rtpy/system_and_configuration.py
|
amitron72/rtpy
|
d5b71dbb6b5cb431a53e07e380624249f59218bf
|
[
"Apache-2.0"
] | 10 |
2019-02-18T08:48:33.000Z
|
2021-06-22T09:45:00.000Z
|
# coding: utf-8
# Copyright (C) 2018 Orange
#
# This software is distributed under the terms and conditions of the 'Apache-2.0'
# license which can be found in the 'LICENSE.md' file
# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
"""Functions for the SYSTEM AND CONFIGURATION REST API Methods category."""
from .tools import RtpyBase
class RtpySystemAndConfiguration(RtpyBase):
"""SYSTEM AND CONFIGURATION methods category."""
def system_info(self, **kwargs):
"""
Get general system information.
Parameters
----------
**kwargs
Keyword arguments
"""
api_method = self._category + "System Info"
return self._request("GET", self._prefix, api_method, kwargs)
def system_health_ping(self, **kwargs):
"""
Get a simple status response about the state of Artifactory.
Parameters
----------
**kwargs
Keyword arguments
"""
api_method = self._category + "System Health Ping"
target = self._prefix + "ping"
return self._request("GET", target, api_method, kwargs)
# def verify_connection()
def general_configuration(self, **kwargs):
"""
Get the general configuration (artifactory.config.xml).
Parameters
----------
**kwargs
Keyword arguments
"""
api_method = self._category + "General Configuration"
target = self._prefix + "configuration"
return self._request("GET", target, api_method, kwargs)
def save_general_configuration(self, xml_file_path, **kwargs):
"""
Save the general configuration (artifactory.config.xml).
Parameters
----------
xml_file_path: str
Path of the xml file to POST
**kwargs
Keyword arguments
"""
api_method = self._category + "Save General Configuration"
target = self._prefix + "configuration"
myparams = {"Content-Type": "application/xml"}
with open(xml_file_path, "rb") as files:
return self._request(
"POST", target, api_method, kwargs, params=myparams, data=files
)
# Unsupported method
# def update_custom_url_base(new_url)
def license_information(self, **kwargs):
"""
Retrieve information about the currently installed license.
Parameters
----------
**kwargs
Keyword arguments
"""
api_method = self._category + "Licence Information"
target = self._prefix + "license"
return self._request("GET", target, api_method, kwargs)
def install_license(self, params, **kwargs):
"""
Install new license key or change the current one.
Parameters
----------
params: str
Settings of the license
**kwargs
Keyword arguments
"""
# The JSON output in case of an error is currently incorrect
api_method = self._category + "Install License"
target = self._prefix + "license"
return self._request("POST", target, api_method, kwargs, params=params)
# Unsupported methods
# def ha_license_information()
# def install_ha_cluster_licenses()
# def delete_ha_cluster_license()
def version_and_addons_information(self, **kwargs):
"""
Retrieve information about versions and addons.
(the current Artifactory version, revision, and currently installed Add-ons).
Parameters
----------
**kwargs
Keyword arguments
"""
api_method = self._category + "Versions and Add-ons Information"
target = self._prefix + "version"
return self._request("GET", target, api_method, kwargs)
def get_reverse_proxy_configuration(self, **kwargs):
"""
Retrieve the reverse proxy configuration.
Parameters
----------
**kwargs
Keyword arguments
"""
api_method = self._category + "Get Reverse Proxy Configuration"
target = self._prefix + "configuration/webServer"
return self._request("GET", target, api_method, kwargs)
# Unsupported method
# def update_reverse_proxy_configuration()
def get_reverse_proxy_snippet(self, **kwargs):
"""
Get the reverse proxy configuration snippet in text format.
Parameters
----------
**kwargs
Keyword arguments
"""
api_method = self._category + "Get Reverse Proxy Snippet"
target = self._prefix + "configuration/reverseProxy/nginx"
return self._request("GET", target, api_method, kwargs)
| 28.806061 | 85 | 0.601515 |
ef12fa785d1680451e8e5b3dd36427a8995cb72e
| 9,897 |
py
|
Python
|
gibolt/model.py
|
Kozea/gibolt
|
5af60ada3f611aba3d6a25d61cf060c8f71351eb
|
[
"MIT"
] | 6 |
2015-04-10T21:30:41.000Z
|
2021-05-03T21:10:44.000Z
|
gibolt/model.py
|
Kozea/gibolt
|
5af60ada3f611aba3d6a25d61cf060c8f71351eb
|
[
"MIT"
] | 138 |
2015-04-08T09:55:55.000Z
|
2021-07-27T09:41:37.000Z
|
gibolt/model.py
|
Kozea/gibolt
|
5af60ada3f611aba3d6a25d61cf060c8f71351eb
|
[
"MIT"
] | 2 |
2015-11-01T19:05:48.000Z
|
2019-04-23T13:00:50.000Z
|
import datetime
import json
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Numeric,
String,
Text,
TypeDecorator,
UniqueConstraint,
func,
)
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, relationship
from sqlalchemy.types import Enum, NullType
from . import app
Base = declarative_base()
item_types = ["checklist", "indicator"]
meeting_types = [
type_name for type_id, type_name in app.config["MEETINGS_TYPES"]
]
label_types = ["ack", "circle", "priority", "qualifier"]
role_types = ["leadlink", "elected", "assigned"]
class SQLiteJson(TypeDecorator):
impl = String
class Comparator(String.Comparator):
def __getitem__(self, index):
if isinstance(index, tuple):
index = "$%s" % (
"".join(
[
"[%s]" % elem
if isinstance(elem, int)
else '."%s"' % elem
for elem in index
]
)
)
elif isinstance(index, int):
index = "$[%s]" % index
else:
index = '$."%s"' % index
# json_extract does not appear to return JSON sub-elements
# which is weird.
return func.json_extract(self.expr, index, type_=NullType)
comparator_factory = Comparator
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
class Label(Base):
__tablename__ = "label"
__table_args__ = (UniqueConstraint("text", name="text"),)
label_id = Column(
Integer, autoincrement=True, primary_key=True, nullable=False
)
label_type = Column(Enum(*label_types))
text = Column(String)
color = Column(String)
class Priority(Base):
__tablename__ = "priority"
__table_args__ = (UniqueConstraint("value", name="value_unique"),)
priority_id = Column(
Integer, autoincrement=True, primary_key=True, nullable=False
)
label_id = Column(
Integer,
ForeignKey("label.label_id", name="fk_priority_label"),
nullable=False,
)
value = Column(Integer)
labels = relationship(
Label,
backref=backref("priorities", cascade="all,delete", uselist=False),
)
class Circle(Base):
__tablename__ = "circle"
circle_id = Column(
Integer, primary_key=True, autoincrement=True, nullable=False
)
parent_circle_id = Column(
Integer, ForeignKey("circle.circle_id"), nullable=True
)
label_id = Column(
Integer,
ForeignKey("label.label_id", name="fk_circle_label"),
nullable=True,
)
circle_name = Column(String, unique=True)
circle_purpose = Column(String)
circle_domain = Column(String)
circle_accountabilities = Column(String)
is_active = Column(Boolean, default=True, nullable=False)
circle_children = relationship(
"Circle", backref=backref("circle_parent", remote_side=[circle_id])
)
circle_milestones = relationship(
"MilestoneCircle",
backref=backref("milestone_circle", remote_side=[circle_id]),
)
label = relationship(Label, backref="circle")
@property
def user_ids(self):
user_ids = set()
for role in self.roles:
if not role.is_active:
continue
for focus in role.role_focuses:
if not focus.latest_user:
continue
user_ids.add(focus.latest_user.user_id)
return user_ids
@listens_for(Circle.is_active, "set")
def receive_attribute_change(target, value, oldvalue, initiator):
if target.circle_children:
for child in target.circle_children:
child.is_active = value
class Role(Base):
__tablename__ = "role"
role_id = Column(
Integer, primary_key=True, autoincrement=True, nullable=False
)
circle_id = Column(
Integer,
ForeignKey("circle.circle_id", name="fk_role_circle"),
nullable=False,
)
role_name = Column(String)
role_purpose = Column(String)
role_domain = Column(String)
role_accountabilities = Column(String)
is_active = Column(Boolean, default=True, nullable=False)
role_type = Column(Enum(*role_types))
circle = relationship(Circle, backref="roles")
class RoleFocus(Base):
__tablename__ = "role_focus"
role_focus_id = Column(
Integer, primary_key=True, autoincrement=True, nullable=False
)
role_id = Column(
Integer,
ForeignKey("role.role_id", name="fk_focus_role"),
nullable=False,
)
focus_name = Column(String, default="", nullable=False)
duration = Column(Integer)
role = relationship(
Role, backref=backref("role_focuses", cascade="all, delete-orphan")
)
@property
def latest_user(self):
users = self.role_focus_users
for user in users:
if user.end_date is None:
return user
else:
if user.start_date is not None:
start = user.start_date.date()
if datetime.date.today() < start:
return None
end = user.end_date.date()
if datetime.date.today() <= end:
return user
class RoleFocusUser(Base):
__tablename__ = "role_focus_user"
role_focus_user_id = Column(
Integer, primary_key=True, autoincrement=True, nullable=False
)
role_focus_id = Column(
Integer,
ForeignKey("role_focus.role_focus_id", name="fk_user_focus"),
nullable=False,
)
user_id = Column(Integer)
start_date = Column(DateTime)
end_date = Column(DateTime)
role_focus = relationship(
RoleFocus,
backref=backref(
"role_focus_users",
cascade="all, delete-orphan",
order_by=lambda: (
RoleFocusUser.end_date.desc().nullsfirst(),
RoleFocusUser.start_date.desc().nullsfirst(),
),
),
)
class Item(Base):
__tablename__ = "item"
item_id = Column(
Integer, primary_key=True, autoincrement=True, nullable=False
)
role_focus_id = Column(
Integer,
ForeignKey("role_focus.role_focus_id", name="fk_item_rolefocus"),
nullable=False,
)
item_type = Column(Enum(*item_types))
content = Column(Text)
role_focus = relationship(
RoleFocus, backref=backref("items", cascade="all, delete-orphan")
)
class Report(Base):
__tablename__ = "report"
report_id = Column(
Integer, primary_key=True, autoincrement=True, nullable=False
)
circle_id = Column(Integer, ForeignKey("circle.circle_id"), nullable=False)
report_type = Column(Enum(*meeting_types))
created_at = Column(DateTime, default=datetime.datetime.now)
author_id = Column(Integer)
content = Column(Text)
modified_at = Column(DateTime, onupdate=datetime.datetime.now)
modified_by = Column(Integer)
is_submitted = Column(Boolean, default=False, nullable=False)
circle = relationship(Circle, backref="reports")
attendees = relationship(
"ReportAttendee", cascade="all,delete", backref="report"
)
actions = relationship(
"ReportChecklist", cascade="all,delete", backref="report"
)
indicators = relationship(
"ReportIndicator", cascade="all,delete", backref="report"
)
projects = relationship(
"ReportMilestone", cascade="all,delete", backref="report"
)
agenda = relationship(
"ReportAgenda", cascade="all,delete", backref="report"
)
class MilestoneCircle(Base):
__tablename__ = "milestone_circle"
circle_id = Column(
Integer, ForeignKey("circle.circle_id"), primary_key=True
)
milestone_number = Column(Integer, primary_key=True)
repo_name = Column(String, primary_key=True)
is_active = Column(Boolean, default=True, nullable=False)
class ReportAttendee(Base):
__tablename__ = "report_attendee"
report_id = Column(
Integer, ForeignKey("report.report_id"), primary_key=True
)
user_id = Column(Integer, primary_key=True)
user = Column(SQLiteJson)
checked = Column(Boolean, default=True, nullable=False)
class ReportChecklist(Base):
__tablename__ = "report_checklist"
report_id = Column(
Integer, ForeignKey("report.report_id"), primary_key=True
)
item_id = Column(Integer, ForeignKey("item.item_id"), primary_key=True)
content = Column(String)
checked = Column(Boolean, default=False, nullable=False)
item = relationship(Item)
class ReportIndicator(Base):
__tablename__ = "report_indicator"
report_id = Column(
Integer, ForeignKey("report.report_id"), primary_key=True
)
item_id = Column(Integer, ForeignKey("item.item_id"), primary_key=True)
content = Column(String)
value = Column(Numeric)
class ReportMilestone(Base):
__tablename__ = "report_milestone"
report_id = Column(
Integer, ForeignKey("report.report_id"), primary_key=True
)
milestone_number = Column(Integer, primary_key=True)
repo_name = Column(String, primary_key=True)
milestone = Column(SQLiteJson)
class ReportAgenda(Base):
__tablename__ = "report_agenda"
report_id = Column(
Integer, ForeignKey("report.report_id"), primary_key=True
)
ticket_id = Column(Integer, primary_key=True)
ticket = Column(SQLiteJson)
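# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal way to materialise the schema declared above, assuming the gibolt package
# (and the Flask `app` whose config this module imports) is importable. Only standard
# SQLAlchemy calls are used; model and column names come from the classes above.
#
# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
# from gibolt.model import Base, Circle
#
# engine = create_engine("sqlite:///:memory:")
# Base.metadata.create_all(engine)              # creates every table declared above
# session = sessionmaker(bind=engine)()
# session.add(Circle(circle_name="General Circle"))
# session.commit()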
| 29.900302 | 79 | 0.632414 |
ba131c1b3e2d9d6d602c35c7bd4a775badba5b6c
| 667 |
py
|
Python
|
10_processes/code/threading_simple.py
|
lluxury/P_U_S_A
|
1eb9d1fef74f9ce3618ae950f5223f598510be84
|
[
"MIT"
] | null | null | null |
10_processes/code/threading_simple.py
|
lluxury/P_U_S_A
|
1eb9d1fef74f9ce3618ae950f5223f598510be84
|
[
"MIT"
] | null | null | null |
10_processes/code/threading_simple.py
|
lluxury/P_U_S_A
|
1eb9d1fef74f9ce3618ae950f5223f598510be84
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from threading import Thread
from time import sleep
import atexit
from datetime import datetime
class SpawnThread(Thread):
"""Runs threads"""
count = 0
def run(self):
SpawnThread.count += 1
threadnum = SpawnThread.count
print "Thread %s begins at %s" % (threadnum, datetime.now())
sleep(5)
print "Thread %s is done at %s" % (threadnum, datetime.now())
def cleanup():
print "All Threads are finished"
if __name__ == "__main__":
try:
for i in range(5):
secondThread = SpawnThread()
secondThread.start()
finally:
atexit.register(cleanup)
| 22.233333 | 69 | 0.61919 |
9e85feb2cff6cf8b50eed0916bf54aca55ce83c8
| 489 |
py
|
Python
|
zoo/public/regnet/regnetx_032.py
|
megvii-research/basecls
|
6b395a0a888370b4523764afb78a5a7634a3f6cd
|
[
"Apache-2.0"
] | 23 |
2021-12-08T02:35:01.000Z
|
2022-03-16T02:23:19.000Z
|
zoo/public/regnet/regnetx_032.py
|
megvii-research/basecls
|
6b395a0a888370b4523764afb78a5a7634a3f6cd
|
[
"Apache-2.0"
] | 4 |
2021-12-23T11:31:17.000Z
|
2022-02-28T01:35:31.000Z
|
zoo/public/regnet/regnetx_032.py
|
megvii-research/basecls
|
6b395a0a888370b4523764afb78a5a7634a3f6cd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from basecls.configs import RegNetConfig
_cfg = dict(
batch_size=64,
model=dict(
name="regnetx_032",
),
solver=dict(
basic_lr=0.05,
),
model_ema=dict(
enabled=True,
momentum=0.99996,
),
)
class Cfg(RegNetConfig):
def __init__(self, values_or_file=None, **kwargs):
super().__init__(_cfg)
self.merge(values_or_file, **kwargs)
| 20.375 | 58 | 0.621677 |
eda35b5ea006ff0149c27db7f170abfd00aa693f
| 3,350 |
py
|
Python
|
app/wordlists.py
|
fterrier/gender-decoder
|
62544ab317bdda1c1e97c27932890b4c68909a47
|
[
"MIT"
] | null | null | null |
app/wordlists.py
|
fterrier/gender-decoder
|
62544ab317bdda1c1e97c27932890b4c68909a47
|
[
"MIT"
] | null | null | null |
app/wordlists.py
|
fterrier/gender-decoder
|
62544ab317bdda1c1e97c27932890b4c68909a47
|
[
"MIT"
] | 1 |
2019-01-29T14:12:19.000Z
|
2019-01-29T14:12:19.000Z
|
# These words are written as the stem to make it easier to match all variants.
# In other words, the suffix is intentionally left out.
feminine_coded_words = [
"agree",
"affectionate",
"child",
"cheer",
"collab",
"commit",
"communal",
"compassion",
"connect",
"considerate",
"cooperat",
"co-operat",
"depend",
"emotiona",
"empath",
"feel",
"flatterable",
"gentle",
"honest",
"interpersonal",
"interdependen",
"interpersona",
"inter-personal",
"inter-dependen",
"inter-persona",
"kind",
"kinship",
"loyal",
"modesty",
"nag",
"nurtur",
"pleasant",
"polite",
"quiet",
"respon",
"sensitiv",
"submissive",
"support",
"sympath",
"tender",
"together",
"trust",
"understand",
"warm",
"whin",
"enthusias",
"inclusive",
"yield",
"share",
"sharin",
# german starts here
"bedächtig",
"bescheiden",
"ehrlich",
"ehrlich",
"emotional",
"empathisch",
"engagiert",
"freundlich",
"fröhlich",
"fürsorglich",
"gemeinsam",
"gemeinschaftlich",
"gestalterisch",
"kommunikativ",
"kooperativ",
"kreativ",
"leidenschaftlich",
"loyal",
"mitfühlend",
"partnerschaftlich",
"resilient",
"sympathisch",
"teamfähig",
"teamorientiert",
"unterstützend",
"verantwortungsvoll",
"verlässlich",
"verständnisvoll",
"vertrauensvoll",
"weiblich",
"zusammen",
"zuverlässig",
"zwischenmenschlich"
]
masculine_coded_words = [
"active",
"adventurous",
"aggress",
"ambitio",
"analy",
"assert",
"athlet",
"autonom",
"battle",
"boast",
"challeng",
"champion",
"compet",
"confident",
"courag",
"decid",
"decision",
"decisive",
"defend",
"determin",
"domina",
"dominant",
"driven",
"fearless",
"fight",
"force",
"greedy",
"head-strong",
"headstrong",
"hierarch",
"hostil",
"impulsive",
"independen",
"individual",
"intellect",
"lead",
"logic",
"objective",
"opinion",
"outspoken",
"persist",
"principle",
"reckless",
"self-confiden",
"self-relian",
"self-sufficien",
"selfconfiden",
"selfrelian",
"selfsufficien",
"stubborn",
"superior",
"unreasonab",
# german starts here
"aktiv",
"ambitioniert",
"analytisch",
"direkt",
"dominant",
"durchsetzungsfähig",
"durchsetzungsstark",
"ehrgeizig",
"entscheidend",
"entschlossen",
"führend",
"herausfordernd",
"hierarchisch",
"individuell",
"kompetetiv",
"kraftvoll",
"logisch",
"männlich",
"objektiv",
"selbstbewusst",
"stark",
"überdurchschnittlich",
"unabhängig",
"unabhängig",
"zielstrebig"
]
feminine_sentences = [
"prise en charge"
]
masculine_sentences = [
"alpha male"
]
hyphenated_coded_words = [
"co-operat",
"inter-personal",
"inter-dependen",
"inter-persona",
"self-confiden",
"self-relian",
"self-sufficien"
]
possible_codings = (
"strongly feminine-coded",
"feminine-coded",
"neutral",
"masculine-coded",
"strongly masculine-coded"
)
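# --- Hedged usage sketch (added for illustration; not part of the original word lists) ---
# The lists above hold word *stems* (suffixes intentionally left out), so the natural way
# to use them is prefix matching against lower-cased tokens. This helper is illustrative
# only and is not part of the gender-decoder application itself.
def _count_coded_words(text, stems):
    # Strip common punctuation, lower-case, and count tokens starting with any stem.
    tokens = [token.strip(".,;:!?()\"'") for token in text.lower().split()]
    return sum(1 for token in tokens for stem in stems if token.startswith(stem))
# Example: _count_coded_words("a supportive, collaborative team", feminine_coded_words)
# counts "supportive" (stem "support") and "collaborative" (stem "collab"), i.e. 2.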
| 17.005076 | 78 | 0.550149 |
50dcb4eb1883f0c016452aab5b2a7e61b06c7730
| 295 |
py
|
Python
|
manabi/nose_plugins.py
|
aehlke/manabi
|
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
|
[
"MIT"
] | 14 |
2015-10-03T07:34:28.000Z
|
2021-09-20T07:10:29.000Z
|
manabi/nose_plugins.py
|
aehlke/manabi
|
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
|
[
"MIT"
] | 23 |
2019-10-25T08:47:23.000Z
|
2022-01-30T02:00:45.000Z
|
manabi/nose_plugins.py
|
aehlke/manabi
|
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
|
[
"MIT"
] | 7 |
2016-10-04T08:10:36.000Z
|
2021-09-20T07:10:33.000Z
|
import logging
from nose.plugins import Plugin
class SilenceSouth(Plugin):
south_logging_level = logging.ERROR
def configure(self, options, conf):
super(SilenceSouth, self).configure(options, conf)
logging.getLogger('south').setLevel(self.south_logging_level)
| 22.692308 | 69 | 0.725424 |
ff54134042ee87ba08752ff8bd5e3701f300040b
| 2,402 |
py
|
Python
|
algorithms/code/leetcode/lc046_permutations/lc046_permutations.py
|
altermarkive/training
|
6a13f5b2f466156ad5db0e25da0e601d2404b4c3
|
[
"MIT"
] | null | null | null |
algorithms/code/leetcode/lc046_permutations/lc046_permutations.py
|
altermarkive/training
|
6a13f5b2f466156ad5db0e25da0e601d2404b4c3
|
[
"MIT"
] | 1 |
2022-02-16T11:28:56.000Z
|
2022-02-16T11:28:56.000Z
|
algorithms/code/leetcode/lc046_permutations/lc046_permutations.py
|
altermarkive/training
|
6a13f5b2f466156ad5db0e25da0e601d2404b4c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# https://leetcode.com/problems/permutations/
import unittest
from typing import List
class Solution:
def __permute(self, prefix, remaining, permutations):
if len(remaining) == 0:
permutations.append(prefix.copy())
else:
for value in remaining:
prefix.append(value)
reduced = set(remaining)
reduced.remove(value)
self.__permute(prefix, reduced, permutations)
prefix.pop()
def permute(self, nums: List[int]) -> List[List[int]]:
permutations = []
remaining = set(nums)
self.__permute([], remaining, permutations)
return permutations
class TestCode(unittest.TestCase):
@staticmethod
def cmp_to_key(mycmp): # pragma: no cover
class K:
def __init__(self, obj, *_):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
@staticmethod
def integer_list_comparator(l1, l2): # pragma: no cover
if len(l1) < len(l2):
return -1
if len(l1) > len(l2):
return 1
for l1i, l2i in zip(l1, l2):
if l1i < l2i:
return -1
if l1i > l2i:
return 1
return 0
def test_example(self):
nums = [1, 2, 3]
expected = [
[1, 2, 3], [1, 3, 2], [2, 1, 3],
[2, 3, 1], [3, 1, 2], [3, 2, 1]]
result = Solution().permute(nums)
result = sorted(
result, key=TestCode.cmp_to_key(TestCode.integer_list_comparator))
self.assertEqual(len(expected), len(result))
for i, _ in enumerate(expected):
self.assertEqual(len(expected[i]), len(result[i]))
for j, _ in enumerate(expected[i]):
self.assertEqual(expected[i][j], result[i][j])
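# --- Hedged usage note (added for illustration; not part of the original file) ---
# Direct use of the backtracking solver defined above:
#
#   >>> sorted(Solution().permute([1, 2]))
#   [[1, 2], [2, 1]]
#
# The order of the returned permutations depends on set iteration order, so only the
# collection of results (not their order) is deterministic.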
| 30.025 | 78 | 0.52373 |
87fe3fcee7ea293ad36fb71605f242b42ceec87d
| 6,415 |
py
|
Python
|
tests/test_distributed.py
|
laudv/treeck
|
3e4165c146f644af6387fa7f890ad93d1ca84d11
|
[
"Apache-2.0"
] | 4 |
2020-02-03T20:03:32.000Z
|
2020-03-29T16:07:53.000Z
|
tests/test_distributed.py
|
laudv/treeck
|
3e4165c146f644af6387fa7f890ad93d1ca84d11
|
[
"Apache-2.0"
] | null | null | null |
tests/test_distributed.py
|
laudv/treeck
|
3e4165c146f644af6387fa7f890ad93d1ca84d11
|
[
"Apache-2.0"
] | null | null | null |
#import matplotlib.pyplot as plt
import unittest, json
import numpy as np
import z3
import importlib
import treeck
from treeck import *
from treeck.verifier import Verifier
from treeck.z3backend import Z3Backend as Backend
from treeck.distributed import DistributedVerifier, VerifierFactory
from dask.distributed import Client
from start_dask import start_local
dask_scheduler = "localhost:8786"
class TestDistributedVerifier(unittest.TestCase):
def test_img_generate_splits(self):
class VFactory(VerifierFactory):
def __call__(self, lk, check_paths):
v = Verifier(lk, Backend())
v.add_constraint(v.fvar() < 0.0)
v.add_constraint(v.xvar(0) > 50)
v.add_constraint(v.xvar(1) < 50)
return v
with Client(dask_scheduler) as client:
client.restart()
N = 10
at = AddTree.read("tests/models/xgb-img-easy.json")
dt = DomTree(at, {})
dv = DistributedVerifier(client, dt, VFactory(),
check_paths = True,
num_initial_tasks = N,
stop_when_num_sats = N)
dv.check()
#print(json.dumps(dv.results, indent=2, default=str))
count_with_status = 0
count_with_sat = 0
for k, d in dv.results.items():
if isinstance(k, int) and "status" in d:
count_with_status += 1
if d["status"].is_sat():
count_with_sat += 1
self.assertEqual(count_with_status, N)
self.assertGreater(count_with_sat, 0)
def test_bin_mnist(self):
class VFactory(VerifierFactory):
def __call__(self, lk, check_paths):
v = Verifier(lk, Backend())
v.add_constraint(v.fvar() > 5.0)
v.add_constraint(z3.PbLe([(v.xvar(fid).get(), 1)
for fid in v.instance(0).feat_ids()], 50))
return v
with Client(dask_scheduler) as client:
client.restart()
N = 10
at = AddTree.read("tests/models/xgb-mnist-bin-yis1-intermediate.json")
dt = DomTree(at, {})
dv = DistributedVerifier(client, dt, VFactory(),
check_paths = False,
num_initial_tasks = N,
timeout_start = 5.0,
stop_when_num_sats = N)
dv.check()
#print(json.dumps(dv.results, indent=2, default=repr))
#print(dt.tree())
count_with_status = 0
count_with_sat = 0
for k, d in dv.results.items():
if isinstance(k, int) and "status" in d:
count_with_status += 1
if d["status"] == Verifier.Result.SAT:
count_with_sat += 1
self.assertGreater(d["model"]["f"], 5.0)
inst = [True] * (28*28)
for i, x in d["model"]["xs"].items():
inst[i] = x
self.assertEqual(d["model"]["f"], at.predict_single(inst))
#print(d["model"]["f"], at.predict_single(inst))
self.assertGreaterEqual(count_with_status, N)
self.assertGreater(count_with_sat, 0)
def test_img_multi_instance(self):
class VFactory(VerifierFactory):
def __call__(self, lk, check_paths):
v = Verifier(lk, Backend())
v.add_constraint(v.fvar(instance=0) < 0.0)
v.add_constraint(v.xvar(0, instance=0) > 50)
v.add_constraint(v.xvar(1, instance=0) < 50)
# instances are exactly the same!
for fid1, fid2 in zip(v.instance(0).feat_ids(), v.instance(1).feat_ids()):
v.add_constraint(v.xvar(fid1, instance=0) == v.xvar(fid2, instance=1))
v.add_constraint(v.fvar(instance=1).get() - v.fvar(instance=0).get() < 9999)
return v
with Client(dask_scheduler) as client:
client.restart()
N = 10
at0 = AddTree.read("tests/models/xgb-img-easy.json")
at1 = AddTree.read("tests/models/xgb-img-easy.json")
at1.base_score = 10000
dt = DomTree([(at0, {}), (at1, {})])
dv = DistributedVerifier(client, dt, VFactory(),
check_paths = True,
num_initial_tasks = N,
stop_when_num_sats = N)
dv.check()
#print(json.dumps(dv.results, indent=2, default=str))
#print(dt.tree())
count_with_status = 0
count_with_sat = 0
for k, d in dv.results.items():
if isinstance(k, int) and "status" in d:
count_with_status += 1
if d["status"].is_sat():
count_with_sat += 1
self.assertEqual(count_with_status, N)
self.assertEqual(count_with_sat, 0)
#def test_adv(self):
# instance_key = 0
# model = "xgb-mnist-yis0-easy"
# offset = 10
# max_sum_offset = 50
# def vfactory_aux(instance, offset, max_sum_offset, doms, at):
# v = Verifier(doms, at, Backend(), Strategy())
# sum_constraint = 0
# for j, pixel in zip(range(v.num_features), instance):
# x = v.xvar(j)
# v.add_constraint((x > max(0, pixel-offset)) & (x < min(255, pixel+offset)))
# sum_constraint += z3.If(x.get()-pixel <= 0, pixel-x.get(), x.get()-pixel)
# v.add_constraint(sum_constraint < max_sum_offset)
# return v
# with open("tests/models/mnist-instances.json") as f:
# instance_key = 0
# instance = np.array(json.load(f)[str(instance_key)])
# vfactory = lambda doms, at: vfactory_aux(instance, offset, max_sum_offset, doms, at)
# with Client("tcp://localhost:30333") as client:
# at = AddTree.read(f"tests/models/{model}.json")
# dv = DistributedVerifier(client, at, vfactory)
# dv.run()
if __name__ == "__main__":
z3.set_pp_option("rational_to_decimal", True)
z3.set_pp_option("precision", 3)
z3.set_pp_option("max_width", 130)
unittest.main()
| 38.413174 | 93 | 0.535776 |
e87385161c0508778bb45a297c96982984bdcb39
| 1,832 |
py
|
Python
|
benchmark/startPyquil2749.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil2749.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil2749.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=37
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(3) # number=31
prog += CZ(0,3) # number=32
prog += H(3) # number=33
prog += X(3) # number=29
prog += CNOT(0,3) # number=30
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += X(2) # number=34
prog += Y(1) # number=19
prog += H(0) # number=9
prog += Y(2) # number=10
prog += Y(2) # number=11
prog += Y(3) # number=20
prog += Y(1) # number=12
prog += RX(-2.158274153016188,3) # number=24
prog += H(0) # number=16
prog += CZ(2,0) # number=17
prog += H(0) # number=18
prog += CNOT(1,0) # number=21
prog += Z(1) # number=22
prog += CNOT(1,0) # number=23
prog += H(0) # number=25
prog += CZ(2,0) # number=26
prog += H(0) # number=27
prog += X(0) # number=35
prog += X(0) # number=36
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2749.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| 25.09589 | 64 | 0.563865 |
9b374f19d3465e613217bdf5165031584a57a296
| 9,272 |
py
|
Python
|
emaileasily/emaileasily.py
|
erastusnzula/easy-email
|
3eceb871e5cee6c123e32010b0ef3ede1048684c
|
[
"MIT"
] | null | null | null |
emaileasily/emaileasily.py
|
erastusnzula/easy-email
|
3eceb871e5cee6c123e32010b0ef3ede1048684c
|
[
"MIT"
] | null | null | null |
emaileasily/emaileasily.py
|
erastusnzula/easy-email
|
3eceb871e5cee6c123e32010b0ef3ede1048684c
|
[
"MIT"
] | null | null | null |
"""
Author: Erastus Nzula.
Licence: MIT.
Description: A simplified way to send and read emails.
Contribution status: open.
"""
import email
import imaplib
import os
import smtplib
import socket
import webbrowser
from email.header import decode_header
from email.message import EmailMessage
from tkinter.filedialog import askopenfilenames
message = EmailMessage()
filenames = []
body = []
global subject
def email_to(*receivers_addresses):
"""
:param receivers_addresses: The email main receivers' addresses.
:return: email receivers' addresses.
"""
to_recipients = []
header = 'To'
return loop_through_addresses(receivers_addresses, to_recipients, header)
def email_subject(subject_=None):
"""
:param subject_: the email subject.
:return: email subject.
"""
try:
message['Subject'] = subject_
return message['Subject']
except ValueError:
for key in message.keys():
del message[key]
print('Re-enter email subject.')
def email_bcc(*bcc_addresses):
"""
:param bcc_addresses: email bcc receivers' addresses.
:return: bcc receivers' addresses.
"""
bcc_recipients = []
header = 'Bcc'
return loop_through_addresses(bcc_addresses, bcc_recipients, header)
def email_cc(*cc_addresses):
"""
:param cc_addresses: the email copy receivers' addresses
:return: email address
"""
cc_recipients = []
header = 'Cc'
return loop_through_addresses(cc_addresses, cc_recipients, header)
def loop_through_addresses(addresses, recipients, header):
"""
Loop through all addresses.
:param addresses: user address input.
:param recipients: list to store all addresses.
:param header: message label (To, Bcc or Cc)
:return: email addresses.
"""
try:
for recipient in addresses:
recipients.append(recipient)
message[header] = recipients
return message[header]
except ValueError:
for key in message.keys():
del message[key]
print('Re-enter email address.')
def email_attach_document():
"""
Allows attachment of files from directory.
:return: files to attach.
"""
try:
documents = askopenfilenames(title='Select files to attach')
for document in documents:
with open(document, "rb") as file:
message.add_attachment(file.read(), maintype="application", subtype="octet-stream",
filename=os.path.basename(file.name))
filenames.append(os.path.basename(file.name))
print(f'Document: {os.path.basename(file.name)} attached successfully.')
except TypeError:
print('Please call the function email_attach_document after email_content')
def email_content(content=None):
"""
Accepts plain email content.
:param content: user plain content input.
:return: email body.
"""
body.append(content)
return message.set_content(content)
def email_html(content=None):
"""
Accepts html content input.
:param content: html content.
:return:email body in html format.
"""
body.append(content)
return message.set_content(f"""{content}""", subtype='html')
def email_send(sender_email, password, host="smtp.gmail.com", port=465):
"""
Logs in users and sends emails.
:param sender_email: sender email address.
:param password: sender password.
:param host: email provider host address.
:param port: email provider port.
:return: email send status.
"""
message['From'] = sender_email
try:
with smtplib.SMTP_SSL(host, port) as smtp:
smtp.login(sender_email, password)
print('Sending email...')
smtp.send_message(message)
print(f'Email successfully sent.')
smtp.quit()
for key in message.keys():
del message[key]
except (smtplib.SMTPRecipientsRefused, socket.gaierror):
for key in message.keys():
del message[key]
print('Failed to send email.')
def name_folder(subject_email):
"""
    Builds a filesystem-safe name from the email subject by replacing non-alphanumeric characters with underscores.
:param subject_email: email subject.
:return: folder name
"""
return "".join(c if c.isalnum() else "_" for c in subject_email)
def read_emails(email_address, email_password, number_of_emails=2, label='INBOX', host='imap.gmail.com',
port=993):
"""
    Fetches emails and displays their content.
    :param email_address: address of the account to read from.
    :param email_password: password of the account to read from.
:param number_of_emails: the number of emails to view.
:param label: the label to fetch emails from.
:param host: the email provider host address.
:param port: the email provider port
:return: fetched emails.
"""
global subject
imap = imaplib.IMAP4_SSL(host, port)
imap.login(email_address, email_password)
print('Successfully logged in, fetching emails...')
status, all_messages = imap.select(label)
messages = int(all_messages[0])
for i in range(messages, messages - number_of_emails, -1):
_, email_messages = imap.fetch(str(i), "(RFC822)")
for email_message in email_messages:
if isinstance(email_message, tuple):
msg = email.message_from_bytes(email_message[1])
get_subject_and_from(msg)
if msg.is_multipart():
get_multipart_email(msg)
else:
get_non_multipart_emails(msg)
close_imap(imap)
def close_imap(imap):
"""
Closes the imaplib connection and logs out the user.
:param imap: The imaplib connection.
:return: 0
"""
imap.close()
imap.logout()
def get_subject_and_from(msg):
"""
Gets the email subject, date and sender.
    Converts them to human-readable form.
:param msg: email content
:return: email subject, sender and date.
"""
global subject
subject, encoding = decode_header(msg['Subject'])[0]
if isinstance(subject, bytes):
try:
subject = subject.decode(encoding)
except TypeError:
pass
sender, encoding = decode_header(msg.get("From"))[0]
if isinstance(sender, bytes):
sender = sender.decode(encoding)
date, encoding = decode_header(msg.get("Date"))[0]
if isinstance(date, bytes):
date = date.decode(encoding)
print('==' * 50)
print("Subject: ", subject)
print("From: ", sender)
print("Date: ", date)
def get_multipart_email(msg):
"""
Classifies multipart emails based on content type.
Prints the body of emails without attachments.
    For parts flagged as attachments it calls get_attachments.
    :param msg: email content.
    :return: None.
"""
global subject
for part in msg.walk():
content_type = part.get_content_type()
content_disposition = str(part.get("Content-Disposition"))
email_body = None
try:
email_body = part.get_payload(decode=True).decode()
except (AttributeError, UnicodeDecodeError):
pass
if content_type == "text/plain" and "attachment" not in content_disposition:
print(email_body)
elif "attachment" in content_disposition:
get_attachments(part)
def get_attachments(part):
"""
    Gets the attached files in an email.
    Creates a folder named after the email subject.
    Stores the attachments in that folder.
:param part: The email attachment part
:return: email attached files.
"""
filename = part.get_filename()
if filename:
folder_name = name_folder(subject)
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
file_path = os.path.join(folder_name, filename)
open(file_path, "wb").write(part.get_payload(decode=True))
print('Attached files saved at: ' + file_path)
def get_non_multipart_emails(msg):
"""
    Handles emails that are not multipart.
    If the content type is text/plain it prints the email body.
    If the content type is text/html it passes the body to get_html_emails.
    :param msg: email message
    :return: None
"""
content_type = msg.get_content_type()
email_body = msg.get_payload(decode=True).decode()
if content_type == 'text/plain':
print(email_body)
if content_type == "text/html":
get_html_emails(email_body)
def get_html_emails(email_body):
"""
Creates a folder with name based on the email subject.
Creates a html file inside the folder.
Writes the email content in the file and opens it in a web browser.
:param email_body: fetched email body.
:return: email_body.
"""
try:
folder_name = name_folder(subject)
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
filename = subject + '.html'
file_path = os.path.join(folder_name, filename)
open(file_path, "w").write(email_body)
print(email_body)
webbrowser.open(file_path)
except UnicodeEncodeError:
pass
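# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Composing with the helpers above only mutates the module-level EmailMessage; nothing
# leaves the machine until email_send() is called. The addresses and password below are
# placeholders, and email_attach_document() opens a tkinter file-picker dialog.
#
# email_to("recipient@example.com")
# email_cc("copy@example.com")
# email_subject("Monthly report")
# email_content("Hello, please find the report attached.")
# email_attach_document()
# email_send("sender@example.com", "<app-password>")   # defaults: smtp.gmail.com, port 465
#
# Reading the two most recent INBOX emails of an account:
# read_emails("sender@example.com", "<app-password>", number_of_emails=2)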
| 30.5 | 104 | 0.652286 |
71ab76621233ea2662b083f8b086e87608f006c1
| 3,287 |
py
|
Python
|
setup.py
|
eddiejessup/nestler
|
f42dfb4132dd94e1166c4a96afad77c825319282
|
[
"MIT"
] | null | null | null |
setup.py
|
eddiejessup/nestler
|
f42dfb4132dd94e1166c4a96afad77c825319282
|
[
"MIT"
] | null | null | null |
setup.py
|
eddiejessup/nestler
|
f42dfb4132dd94e1166c4a96afad77c825319282
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='nestler',
version='0.1.0',
description='Generate reports from Markdown files with embedded Python',
long_description=long_description,
url='https://github.com/eddiejessup/nestler',
author='Elliot Marsden',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
],
keywords='jupyter report data-science analytics',
# You can specify the packages manually here if your project is
# simple, or use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# Run-time dependencies. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'pyyaml',
'pyparsing',
'pypandoc',
'jupyter',
],
# Additional groups of dependencies (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
include_package_data=True,
# package_data={
# 'assets': ['nestler.data'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'nestler = nestler.nestler:main',
'start-kernel = nestler.start_kernel:main',
],
},
)
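# --- Hedged usage note (added for illustration; not part of the original setup script) ---
# After installation (for example `pip install -e .`), the entry_points above put two
# console scripts on PATH: `nestler` (nestler.nestler:main) and `start-kernel`
# (nestler.start_kernel:main). Their command-line arguments are defined in those modules,
# not in this file.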
| 33.886598 | 94 | 0.654092 |
718857f5299955fba696766cc102fdc881e9ddf9
| 1,312 |
py
|
Python
|
optimization_work/distributed_solution.py
|
pghartig/Power-Control
|
c23613b74c9fe1a1ecd6d415f5bf0cb625920661
|
[
"MIT"
] | 7 |
2020-04-19T01:58:47.000Z
|
2022-03-24T02:24:23.000Z
|
optimization_work/distributed_solution.py
|
pghartig/Power-Control
|
c23613b74c9fe1a1ecd6d415f5bf0cb625920661
|
[
"MIT"
] | null | null | null |
optimization_work/distributed_solution.py
|
pghartig/Power-Control
|
c23613b74c9fe1a1ecd6d415f5bf0cb625920661
|
[
"MIT"
] | 3 |
2021-03-21T12:34:11.000Z
|
2021-08-15T17:23:48.000Z
|
from network_simulations import het_net
import cvxpy as cp
import matplotlib.pyplot as plt
import numpy as np
import copy
"""
First set up the network using the het_net class, then consolidate all of the information from the network to solve the central optimization problem.
"""
def test_dist_optimization():
num_users = 10
num_antenna = num_users + 5
network = het_net.Het_Network(5, 30, num_users, num_antenna, .1, power_vector_setup=True)
# network.update_beam_formers()
for_comp = copy.deepcopy(network)
for_comp.update_beam_formers()
# Choose number of iterations to allow
num_iterations = 500
utilities, duals = network.allocate_power_step(num_iterations)
test_utilities, test_duals = for_comp.allocate_power_step(num_iterations)
network.print_layout()
plt.figure()
    plt.plot(np.arange(num_iterations+1), utilities, label = "moore-penrose")
plt.plot(np.arange(num_iterations+1), test_utilities, label = "optimized")
plt.legend(loc = "lower left")
plt.figure()
duals = np.asarray(duals)
for columns in range(duals.shape[1]):
plt.plot(duals[:, columns])
plt.figure()
test_duals = np.asarray(test_duals)
for columns in range(test_duals.shape[1]):
plt.plot(test_duals[:, columns])
plt.show()
| 32 | 156 | 0.724848 |
76185f73eae7962a02b011b6b084ac810596b874
| 1,458 |
py
|
Python
|
src/backend/migrations/versions/20191221_164504_881811805554_.py
|
zackramjan/motuz
|
892252eb50acbd8135bf9df9872df5e4cfe6277b
|
[
"MIT"
] | 84 |
2019-05-10T14:56:48.000Z
|
2022-03-19T17:07:24.000Z
|
src/backend/migrations/versions/20191221_164504_881811805554_.py
|
zackramjan/motuz
|
892252eb50acbd8135bf9df9872df5e4cfe6277b
|
[
"MIT"
] | 226 |
2019-05-28T21:59:22.000Z
|
2022-03-09T10:58:24.000Z
|
src/backend/migrations/versions/20191221_164504_881811805554_.py
|
zackramjan/motuz
|
892252eb50acbd8135bf9df9872df5e4cfe6277b
|
[
"MIT"
] | 16 |
2019-09-27T01:35:49.000Z
|
2022-03-08T16:18:50.000Z
|
"""empty message
Revision ID: 881811805554
Revises: 1035b06e75e0
Create Date: 2019-12-21 16:45:04.180704
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '881811805554'
down_revision = '1035b06e75e0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('revoked_token',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('jti', sa.String(), nullable=False),
sa.Column('type', sa.String(), nullable=False),
sa.Column('identity', sa.String(), nullable=False),
sa.Column('exp', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('jti')
)
op.drop_table('invalid_token')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('invalid_token',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('token', sa.VARCHAR(length=500), autoincrement=False, nullable=False),
sa.Column('blacklisted_on', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id', name='invalid_token_pkey'),
sa.UniqueConstraint('token', name='invalid_token_token_key')
)
op.drop_table('revoked_token')
# ### end Alembic commands ###
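# --- Hedged usage note (added for illustration; not part of the original migration) ---
# Like any Alembic revision, this one is applied or rolled back with the Alembic CLI,
# run from the directory containing the migration environment:
#
#   alembic upgrade 881811805554     # or `alembic upgrade head`
#   alembic downgrade 1035b06e75e0
#
# Whether the Motuz project wraps this in another command (e.g. Flask-Migrate's
# `flask db upgrade`) is an assumption not confirmed by this file.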
| 32.4 | 93 | 0.70096 |
ec7d21490f4fcd547b34ea57e4f55f9201d5feef
| 3,773 |
py
|
Python
|
cuml_bench/knn_clsf.py
|
KalyanovD/scikit-learn_bench
|
13f05203e3c62fe576c91a897d576c32e2c3e50a
|
[
"Apache-2.0"
] | 59 |
2019-06-13T18:25:55.000Z
|
2022-03-28T19:33:23.000Z
|
cuml_bench/knn_clsf.py
|
KalyanovD/scikit-learn_bench
|
13f05203e3c62fe576c91a897d576c32e2c3e50a
|
[
"Apache-2.0"
] | 45 |
2018-10-10T16:17:21.000Z
|
2022-03-25T16:00:58.000Z
|
cuml_bench/knn_clsf.py
|
KalyanovD/scikit-learn_bench
|
13f05203e3c62fe576c91a897d576c32e2c3e50a
|
[
"Apache-2.0"
] | 38 |
2018-10-08T17:07:25.000Z
|
2021-11-10T06:29:37.000Z
|
# ===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import argparse
import bench
from cuml.neighbors import KNeighborsClassifier
parser = argparse.ArgumentParser(
description='cuML kNN classifier benchmark')
parser.add_argument('--task', default='classification', type=str,
choices=('search', 'classification'),
help='kNN task: search or classification')
parser.add_argument('--n-neighbors', default=5, type=int,
help='Number of neighbors to use')
parser.add_argument('--weights', type=str, default='uniform',
help='Weight function used in prediction')
parser.add_argument('--method', type=str, default='brute',
help='Algorithm used to compute the nearest neighbors')
parser.add_argument('--metric', type=str, default='euclidean',
help='Distance metric to use')
params = bench.parse_args(parser)
# Load generated data
X_train, X_test, y_train, y_test = bench.load_data(params)
params.n_classes = y_train[y_train.columns[0]].nunique()
# Create classification object
knn_clsf = KNeighborsClassifier(n_neighbors=params.n_neighbors,
weights=params.weights,
algorithm=params.method,
metric=params.metric)
# Measure time and accuracy on fitting
train_time, _ = bench.measure_function_time(knn_clsf.fit, X_train, y_train,
params=params)
if params.task == 'classification':
y_pred = knn_clsf.predict(X_train)
train_acc = 100 * bench.accuracy_score(y_pred, y_train)
# Measure time and accuracy on prediction
if params.task == 'classification':
predict_time, yp = bench.measure_function_time(knn_clsf.predict, X_test,
params=params)
test_acc = 100 * bench.accuracy_score(yp, y_test)
else:
predict_time, _ = bench.measure_function_time(knn_clsf.kneighbors, X_test,
params=params)
if params.task == 'classification':
bench.print_output(library='cuml',
algorithm=knn_clsf.algorithm + '_knn_clsf',
stages=['training', 'prediction'], params=params,
functions=['knn_clsf.fit', 'knn_clsf.predict'],
times=[train_time, predict_time],
metrics=[train_acc, test_acc], metric_type='accuracy[%]',
data=[X_train, X_test], alg_instance=knn_clsf)
else:
bench.print_output(library='cuml',
algorithm=knn_clsf.algorithm + '_knn_search',
stages=['training', 'search'], params=params,
functions=['knn_clsf.fit', 'knn_clsf.kneighbors'],
times=[train_time, predict_time],
metrics=[], metric_type=None,
data=[X_train, X_test], alg_instance=knn_clsf)
| 46.580247 | 82 | 0.588126 |
d056cf255e6503787ab3973175c14d8327b8ca04
| 13,869 |
py
|
Python
|
rllib/agents/impala/vtrace_tf_policy.py
|
daobook/ray
|
af9f1ef4dc160e0671206556b387f8017f3c3930
|
[
"Apache-2.0"
] | 33 |
2020-05-27T14:25:24.000Z
|
2022-03-22T06:11:30.000Z
|
rllib/agents/impala/vtrace_tf_policy.py
|
daobook/ray
|
af9f1ef4dc160e0671206556b387f8017f3c3930
|
[
"Apache-2.0"
] | 227 |
2021-10-01T08:00:01.000Z
|
2021-12-28T16:47:26.000Z
|
rllib/agents/impala/vtrace_tf_policy.py
|
gramhagen/ray
|
c18caa4db36d466718bdbcb2229aa0b2dc03da1f
|
[
"Apache-2.0"
] | 5 |
2020-08-06T15:53:07.000Z
|
2022-02-09T03:31:31.000Z
|
"""Adapted from A3CTFPolicy to add V-trace.
Keep in sync with changes to A3CTFPolicy and VtraceSurrogatePolicy."""
import numpy as np
import logging
import gym
import ray
from ray.rllib.agents.impala import vtrace_tf as vtrace
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.policy.tf_policy import LearningRateSchedule, \
EntropyCoeffSchedule
from ray.rllib.utils import force_list
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.tf_utils import explained_variance
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
class VTraceLoss:
def __init__(self,
actions,
actions_logp,
actions_entropy,
dones,
behaviour_action_logp,
behaviour_logits,
target_logits,
discount,
rewards,
values,
bootstrap_value,
dist_class,
model,
valid_mask,
config,
vf_loss_coeff=0.5,
entropy_coeff=0.01,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0):
"""Policy gradient loss with vtrace importance weighting.
VTraceLoss takes tensors of shape [T, B, ...], where `B` is the
batch_size. The reason we need to know `B` is for V-trace to properly
handle episode cut boundaries.
Args:
actions: An int|float32 tensor of shape [T, B, ACTION_SPACE].
actions_logp: A float32 tensor of shape [T, B].
actions_entropy: A float32 tensor of shape [T, B].
dones: A bool tensor of shape [T, B].
behaviour_action_logp: Tensor of shape [T, B].
behaviour_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
target_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
discount: A float32 scalar.
rewards: A float32 tensor of shape [T, B].
values: A float32 tensor of shape [T, B].
bootstrap_value: A float32 tensor of shape [B].
dist_class: action distribution class for logits.
valid_mask: A bool tensor of valid RNN input elements (#2992).
config: Trainer config dict.
"""
# Compute vtrace on the CPU for better perf.
with tf.device("/cpu:0"):
self.vtrace_returns = vtrace.multi_from_logits(
behaviour_action_log_probs=behaviour_action_logp,
behaviour_policy_logits=behaviour_logits,
target_policy_logits=target_logits,
actions=tf.unstack(actions, axis=2),
discounts=tf.cast(~tf.cast(dones, tf.bool), tf.float32) *
discount,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
dist_class=dist_class,
model=model,
clip_rho_threshold=tf.cast(clip_rho_threshold, tf.float32),
clip_pg_rho_threshold=tf.cast(clip_pg_rho_threshold,
tf.float32))
self.value_targets = self.vtrace_returns.vs
# The policy gradients loss.
masked_pi_loss = tf.boolean_mask(
actions_logp * self.vtrace_returns.pg_advantages, valid_mask)
self.pi_loss = -tf.reduce_sum(masked_pi_loss)
self.mean_pi_loss = -tf.reduce_mean(masked_pi_loss)
# The baseline loss.
delta = tf.boolean_mask(values - self.vtrace_returns.vs, valid_mask)
delta_squarred = tf.math.square(delta)
self.vf_loss = 0.5 * tf.reduce_sum(delta_squarred)
self.mean_vf_loss = 0.5 * tf.reduce_mean(delta_squarred)
# The entropy loss.
masked_entropy = tf.boolean_mask(actions_entropy, valid_mask)
self.entropy = tf.reduce_sum(masked_entropy)
self.mean_entropy = tf.reduce_mean(masked_entropy)
# The summed weighted loss.
self.total_loss = self.pi_loss - self.entropy * entropy_coeff
# Optional vf loss (or in a separate term due to separate
# optimizers/networks).
self.loss_wo_vf = self.total_loss
if not config["_separate_vf_optimizer"]:
self.total_loss += self.vf_loss * vf_loss_coeff
def _make_time_major(policy, seq_lens, tensor, drop_last=False):
"""Swaps batch and trajectory axis.
Args:
policy: Policy reference
seq_lens: Sequence lengths if recurrent or None
tensor: A tensor or list of tensors to reshape.
drop_last: A bool indicating whether to drop the last
trajectory item.
Returns:
res: A tensor with swapped axes or a list of tensors with
swapped axes.
"""
if isinstance(tensor, list):
return [
_make_time_major(policy, seq_lens, t, drop_last) for t in tensor
]
if policy.is_recurrent():
B = tf.shape(seq_lens)[0]
T = tf.shape(tensor)[0] // B
else:
# Important: chop the tensor into batches at known episode cut
# boundaries.
# TODO: (sven) this is kind of a hack and won't work for
# batch_mode=complete_episodes.
T = policy.config["rollout_fragment_length"]
B = tf.shape(tensor)[0] // T
rs = tf.reshape(tensor, tf.concat([[B, T], tf.shape(tensor)[1:]], axis=0))
# swap B and T axes
res = tf.transpose(
rs, [1, 0] + list(range(2, 1 + int(tf.shape(tensor).shape[0]))))
if drop_last:
return res[:-1]
return res
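# A concrete illustration of the time-major transform above (shapes are
# hypothetical): with rollout_fragment_length T = 50 and a train batch of 200
# flattened timesteps, a non-recurrent policy infers B = 200 // 50 = 4, so a
# [200, D] tensor becomes [50, 4, D]. A minimal standalone sketch of the same
# transformation in plain TensorFlow, assuming `x` has shape [B * T, D]:
#
#   T, B = 50, 4
#   x_tm = tf.transpose(tf.reshape(x, [B, T, -1]), [1, 0, 2])  # -> [T, B, D]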
def build_vtrace_loss(policy, model, dist_class, train_batch):
model_out, _ = model(train_batch)
action_dist = dist_class(model_out, model)
if isinstance(policy.action_space, gym.spaces.Discrete):
is_multidiscrete = False
output_hidden_shape = [policy.action_space.n]
elif isinstance(policy.action_space, gym.spaces.MultiDiscrete):
is_multidiscrete = True
output_hidden_shape = policy.action_space.nvec.astype(np.int32)
else:
is_multidiscrete = False
output_hidden_shape = 1
def make_time_major(*args, **kw):
return _make_time_major(policy, train_batch.get(SampleBatch.SEQ_LENS),
*args, **kw)
actions = train_batch[SampleBatch.ACTIONS]
dones = train_batch[SampleBatch.DONES]
rewards = train_batch[SampleBatch.REWARDS]
behaviour_action_logp = train_batch[SampleBatch.ACTION_LOGP]
behaviour_logits = train_batch[SampleBatch.ACTION_DIST_INPUTS]
unpacked_behaviour_logits = tf.split(
behaviour_logits, output_hidden_shape, axis=1)
unpacked_outputs = tf.split(model_out, output_hidden_shape, axis=1)
values = model.value_function()
if policy.is_recurrent():
max_seq_len = tf.reduce_max(train_batch[SampleBatch.SEQ_LENS])
mask = tf.sequence_mask(train_batch[SampleBatch.SEQ_LENS], max_seq_len)
mask = tf.reshape(mask, [-1])
else:
mask = tf.ones_like(rewards)
# Prepare actions for loss
loss_actions = actions if is_multidiscrete else tf.expand_dims(
actions, axis=1)
# Inputs are reshaped from [B * T] => [(T|T-1), B] for V-trace calc.
drop_last = policy.config["vtrace_drop_last_ts"]
policy.loss = VTraceLoss(
actions=make_time_major(loss_actions, drop_last=drop_last),
actions_logp=make_time_major(
action_dist.logp(actions), drop_last=drop_last),
actions_entropy=make_time_major(
action_dist.multi_entropy(), drop_last=drop_last),
dones=make_time_major(dones, drop_last=drop_last),
behaviour_action_logp=make_time_major(
behaviour_action_logp, drop_last=drop_last),
behaviour_logits=make_time_major(
unpacked_behaviour_logits, drop_last=drop_last),
target_logits=make_time_major(unpacked_outputs, drop_last=drop_last),
discount=policy.config["gamma"],
rewards=make_time_major(rewards, drop_last=drop_last),
values=make_time_major(values, drop_last=drop_last),
bootstrap_value=make_time_major(values)[-1],
dist_class=Categorical if is_multidiscrete else dist_class,
model=model,
valid_mask=make_time_major(mask, drop_last=drop_last),
config=policy.config,
vf_loss_coeff=policy.config["vf_loss_coeff"],
entropy_coeff=policy.entropy_coeff,
clip_rho_threshold=policy.config["vtrace_clip_rho_threshold"],
clip_pg_rho_threshold=policy.config["vtrace_clip_pg_rho_threshold"])
if policy.config.get("_separate_vf_optimizer"):
return policy.loss.loss_wo_vf, policy.loss.vf_loss
else:
return policy.loss.total_loss
def stats(policy, train_batch):
drop_last = policy.config["vtrace"] and \
policy.config["vtrace_drop_last_ts"]
values_batched = _make_time_major(
policy,
train_batch.get(SampleBatch.SEQ_LENS),
policy.model.value_function(),
drop_last=drop_last)
return {
"cur_lr": tf.cast(policy.cur_lr, tf.float64),
"policy_loss": policy.loss.mean_pi_loss,
"entropy": policy.loss.mean_entropy,
"entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64),
"var_gnorm": tf.linalg.global_norm(policy.model.trainable_variables()),
"vf_loss": policy.loss.mean_vf_loss,
"vf_explained_var": explained_variance(
tf.reshape(policy.loss.value_targets, [-1]),
tf.reshape(values_batched, [-1]))
}
def grad_stats(policy, train_batch, grads):
# We have support for more than one loss (list of lists of grads).
if policy.config.get("_tf_policy_handles_more_than_one_loss"):
grad_gnorm = [tf.linalg.global_norm(g) for g in grads]
# Old case: We have a single list of grads (only one loss term and
# optimizer).
else:
grad_gnorm = tf.linalg.global_norm(grads)
return {
"grad_gnorm": grad_gnorm,
}
def choose_optimizer(policy, config):
if policy.config["opt_type"] == "adam":
if policy.config["framework"] in ["tf2", "tfe"]:
optim = tf.keras.optimizers.Adam(policy.cur_lr)
if policy.config["_separate_vf_optimizer"]:
return optim, tf.keras.optimizers.Adam(policy.config["_lr_vf"])
else:
optim = tf1.train.AdamOptimizer(policy.cur_lr)
if policy.config["_separate_vf_optimizer"]:
return optim, tf1.train.AdamOptimizer(policy.config["_lr_vf"])
else:
if policy.config["_separate_vf_optimizer"]:
raise ValueError("RMSProp optimizer not supported for separate"
"vf- and policy losses yet! Set `opt_type=adam`")
if tfv == 2:
optim = tf.keras.optimizers.RMSprop(policy.cur_lr, config["decay"],
config["momentum"],
config["epsilon"])
else:
optim = tf1.train.RMSPropOptimizer(policy.cur_lr, config["decay"],
config["momentum"],
config["epsilon"])
return optim
def clip_gradients(policy, optimizer, loss):
# Supporting more than one loss/optimizer.
if policy.config["_tf_policy_handles_more_than_one_loss"]:
optimizers = force_list(optimizer)
losses = force_list(loss)
assert len(optimizers) == len(losses)
clipped_grads_and_vars = []
for optim, loss_ in zip(optimizers, losses):
grads_and_vars = optim.compute_gradients(
loss_, policy.model.trainable_variables())
clipped_g_and_v = []
for g, v in grads_and_vars:
if g is not None:
clipped_g, _ = tf.clip_by_global_norm(
[g], policy.config["grad_clip"])
clipped_g_and_v.append((clipped_g[0], v))
clipped_grads_and_vars.append(clipped_g_and_v)
policy.grads = [
g for g_and_v in clipped_grads_and_vars for (g, v) in g_and_v
]
    # Only one optimizer and loss term.
else:
grads_and_vars = optimizer.compute_gradients(
loss, policy.model.trainable_variables())
grads = [g for (g, v) in grads_and_vars]
policy.grads, _ = tf.clip_by_global_norm(grads,
policy.config["grad_clip"])
clipped_grads_and_vars = list(
zip(policy.grads, policy.model.trainable_variables()))
return clipped_grads_and_vars
def setup_mixins(policy, obs_space, action_space, config):
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
config["entropy_coeff_schedule"])
VTraceTFPolicy = build_tf_policy(
name="VTraceTFPolicy",
get_default_config=lambda: ray.rllib.agents.impala.impala.DEFAULT_CONFIG,
loss_fn=build_vtrace_loss,
stats_fn=stats,
grad_stats_fn=grad_stats,
optimizer_fn=choose_optimizer,
compute_gradients_fn=clip_gradients,
before_loss_init=setup_mixins,
mixins=[LearningRateSchedule, EntropyCoeffSchedule],
get_batch_divisibility_req=lambda p: p.config["rollout_fragment_length"])
| 39.739255 | 79 | 0.632562 |
ee27a6750b5d0433b2dfd4e2fad2450de84a3fb4
| 1,713 |
py
|
Python
|
predict.py
|
Dr-Awkward/UdacityImageClassifier
|
7daf65f62c0bd8e23467472e4bea18fbd1323636
|
[
"MIT"
] | null | null | null |
predict.py
|
Dr-Awkward/UdacityImageClassifier
|
7daf65f62c0bd8e23467472e4bea18fbd1323636
|
[
"MIT"
] | null | null | null |
predict.py
|
Dr-Awkward/UdacityImageClassifier
|
7daf65f62c0bd8e23467472e4bea18fbd1323636
|
[
"MIT"
] | null | null | null |
import nnModel
import torchvision
from torchvision import datasets, transforms, models
import helpers.JsonLoader
import argparse
#######################################################
# Load a saved model and predict a flower image
# 1. Get the path to the image that we will infer on
# 2. Get the path to the saved checkpoint
# 3. Get the number of most likely classes to show
# 4. Get the name of the json file to map categories to real names
# 5. Choose GPU for inference
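# Example invocation (illustrative only; file names and paths are hypothetical):
#   python predict.py ./flowers/test/1/image_06743.jpg ./checkpoint.pth \
#       --top_k 5 --category_names ./cat_to_name.json --gpu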
# Create the parser and add the arguments
parser = argparse.ArgumentParser(description="Load a Neural Network to use for inference")
parser.add_argument('data_directory',
help="The relative path to the image files to infer on.")
parser.add_argument('checkpoint',
help="The relative path to the neural network checkpoint as a pth file.")
parser.add_argument('--top_k', default=1, type=int,
                    help="The number of most likely classes to return for the predictions")
parser.add_argument('--category_names', default='./cat_to_name.json',
help="The json file (including file path) to load category names")
parser.add_argument('--gpu', default=False, action='store_true',
help="Boolean to infer with the gpu")
# Collect the arguments
args = parser.parse_args()
data_directory = args.data_directory
checkpoint = args.checkpoint
top_k = args.top_k
category_name = args.category_names
use_gpu = args.gpu
# Load the model
model = nnModel.load_model(checkpoint)
# Load the content of the json file
categories = helpers.JsonLoader.load_json(category_name)
# Predict
nnModel.predict(categories, data_directory, model, use_gpu, top_k)
| 38.066667 | 93 | 0.71045 |
964dac6a052ed1b2db006714c7503b972ee6a0cf
| 2,040 |
py
|
Python
|
tests/zquantum/core/history/artifact_storage_test.py
|
alexjuda2/z-quantum-core
|
c258100dbd091f0b22495b77b36399426ae9abac
|
[
"Apache-2.0"
] | 24 |
2020-04-15T17:36:59.000Z
|
2022-01-25T05:02:14.000Z
|
tests/zquantum/core/history/artifact_storage_test.py
|
alexjuda2/z-quantum-core
|
c258100dbd091f0b22495b77b36399426ae9abac
|
[
"Apache-2.0"
] | 177 |
2020-04-23T15:19:59.000Z
|
2022-03-30T18:06:17.000Z
|
tests/zquantum/core/history/artifact_storage_test.py
|
alexjuda2/z-quantum-core
|
c258100dbd091f0b22495b77b36399426ae9abac
|
[
"Apache-2.0"
] | 19 |
2020-06-24T10:56:02.000Z
|
2021-09-30T13:02:21.000Z
|
"""Test cases for storing artifacts and recording functions that store them."""
import numpy as np
import pytest
from zquantum.core.history.example_functions import Function5, function_3, function_4
from zquantum.core.history.recorder import ArtifactCollection, recorder, store_artifact
from zquantum.core.history.save_conditions import every_nth
from zquantum.core.interfaces.functions import CallableStoringArtifacts
def test_store_artifact_by_default_does_not_force_artifacts_storage():
artifacts = ArtifactCollection()
store_artifact(artifacts)("bitstring", "1111")
assert not artifacts.forced
@pytest.mark.parametrize(
"source_function,params_sequence,expected_artifacts",
[
(
function_3,
[3, 4, 5],
[{"bitstring": string} for string in ["11", "100", "101"]],
),
(
function_4,
[0, 10, 21],
[{"bitstring": string} for string in ["0", "1010", "10101"]],
),
(
Function5(2),
[np.array([1.5, 2, 3]), np.array([4.0, 2.5, 3.0])],
[{"something": 3.5}, {"something": 6.5}],
),
],
)
def test_recorder_stores_all_artifacts_by_default(
source_function: CallableStoringArtifacts, params_sequence, expected_artifacts
):
function = recorder(source_function)
for param in params_sequence:
function(param)
assert [entry.artifacts for entry in function.history] == expected_artifacts
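# The test above covers the recorder's default behaviour (artifacts from every
# call are kept); the test below checks that a force-stored artifact still
# produces a history entry even though the every_nth(5) save condition would
# not have saved that call on its own.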
def test_recorder_stores_history_entry_if_artifact_is_force_stored():
function = recorder(function_4, save_condition=every_nth(5))
for n in [0, 2, 3, 5, 7, 5]:
function(n)
assert [entry.call_number for entry in function.history] == [0, 1, 5]
assert [entry.value for entry in function.history] == [0, 4, 10]
assert [entry.params for entry in function.history] == [0, 2, 5]
assert [entry.artifacts for entry in function.history] == [
{"bitstring": "0"},
{"bitstring": "10"},
{"bitstring": "101"},
]
| 34.576271 | 87 | 0.657353 |
f6ca6423fa22d1a1f2c1bed5c669e5caa674f5d6
| 48,740 |
py
|
Python
|
nebula2_fork/graph/ttypes.py
|
linzhiming0826/nebula-python
|
0c508aa9780981c1fef2bf826d624a69ca2c64b0
|
[
"Apache-2.0"
] | null | null | null |
nebula2_fork/graph/ttypes.py
|
linzhiming0826/nebula-python
|
0c508aa9780981c1fef2bf826d624a69ca2c64b0
|
[
"Apache-2.0"
] | null | null | null |
nebula2_fork/graph/ttypes.py
|
linzhiming0826/nebula-python
|
0c508aa9780981c1fef2bf826d624a69ca2c64b0
|
[
"Apache-2.0"
] | null | null | null |
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
import sys
from nebula2_fork.fbthrift.util.Recursive import fix_spec
from nebula2_fork.fbthrift.Thrift import TType, TMessageType, TPriority, TRequestContext, TProcessorEventHandler, TServerInterface, TProcessor, TException, TApplicationException, UnimplementedTypedef
from nebula2_fork.fbthrift.protocol.TProtocol import TProtocolException
import nebula2_fork.common.ttypes
import pprint
import warnings
from nebula2_fork.fbthrift import Thrift
from nebula2_fork.fbthrift.transport import TTransport
from nebula2_fork.fbthrift.protocol import TBinaryProtocol
from nebula2_fork.fbthrift.protocol import TCompactProtocol
from nebula2_fork.fbthrift.protocol import THeaderProtocol
fastproto = None
try:
from nebula2_fork.fbthrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'ProfilingStats', 'PlanNodeBranchInfo', 'Pair', 'PlanNodeDescription', 'PlanDescription', 'ExecutionResponse', 'AuthResponse']
class ProfilingStats:
"""
Attributes:
- rows
- exec_duration_in_us
- total_duration_in_us
- other_stats
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.rows = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.exec_duration_in_us = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.total_duration_in_us = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.other_stats = {}
(_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
if _size0 >= 0:
for _i4 in six.moves.range(_size0):
_key5 = iprot.readString()
_val6 = iprot.readString()
self.other_stats[_key5] = _val6
else:
while iprot.peekMap():
_key7 = iprot.readString()
_val8 = iprot.readString()
self.other_stats[_key7] = _val8
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('ProfilingStats')
if self.rows != None:
oprot.writeFieldBegin('rows', TType.I64, 1)
oprot.writeI64(self.rows)
oprot.writeFieldEnd()
if self.exec_duration_in_us != None:
oprot.writeFieldBegin('exec_duration_in_us', TType.I64, 2)
oprot.writeI64(self.exec_duration_in_us)
oprot.writeFieldEnd()
if self.total_duration_in_us != None:
oprot.writeFieldBegin('total_duration_in_us', TType.I64, 3)
oprot.writeI64(self.total_duration_in_us)
oprot.writeFieldEnd()
if self.other_stats != None:
oprot.writeFieldBegin('other_stats', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.other_stats))
for kiter9,viter10 in self.other_stats.items():
oprot.writeString(kiter9)
oprot.writeString(viter10)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.rows is not None:
value = pprint.pformat(self.rows, indent=0)
value = padding.join(value.splitlines(True))
L.append(' rows=%s' % (value))
if self.exec_duration_in_us is not None:
value = pprint.pformat(self.exec_duration_in_us, indent=0)
value = padding.join(value.splitlines(True))
L.append(' exec_duration_in_us=%s' % (value))
if self.total_duration_in_us is not None:
value = pprint.pformat(self.total_duration_in_us, indent=0)
value = padding.join(value.splitlines(True))
L.append(' total_duration_in_us=%s' % (value))
if self.other_stats is not None:
value = pprint.pformat(self.other_stats, indent=0)
value = padding.join(value.splitlines(True))
L.append(' other_stats=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class PlanNodeBranchInfo:
"""
Attributes:
- is_do_branch
- condition_node_id
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.is_do_branch = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.condition_node_id = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('PlanNodeBranchInfo')
if self.is_do_branch != None:
oprot.writeFieldBegin('is_do_branch', TType.BOOL, 1)
oprot.writeBool(self.is_do_branch)
oprot.writeFieldEnd()
if self.condition_node_id != None:
oprot.writeFieldBegin('condition_node_id', TType.I64, 2)
oprot.writeI64(self.condition_node_id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.is_do_branch is not None:
value = pprint.pformat(self.is_do_branch, indent=0)
value = padding.join(value.splitlines(True))
L.append(' is_do_branch=%s' % (value))
if self.condition_node_id is not None:
value = pprint.pformat(self.condition_node_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' condition_node_id=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class Pair:
"""
Attributes:
- key
- value
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.value = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('Pair')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
if self.value != None:
oprot.writeFieldBegin('value', TType.STRING, 2)
oprot.writeString(self.value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.key is not None:
value = pprint.pformat(self.key, indent=0)
value = padding.join(value.splitlines(True))
L.append(' key=%s' % (value))
if self.value is not None:
value = pprint.pformat(self.value, indent=0)
value = padding.join(value.splitlines(True))
L.append(' value=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class PlanNodeDescription:
"""
Attributes:
- name
- id
- output_var
- description
- profiles
- branch_info
- dependencies
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.output_var = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.description = []
(_etype14, _size11) = iprot.readListBegin()
if _size11 >= 0:
for _i15 in six.moves.range(_size11):
_elem16 = Pair()
_elem16.read(iprot)
self.description.append(_elem16)
else:
while iprot.peekList():
_elem17 = Pair()
_elem17.read(iprot)
self.description.append(_elem17)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.profiles = []
(_etype21, _size18) = iprot.readListBegin()
if _size18 >= 0:
for _i22 in six.moves.range(_size18):
_elem23 = ProfilingStats()
_elem23.read(iprot)
self.profiles.append(_elem23)
else:
while iprot.peekList():
_elem24 = ProfilingStats()
_elem24.read(iprot)
self.profiles.append(_elem24)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.branch_info = PlanNodeBranchInfo()
self.branch_info.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.LIST:
self.dependencies = []
(_etype28, _size25) = iprot.readListBegin()
if _size25 >= 0:
for _i29 in six.moves.range(_size25):
_elem30 = iprot.readI64()
self.dependencies.append(_elem30)
else:
while iprot.peekList():
_elem31 = iprot.readI64()
self.dependencies.append(_elem31)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('PlanNodeDescription')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.id != None:
oprot.writeFieldBegin('id', TType.I64, 2)
oprot.writeI64(self.id)
oprot.writeFieldEnd()
if self.output_var != None:
oprot.writeFieldBegin('output_var', TType.STRING, 3)
oprot.writeString(self.output_var)
oprot.writeFieldEnd()
if self.description != None:
oprot.writeFieldBegin('description', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.description))
for iter32 in self.description:
iter32.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.profiles != None:
oprot.writeFieldBegin('profiles', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.profiles))
for iter33 in self.profiles:
iter33.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.branch_info != None:
oprot.writeFieldBegin('branch_info', TType.STRUCT, 6)
self.branch_info.write(oprot)
oprot.writeFieldEnd()
if self.dependencies != None:
oprot.writeFieldBegin('dependencies', TType.LIST, 7)
oprot.writeListBegin(TType.I64, len(self.dependencies))
for iter34 in self.dependencies:
oprot.writeI64(iter34)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.name is not None:
value = pprint.pformat(self.name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' name=%s' % (value))
if self.id is not None:
value = pprint.pformat(self.id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' id=%s' % (value))
if self.output_var is not None:
value = pprint.pformat(self.output_var, indent=0)
value = padding.join(value.splitlines(True))
L.append(' output_var=%s' % (value))
if self.description is not None:
value = pprint.pformat(self.description, indent=0)
value = padding.join(value.splitlines(True))
L.append(' description=%s' % (value))
if self.profiles is not None:
value = pprint.pformat(self.profiles, indent=0)
value = padding.join(value.splitlines(True))
L.append(' profiles=%s' % (value))
if self.branch_info is not None:
value = pprint.pformat(self.branch_info, indent=0)
value = padding.join(value.splitlines(True))
L.append(' branch_info=%s' % (value))
if self.dependencies is not None:
value = pprint.pformat(self.dependencies, indent=0)
value = padding.join(value.splitlines(True))
L.append(' dependencies=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class PlanDescription:
"""
Attributes:
- plan_node_descs
- node_index_map
- format
- optimize_time_in_us
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.plan_node_descs = []
(_etype38, _size35) = iprot.readListBegin()
if _size35 >= 0:
for _i39 in six.moves.range(_size35):
_elem40 = PlanNodeDescription()
_elem40.read(iprot)
self.plan_node_descs.append(_elem40)
else:
while iprot.peekList():
_elem41 = PlanNodeDescription()
_elem41.read(iprot)
self.plan_node_descs.append(_elem41)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.node_index_map = {}
(_ktype43, _vtype44, _size42 ) = iprot.readMapBegin()
if _size42 >= 0:
for _i46 in six.moves.range(_size42):
_key47 = iprot.readI64()
_val48 = iprot.readI64()
self.node_index_map[_key47] = _val48
else:
while iprot.peekMap():
_key49 = iprot.readI64()
_val50 = iprot.readI64()
self.node_index_map[_key49] = _val50
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.format = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.optimize_time_in_us = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('PlanDescription')
if self.plan_node_descs != None:
oprot.writeFieldBegin('plan_node_descs', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.plan_node_descs))
for iter51 in self.plan_node_descs:
iter51.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.node_index_map != None:
oprot.writeFieldBegin('node_index_map', TType.MAP, 2)
oprot.writeMapBegin(TType.I64, TType.I64, len(self.node_index_map))
for kiter52,viter53 in self.node_index_map.items():
oprot.writeI64(kiter52)
oprot.writeI64(viter53)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.format != None:
oprot.writeFieldBegin('format', TType.STRING, 3)
oprot.writeString(self.format)
oprot.writeFieldEnd()
if self.optimize_time_in_us != None:
oprot.writeFieldBegin('optimize_time_in_us', TType.I32, 4)
oprot.writeI32(self.optimize_time_in_us)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.plan_node_descs is not None:
value = pprint.pformat(self.plan_node_descs, indent=0)
value = padding.join(value.splitlines(True))
L.append(' plan_node_descs=%s' % (value))
if self.node_index_map is not None:
value = pprint.pformat(self.node_index_map, indent=0)
value = padding.join(value.splitlines(True))
L.append(' node_index_map=%s' % (value))
if self.format is not None:
value = pprint.pformat(self.format, indent=0)
value = padding.join(value.splitlines(True))
L.append(' format=%s' % (value))
if self.optimize_time_in_us is not None:
value = pprint.pformat(self.optimize_time_in_us, indent=0)
value = padding.join(value.splitlines(True))
L.append(' optimize_time_in_us=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class ExecutionResponse:
"""
Attributes:
- error_code
- latency_in_us
- data
- space_name
- error_msg
- plan_desc
- comment
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.error_code = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.latency_in_us = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.data = nebula2_fork.common.ttypes.DataSet()
self.data.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.space_name = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.error_msg = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.plan_desc = PlanDescription()
self.plan_desc.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.comment = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('ExecutionResponse')
if self.error_code != None:
oprot.writeFieldBegin('error_code', TType.I32, 1)
oprot.writeI32(self.error_code)
oprot.writeFieldEnd()
if self.latency_in_us != None:
oprot.writeFieldBegin('latency_in_us', TType.I32, 2)
oprot.writeI32(self.latency_in_us)
oprot.writeFieldEnd()
if self.data != None:
oprot.writeFieldBegin('data', TType.STRUCT, 3)
self.data.write(oprot)
oprot.writeFieldEnd()
if self.space_name != None:
oprot.writeFieldBegin('space_name', TType.STRING, 4)
oprot.writeString(self.space_name)
oprot.writeFieldEnd()
if self.error_msg != None:
oprot.writeFieldBegin('error_msg', TType.STRING, 5)
oprot.writeString(self.error_msg)
oprot.writeFieldEnd()
if self.plan_desc != None:
oprot.writeFieldBegin('plan_desc', TType.STRUCT, 6)
self.plan_desc.write(oprot)
oprot.writeFieldEnd()
if self.comment != None:
oprot.writeFieldBegin('comment', TType.STRING, 7)
oprot.writeString(self.comment)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.error_code is not None:
value = pprint.pformat(self.error_code, indent=0)
value = padding.join(value.splitlines(True))
L.append(' error_code=%s' % (value))
if self.latency_in_us is not None:
value = pprint.pformat(self.latency_in_us, indent=0)
value = padding.join(value.splitlines(True))
L.append(' latency_in_us=%s' % (value))
if self.data is not None:
value = pprint.pformat(self.data, indent=0)
value = padding.join(value.splitlines(True))
L.append(' data=%s' % (value))
if self.space_name is not None:
value = pprint.pformat(self.space_name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' space_name=%s' % (value))
if self.error_msg is not None:
value = pprint.pformat(self.error_msg, indent=0)
value = padding.join(value.splitlines(True))
L.append(' error_msg=%s' % (value))
if self.plan_desc is not None:
value = pprint.pformat(self.plan_desc, indent=0)
value = padding.join(value.splitlines(True))
L.append(' plan_desc=%s' % (value))
if self.comment is not None:
value = pprint.pformat(self.comment, indent=0)
value = padding.join(value.splitlines(True))
L.append(' comment=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
class AuthResponse:
"""
Attributes:
- error_code
- error_msg
- session_id
- time_zone_offset_seconds
- time_zone_name
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.error_code = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.error_msg = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.session_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.time_zone_offset_seconds = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.time_zone_name = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('AuthResponse')
if self.error_code != None:
oprot.writeFieldBegin('error_code', TType.I32, 1)
oprot.writeI32(self.error_code)
oprot.writeFieldEnd()
if self.error_msg != None:
oprot.writeFieldBegin('error_msg', TType.STRING, 2)
oprot.writeString(self.error_msg)
oprot.writeFieldEnd()
if self.session_id != None:
oprot.writeFieldBegin('session_id', TType.I64, 3)
oprot.writeI64(self.session_id)
oprot.writeFieldEnd()
if self.time_zone_offset_seconds != None:
oprot.writeFieldBegin('time_zone_offset_seconds', TType.I32, 4)
oprot.writeI32(self.time_zone_offset_seconds)
oprot.writeFieldEnd()
if self.time_zone_name != None:
oprot.writeFieldBegin('time_zone_name', TType.STRING, 5)
oprot.writeString(self.time_zone_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.error_code is not None:
value = pprint.pformat(self.error_code, indent=0)
value = padding.join(value.splitlines(True))
L.append(' error_code=%s' % (value))
if self.error_msg is not None:
value = pprint.pformat(self.error_msg, indent=0)
value = padding.join(value.splitlines(True))
L.append(' error_msg=%s' % (value))
if self.session_id is not None:
value = pprint.pformat(self.session_id, indent=0)
value = padding.join(value.splitlines(True))
L.append(' session_id=%s' % (value))
if self.time_zone_offset_seconds is not None:
value = pprint.pformat(self.time_zone_offset_seconds, indent=0)
value = padding.join(value.splitlines(True))
L.append(' time_zone_offset_seconds=%s' % (value))
if self.time_zone_name is not None:
value = pprint.pformat(self.time_zone_name, indent=0)
value = padding.join(value.splitlines(True))
L.append(' time_zone_name=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
all_structs.append(ProfilingStats)
ProfilingStats.thrift_spec = (
None, # 0
(1, TType.I64, 'rows', None, None, 0, ), # 1
(2, TType.I64, 'exec_duration_in_us', None, None, 0, ), # 2
(3, TType.I64, 'total_duration_in_us', None, None, 0, ), # 3
(4, TType.MAP, 'other_stats', (TType.STRING,False,TType.STRING,False), None, 1, ), # 4
)
ProfilingStats.thrift_struct_annotations = {
"cpp.type": "nebula::ProfilingStats",
}
ProfilingStats.thrift_field_annotations = {
}
def ProfilingStats__init__(self, rows=None, exec_duration_in_us=None, total_duration_in_us=None, other_stats=None,):
self.rows = rows
self.exec_duration_in_us = exec_duration_in_us
self.total_duration_in_us = total_duration_in_us
self.other_stats = other_stats
ProfilingStats.__init__ = ProfilingStats__init__
def ProfilingStats__setstate__(self, state):
state.setdefault('rows', None)
state.setdefault('exec_duration_in_us', None)
state.setdefault('total_duration_in_us', None)
state.setdefault('other_stats', None)
self.__dict__ = state
ProfilingStats.__getstate__ = lambda self: self.__dict__.copy()
ProfilingStats.__setstate__ = ProfilingStats__setstate__
all_structs.append(PlanNodeBranchInfo)
PlanNodeBranchInfo.thrift_spec = (
None, # 0
(1, TType.BOOL, 'is_do_branch', None, None, 0, ), # 1
(2, TType.I64, 'condition_node_id', None, None, 0, ), # 2
)
PlanNodeBranchInfo.thrift_struct_annotations = {
"cpp.type": "nebula::PlanNodeBranchInfo",
}
PlanNodeBranchInfo.thrift_field_annotations = {
}
def PlanNodeBranchInfo__init__(self, is_do_branch=None, condition_node_id=None,):
self.is_do_branch = is_do_branch
self.condition_node_id = condition_node_id
PlanNodeBranchInfo.__init__ = PlanNodeBranchInfo__init__
def PlanNodeBranchInfo__setstate__(self, state):
state.setdefault('is_do_branch', None)
state.setdefault('condition_node_id', None)
self.__dict__ = state
PlanNodeBranchInfo.__getstate__ = lambda self: self.__dict__.copy()
PlanNodeBranchInfo.__setstate__ = PlanNodeBranchInfo__setstate__
all_structs.append(Pair)
Pair.thrift_spec = (
None, # 0
(1, TType.STRING, 'key', False, None, 0, ), # 1
(2, TType.STRING, 'value', False, None, 0, ), # 2
)
Pair.thrift_struct_annotations = {
"cpp.type": "nebula::Pair",
}
Pair.thrift_field_annotations = {
}
def Pair__init__(self, key=None, value=None,):
self.key = key
self.value = value
Pair.__init__ = Pair__init__
def Pair__setstate__(self, state):
state.setdefault('key', None)
state.setdefault('value', None)
self.__dict__ = state
Pair.__getstate__ = lambda self: self.__dict__.copy()
Pair.__setstate__ = Pair__setstate__
all_structs.append(PlanNodeDescription)
PlanNodeDescription.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', False, None, 0, ), # 1
(2, TType.I64, 'id', None, None, 0, ), # 2
(3, TType.STRING, 'output_var', False, None, 0, ), # 3
(4, TType.LIST, 'description', (TType.STRUCT,[Pair, Pair.thrift_spec, False]), None, 1, ), # 4
(5, TType.LIST, 'profiles', (TType.STRUCT,[ProfilingStats, ProfilingStats.thrift_spec, False]), None, 1, ), # 5
(6, TType.STRUCT, 'branch_info', [PlanNodeBranchInfo, PlanNodeBranchInfo.thrift_spec, False], None, 1, ), # 6
(7, TType.LIST, 'dependencies', (TType.I64,None), None, 1, ), # 7
)
PlanNodeDescription.thrift_struct_annotations = {
"cpp.type": "nebula::PlanNodeDescription",
}
PlanNodeDescription.thrift_field_annotations = {
}
def PlanNodeDescription__init__(self, name=None, id=None, output_var=None, description=None, profiles=None, branch_info=None, dependencies=None,):
self.name = name
self.id = id
self.output_var = output_var
self.description = description
self.profiles = profiles
self.branch_info = branch_info
self.dependencies = dependencies
PlanNodeDescription.__init__ = PlanNodeDescription__init__
def PlanNodeDescription__setstate__(self, state):
state.setdefault('name', None)
state.setdefault('id', None)
state.setdefault('output_var', None)
state.setdefault('description', None)
state.setdefault('profiles', None)
state.setdefault('branch_info', None)
state.setdefault('dependencies', None)
self.__dict__ = state
PlanNodeDescription.__getstate__ = lambda self: self.__dict__.copy()
PlanNodeDescription.__setstate__ = PlanNodeDescription__setstate__
all_structs.append(PlanDescription)
PlanDescription.thrift_spec = (
None, # 0
(1, TType.LIST, 'plan_node_descs', (TType.STRUCT,[PlanNodeDescription, PlanNodeDescription.thrift_spec, False]), None, 0, ), # 1
(2, TType.MAP, 'node_index_map', (TType.I64,None,TType.I64,None), None, 0, ), # 2
(3, TType.STRING, 'format', False, None, 0, ), # 3
(4, TType.I32, 'optimize_time_in_us', None, None, 0, ), # 4
)
PlanDescription.thrift_struct_annotations = {
"cpp.type": "nebula::PlanDescription",
}
PlanDescription.thrift_field_annotations = {
}
def PlanDescription__init__(self, plan_node_descs=None, node_index_map=None, format=None, optimize_time_in_us=None,):
self.plan_node_descs = plan_node_descs
self.node_index_map = node_index_map
self.format = format
self.optimize_time_in_us = optimize_time_in_us
PlanDescription.__init__ = PlanDescription__init__
def PlanDescription__setstate__(self, state):
state.setdefault('plan_node_descs', None)
state.setdefault('node_index_map', None)
state.setdefault('format', None)
state.setdefault('optimize_time_in_us', None)
self.__dict__ = state
PlanDescription.__getstate__ = lambda self: self.__dict__.copy()
PlanDescription.__setstate__ = PlanDescription__setstate__
all_structs.append(ExecutionResponse)
ExecutionResponse.thrift_spec = (
None, # 0
(1, TType.I32, 'error_code', nebula2_fork.common.ttypes.ErrorCode, None, 0, ), # 1
(2, TType.I32, 'latency_in_us', None, None, 0, ), # 2
(3, TType.STRUCT, 'data', [nebula2_fork.common.ttypes.DataSet, nebula2_fork.common.ttypes.DataSet.thrift_spec, False], None, 1, ), # 3
(4, TType.STRING, 'space_name', False, None, 1, ), # 4
(5, TType.STRING, 'error_msg', False, None, 1, ), # 5
(6, TType.STRUCT, 'plan_desc', [PlanDescription, PlanDescription.thrift_spec, False], None, 1, ), # 6
(7, TType.STRING, 'comment', False, None, 1, ), # 7
)
ExecutionResponse.thrift_struct_annotations = {
"cpp.type": "nebula::ExecutionResponse",
}
ExecutionResponse.thrift_field_annotations = {
}
def ExecutionResponse__init__(self, error_code=None, latency_in_us=None, data=None, space_name=None, error_msg=None, plan_desc=None, comment=None,):
self.error_code = error_code
self.latency_in_us = latency_in_us
self.data = data
self.space_name = space_name
self.error_msg = error_msg
self.plan_desc = plan_desc
self.comment = comment
ExecutionResponse.__init__ = ExecutionResponse__init__
def ExecutionResponse__setstate__(self, state):
state.setdefault('error_code', None)
state.setdefault('latency_in_us', None)
state.setdefault('data', None)
state.setdefault('space_name', None)
state.setdefault('error_msg', None)
state.setdefault('plan_desc', None)
state.setdefault('comment', None)
self.__dict__ = state
ExecutionResponse.__getstate__ = lambda self: self.__dict__.copy()
ExecutionResponse.__setstate__ = ExecutionResponse__setstate__
all_structs.append(AuthResponse)
AuthResponse.thrift_spec = (
None, # 0
(1, TType.I32, 'error_code', nebula2_fork.common.ttypes.ErrorCode, None, 0, ), # 1
(2, TType.STRING, 'error_msg', False, None, 1, ), # 2
(3, TType.I64, 'session_id', None, None, 1, ), # 3
(4, TType.I32, 'time_zone_offset_seconds', None, None, 1, ), # 4
(5, TType.STRING, 'time_zone_name', False, None, 1, ), # 5
)
AuthResponse.thrift_struct_annotations = {
"cpp.type": "nebula::AuthResponse",
}
AuthResponse.thrift_field_annotations = {
}
def AuthResponse__init__(self, error_code=None, error_msg=None, session_id=None, time_zone_offset_seconds=None, time_zone_name=None,):
self.error_code = error_code
self.error_msg = error_msg
self.session_id = session_id
self.time_zone_offset_seconds = time_zone_offset_seconds
self.time_zone_name = time_zone_name
AuthResponse.__init__ = AuthResponse__init__
def AuthResponse__setstate__(self, state):
state.setdefault('error_code', None)
state.setdefault('error_msg', None)
state.setdefault('session_id', None)
state.setdefault('time_zone_offset_seconds', None)
state.setdefault('time_zone_name', None)
self.__dict__ = state
AuthResponse.__getstate__ = lambda self: self.__dict__.copy()
AuthResponse.__setstate__ = AuthResponse__setstate__
fix_spec(all_structs)
del all_structs
| 40.01642 | 339 | 0.693086 |
7f8e0825b399905383ed05229e29b275f4aba6d9
| 5,848 |
py
|
Python
|
maxipago/managers/payment/recurring.py
|
joma-s/maxipago-sdk-python
|
16428e6aca78a18a4e2bf4e2b48cab1bc65177be
|
[
"MIT"
] | null | null | null |
maxipago/managers/payment/recurring.py
|
joma-s/maxipago-sdk-python
|
16428e6aca78a18a4e2bf4e2b48cab1bc65177be
|
[
"MIT"
] | null | null | null |
maxipago/managers/payment/recurring.py
|
joma-s/maxipago-sdk-python
|
16428e6aca78a18a4e2bf4e2b48cab1bc65177be
|
[
"MIT"
] | 1 |
2021-10-20T19:27:49.000Z
|
2021-10-20T19:27:49.000Z
|
# coding: utf-8
from maxipago.managers.base import ManagerTransaction, ManagerApi
from maxipago.requesters.payment import PaymentRecurringRequester
from maxipago.resources.payment import PaymentResource
from maxipago.resources.recurring import CancelResource, EditResource
class PaymentRecurringManager(ManagerTransaction):
def add(self, **kwargs):
fields = (
('processor_id', {'translated_name': 'processorID'}),
('reference_num', {'translated_name': 'referenceNum'}),
('ip_address', {'translated_name': 'ipAddress', 'required': False}),
('card_number', {'translated_name': 'transactionDetail/payType/creditCard/number'}),
('card_expiration_month', {'translated_name': 'transactionDetail/payType/creditCard/expMonth'}),
('card_expiration_year', {'translated_name': 'transactionDetail/payType/creditCard/expYear'}),
('card_cvv', {'translated_name': 'transactionDetail/payType/creditCard/cvvNumber', 'required': False}),
('charge_total', {'translated_name': 'payment/chargeTotal'}),
('currency_code', {'translated_name': 'payment/currencyCode', 'required': True}),
('recurring_action', {'translated_name': 'recurring/action', 'default': 'new'}),
('recurring_start', {'translated_name': 'recurring/startDate', 'required': False}),
('recurring_last', {'translated_name': 'recurring/lastDate', 'required': False}),
('recurring_frequency', {'translated_name': 'recurring/frequency'}),
('recurring_period', {'translated_name': 'recurring/period'}),
('recurring_first_amount', {'translated_name': 'recurring/firstAmount', 'required': False}),
('recurring_last_amount', {'translated_name': 'recurring/lastAmount', 'required': False}),
('recurring_installments', {'translated_name': 'recurring/installments'}),
('recurring_failure_threshold', {'translated_name': 'recurring/failureThreshold', 'required': False}),
)
requester = PaymentRecurringRequester(fields, kwargs)
return self.send(command='recurringPayment', requester=requester, resource=PaymentResource)
def delete(self, **kwargs):
fields = (
('order_id', {'translated_name': 'orderID'}),
)
requester = PaymentRecurringRequester(fields, kwargs)
manager = ManagerApi(maxid=self.maxid, api_key=self.api_key, api_version=self.api_version, sandbox=self.sandbox)
return manager.send(command='cancel-recurring', requester=requester, resource=CancelResource)
class PaymentRecurringManagerApi(ManagerApi):
def edit(self, **kwargs):
fields = (
('order_id', {'translated_name': 'orderID'}),
('card_number', {'translated_name': 'paymentInfo/cardInfo/creditCardNumber', 'required': False}),
('card_expiration_month', {'translated_name': 'paymentInfo/cardInfo/expirationMonth', 'required': False}),
('card_expiration_year', {'translated_name': 'paymentInfo/cardInfo/expirationYear', 'required': False}),
('card_soft_description', {'translated_name': 'paymentInfo/cardInfo/softDescriptor', 'required': False}),
('charge_total', {'translated_name': 'paymentInfo/chargeTotal'}),
            ('processor_id', {'translated_name': 'recurring/processorID', 'required': False}),
('recurring_action', {'translated_name': 'recurring/action', 'default': 'enabled', 'required': True}),
('recurring_installments', {'translated_name': 'recurring/installments'}),
('recurring_next_date', {'translated_name': 'recurring/nextFireDate', 'required': False}),
('recurring_day', {'translated_name': 'recurring/fireDay', 'required': False}),
('recurring_period', {'translated_name': 'recurring/period', 'required': False}),
('recurring_last_date', {'translated_name': 'recurring/lastDate', 'required': False}),
('recurring_last_amount', {'translated_name': 'recurring/lastAmount', 'required': False}),
('billing_name', {'translated_name': 'billingInfo/name', 'required': False}),
('billing_address', {'translated_name': 'billingInfo/address1', 'required': False}),
('billing_address2', {'translated_name': 'billingInfo/address2', 'required': False}),
('billing_city', {'translated_name': 'billingInfo/city', 'required': False}),
('billing_postalcode', {'translated_name': 'billingInfo/zip', 'required': False}),
('billing_country', {'translated_name': 'billingInfo/country', 'required': False}),
('billing_email', {'translated_name': 'billingInfo/email', 'required': False}),
('billing_phone', {'translated_name': 'billingInfo/phone', 'required': False}),
('shipping_name', {'translated_name': 'shippingInfo/name', 'required': False}),
('shipping_address', {'translated_name': 'shippingInfo/address1', 'required': False}),
('shipping_address2', {'translated_name': 'shippingInfo/address2', 'required': False}),
('shipping_city', {'translated_name': 'shippingInfo/city', 'required': False}),
('shipping_postalcode', {'translated_name': 'shippingInfo/zip', 'required': False}),
('shipping_country', {'translated_name': 'shippingInfo/country', 'required': False}),
('shipping_email', {'translated_name': 'shippingInfo/email', 'required': False}),
('shipping_phone', {'translated_name': 'shippingInfo/phone', 'required': False}),
)
requester = PaymentRecurringRequester(fields, kwargs)
return self.send(command='modify-recurring', requester=requester, resource=EditResource)
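# A minimal usage sketch, assuming the ManagerTransaction base class takes the
# merchant credentials (maxid, api_key, api_version, sandbox) suggested by the
# ManagerApi call above; every value below is a placeholder, not real data.
if __name__ == '__main__':
    manager = PaymentRecurringManager(maxid='100', api_key='changeme',
                                      api_version='3.1.1.15', sandbox=True)
    response = manager.add(
        processor_id='1',
        reference_num='order-0001',
        card_number='4111111111111111',
        card_expiration_month='12',
        card_expiration_year='2030',
        charge_total='10.00',
        currency_code='BRL',
        recurring_frequency='1',
        recurring_period='monthly',
        recurring_installments='12',
    )
    print(response)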
| 62.88172 | 120 | 0.654412 |
e0d42391efe5d36f64510ec6d3785e8194a32107
| 1,659 |
py
|
Python
|
thirdparty/instant-meshes/instant-meshes-dust3d/ext/nanogui/ext/pybind11/tests/test_operator_overloading.py
|
MelvinG24/dust3d
|
c4936fd900a9a48220ebb811dfeaea0effbae3ee
|
[
"MIT"
] | 2,392 |
2016-12-17T14:14:12.000Z
|
2022-03-30T19:40:40.000Z
|
thirdparty/instant-meshes/instant-meshes-dust3d/ext/nanogui/ext/pybind11/tests/test_operator_overloading.py
|
MelvinG24/dust3d
|
c4936fd900a9a48220ebb811dfeaea0effbae3ee
|
[
"MIT"
] | 106 |
2018-04-19T17:47:31.000Z
|
2022-03-01T19:44:11.000Z
|
thirdparty/instant-meshes/instant-meshes-dust3d/ext/nanogui/ext/pybind11/tests/test_operator_overloading.py
|
MelvinG24/dust3d
|
c4936fd900a9a48220ebb811dfeaea0effbae3ee
|
[
"MIT"
] | 184 |
2017-11-15T09:55:37.000Z
|
2022-02-21T16:30:46.000Z
|
def test_operator_overloading():
from pybind11_tests import Vector2, Vector, ConstructorStats
v1 = Vector2(1, 2)
v2 = Vector(3, -1)
assert str(v1) == "[1.000000, 2.000000]"
assert str(v2) == "[3.000000, -1.000000]"
assert str(v1 + v2) == "[4.000000, 1.000000]"
assert str(v1 - v2) == "[-2.000000, 3.000000]"
assert str(v1 - 8) == "[-7.000000, -6.000000]"
assert str(v1 + 8) == "[9.000000, 10.000000]"
assert str(v1 * 8) == "[8.000000, 16.000000]"
assert str(v1 / 8) == "[0.125000, 0.250000]"
assert str(8 - v1) == "[7.000000, 6.000000]"
assert str(8 + v1) == "[9.000000, 10.000000]"
assert str(8 * v1) == "[8.000000, 16.000000]"
assert str(8 / v1) == "[8.000000, 4.000000]"
v1 += v2
v1 *= 2
assert str(v1) == "[8.000000, 2.000000]"
cstats = ConstructorStats.get(Vector2)
assert cstats.alive() == 2
del v1
assert cstats.alive() == 1
del v2
assert cstats.alive() == 0
assert cstats.values() == ['[1.000000, 2.000000]', '[3.000000, -1.000000]',
'[4.000000, 1.000000]', '[-2.000000, 3.000000]',
'[-7.000000, -6.000000]', '[9.000000, 10.000000]',
'[8.000000, 16.000000]', '[0.125000, 0.250000]',
'[7.000000, 6.000000]', '[9.000000, 10.000000]',
'[8.000000, 16.000000]', '[8.000000, 4.000000]']
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
assert cstats.move_constructions >= 10
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
| 39.5 | 81 | 0.537071 |
32d052655d26bf77f38daaa518d73819c6bf557a
| 1,486 |
py
|
Python
|
util/credits.py
|
jessescn/ControleAcademico
|
496cb6b40ca894cecff1f8966dbf0389be392095
|
[
"MIT"
] | 8 |
2019-09-25T18:24:24.000Z
|
2021-02-28T02:59:17.000Z
|
util/credits.py
|
jessescn/ControleAcademico
|
496cb6b40ca894cecff1f8966dbf0389be392095
|
[
"MIT"
] | 14 |
2019-09-19T17:13:48.000Z
|
2021-03-31T19:42:06.000Z
|
util/credits.py
|
jessescn/ControleAcademico
|
496cb6b40ca894cecff1f8966dbf0389be392095
|
[
"MIT"
] | 2 |
2019-10-02T19:07:00.000Z
|
2019-10-19T19:36:41.000Z
|
import re
from functools import reduce
def print_credits(subjects):
subjects = list(filter(approved, subjects))
mandatory = get_mandatory(subjects)
    general_opt = get_general_optative(subjects)
specific_opt = get_specific_optative(subjects)
print(" Créditos\n")
print("Obrigatórios: (" + mandatory + "/132)")
print("Optativos Específicos: (" + specific_opt + "/40)")
print("Optativos Gerais: (" + genaral_opt + "/16)\n")
def get_general_optative(subjects):
optative = list(filter(general_optative, subjects))
return get_credits_sum(optative)
def get_specific_optative(subjects):
optative = list(filter(specific_optative, subjects))
return get_credits_sum(optative)
def get_mandatory(subjects):
mand = list(filter(mandatory, subjects))
return get_credits_sum(mand)
def get_credits_sum(subjects):
return reduce(sum_credits, subjects, {"creditos": "0"})["creditos"]
def sum_credits(subject1, subject2):
return {"creditos": str(int(subject1['creditos']) + int(subject2['creditos']))}
def approved(subject):
return subject['situacao'] == 'Aprovado' or subject['situacao'] == 'Dispensa'
def mandatory(subject):
return subject['tipo'] == 'Obrigatória'
def general_optative(subject):
    return subject['tipo'] == 'Optativa' and not re.search(r"\A141", subject['codigo'])
def specific_optative(subject):
    return subject['tipo'] == 'Optativa' and (re.search(r"\A141", subject['codigo']))
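# A minimal sketch of the expected input: each subject is a dict carrying at least
# 'situacao', 'tipo', 'creditos' and 'codigo'. The records below are made up purely
# to exercise the three buckets computed above.
if __name__ == '__main__':
    sample_subjects = [
        {'situacao': 'Aprovado', 'tipo': 'Obrigatória', 'creditos': '4', 'codigo': '1109103'},
        {'situacao': 'Aprovado', 'tipo': 'Optativa', 'creditos': '4', 'codigo': '1411310'},
        {'situacao': 'Aprovado', 'tipo': 'Optativa', 'creditos': '2', 'codigo': '1109053'},
        {'situacao': 'Reprovado', 'tipo': 'Obrigatória', 'creditos': '4', 'codigo': '1109113'},
    ]
    print_credits(sample_subjects)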
| 33.022222 | 84 | 0.701884 |
082f046cbde749565b175802b98002bc565747b1
| 372 |
py
|
Python
|
Mini/newBase64.py
|
pjx206/PythonLearning
|
e00d5ceff36295adc15642d76f257120315f8941
|
[
"MIT"
] | null | null | null |
Mini/newBase64.py
|
pjx206/PythonLearning
|
e00d5ceff36295adc15642d76f257120315f8941
|
[
"MIT"
] | null | null | null |
Mini/newBase64.py
|
pjx206/PythonLearning
|
e00d5ceff36295adc15642d76f257120315f8941
|
[
"MIT"
] | null | null | null |
import base64
stdtable = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def decode_str(s, table):
global stdtable
transtab = str.maketrans(table, stdtable)
s = s.translate(transtab)
return base64.b64decode(bytes(s, encoding='ascii'))
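# The opposite direction, shown as a sketch: encode with the standard alphabet and
# then translate into the custom one. It assumes `table` is a 64-character alphabet
# ordered like stdtable; '=' padding passes through untranslated.
def encode_str(b, table):
    transtab = str.maketrans(stdtable, table)
    s = base64.b64encode(b).decode('ascii')
    return s.translate(transtab)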
def main():
    print(decode_str('ywjJza==', table=stdtable))  # the original custom table is missing here; stdtable is a stand-in
if __name__ == '__main__':
main()
| 20.666667 | 77 | 0.704301 |
54f0631d7e92c3315efc013a924bf59dd75cf501
| 12,169 |
py
|
Python
|
lib/tool_shed/test/functional/test_0410_repository_component_review_access_control.py
|
quacksawbones/galaxy-1
|
65f7259b29d3886e526d9be670c60d9da9fbe038
|
[
"CC-BY-3.0"
] | 1,085 |
2015-02-18T16:14:38.000Z
|
2022-03-30T23:52:07.000Z
|
lib/tool_shed/test/functional/test_0410_repository_component_review_access_control.py
|
quacksawbones/galaxy-1
|
65f7259b29d3886e526d9be670c60d9da9fbe038
|
[
"CC-BY-3.0"
] | 11,253 |
2015-02-18T17:47:32.000Z
|
2022-03-31T21:47:03.000Z
|
lib/tool_shed/test/functional/test_0410_repository_component_review_access_control.py
|
quacksawbones/galaxy-1
|
65f7259b29d3886e526d9be670c60d9da9fbe038
|
[
"CC-BY-3.0"
] | 1,000 |
2015-02-18T16:18:10.000Z
|
2022-03-29T08:22:56.000Z
|
from ..base.twilltestcase import common, ShedTwillTestCase
repository_name = 'filtering_0410'
repository_description = 'Galaxy filtering tool for test 0410'
repository_long_description = 'Long description of Galaxy filtering tool for test 0410'
'''
1. Create a repository in the tool shed owned by test_user_1.
2. Have test_user_2 complete a review of the repository.
3. Have test_user_1 browse the review.
4. Have test_user_3 browse the repository and make sure they are not allowed to browse the review.
5. Have test_user_1 give write permission on the repository to test_user_3.
6. Have test_user_3 browse the repository again and they should now have the ability to browse the review.
7. Have test_user_3 browse the review.
'''
class TestRepositoryComponentReviews(ShedTwillTestCase):
'''Test repository component review features.'''
def test_0000_initiate_users(self):
"""Create necessary user accounts and login as an admin user.
        Create all the user accounts that are needed for this test script to run independently of other tests.
Previously created accounts will not be re-created.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
test_user_1 = self.test_db_util.get_user(common.test_user_1_email)
assert test_user_1 is not None, f'Problem retrieving user with email {common.test_user_1_email} from the database'
self.test_db_util.get_private_role(test_user_1)
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
test_user_2 = self.test_db_util.get_user(common.test_user_2_email)
assert test_user_2 is not None, f'Problem retrieving user with email {common.test_user_2_email} from the database'
self.test_db_util.get_private_role(test_user_2)
self.login(email=common.test_user_3_email, username=common.test_user_3_name)
test_user_3 = self.test_db_util.get_user(common.test_user_3_email)
assert test_user_3 is not None, f'Problem retrieving user with email {common.test_user_3_email} from the database'
self.test_db_util.get_private_role(test_user_3)
self.login(email=common.admin_email, username=common.admin_username)
admin_user = self.test_db_util.get_user(common.admin_email)
assert admin_user is not None, f'Problem retrieving user with email {common.admin_email} from the database'
self.test_db_util.get_private_role(admin_user)
def test_0005_grant_reviewer_role(self):
'''Grant the repository reviewer role to test_user_2.
We now have an admin user (admin_user) and three non-admin users (test_user_1, test_user_2, and test_user_3). Grant the repository
reviewer role to test_user_2, who will not be the owner of the reviewed repositories, and do not grant any roles to test_user_3 yet.
'''
reviewer_role = self.test_db_util.get_role_by_name('Repository Reviewer')
test_user_2 = self.test_db_util.get_user(common.test_user_2_email)
self.grant_role_to_user(test_user_2, reviewer_role)
def test_0010_verify_repository_review_components(self):
'''Ensure that the required review components exist.
Make sure all the components we are to review are recorded in the database.
'''
self.add_repository_review_component(name='Repository dependencies',
description='Repository dependencies defined in a file named repository_dependencies.xml included in the repository')
strings_displayed = ['Data types', 'Functional tests', 'README', 'Repository dependencies', 'Tool dependencies', 'Tools', 'Workflows']
self.manage_review_components(strings_displayed=strings_displayed)
def test_0015_create_repository(self):
"""Create and populate the filtering repository
We are at step 1.
Log in as test_user_1 and create the filtering repository, then upload a basic set of
components to be reviewed in subsequent tests.
"""
category = self.create_category(name='Test 0400 Repository Component Reviews', description='Test 0400 Repository Component Reviews')
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
strings_displayed = self.expect_repo_created_strings(repository_name)
repository = self.get_or_create_repository(name=repository_name,
description=repository_description,
long_description=repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=strings_displayed)
self.upload_file(repository,
filename='filtering/filtering_1.1.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering 1.1.0 tarball.',
strings_displayed=[],
strings_not_displayed=[])
self.upload_file(repository,
filename='filtering/filtering_test_data.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering test data.',
strings_displayed=[],
strings_not_displayed=[])
self.upload_file(repository,
filename='readme.txt',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded readme.txt.',
strings_displayed=[],
strings_not_displayed=[])
def test_0020_review_repository(self):
'''Complete a review of the filtering repository.
We are at step 2 - Have test_user_2 complete a review of the repository.
Review all components of the filtering repository, with the appropriate contents and approved/not approved/not applicable status.
'''
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
review_contents_dict = {'Data types': dict(),
'README': dict(rating=5, comment='Clear and concise readme file, a true pleasure to read.', approved='yes', private='no'),
'Functional tests': dict(rating=5, comment='A good set of functional tests.', approved='yes', private='no'),
'Repository dependencies': dict(),
'Tool dependencies': dict(),
'Tools': dict(rating=5, comment='Excellent tool, easy to use.', approved='yes', private='no'),
'Workflows': dict()}
self.create_repository_review(repository, review_contents_dict)
def test_0025_verify_repository_review(self):
'''Verify that the review was completed and displays properly.
We are at step 3 - Have test_user_1 browse the review.
Verify that all the review components were submitted, and that the repository owner can see the review.
'''
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
        review_strings = ['Data types', 'Functional tests', 'yes', 'A good set of functional tests.', 'README', 'yes', 'Workflows', 'Tools']
        review_strings.extend(['Clear and concise readme file, a true pleasure to read.', 'Tool dependencies', 'not_applicable'])
        review_strings.extend(['Repository dependencies', 'Excellent tool, easy to use.'])
        strings_displayed = ['Browse reviews of this repository']
        self.display_manage_repository_page(repository, strings_displayed=strings_displayed)
        self.verify_repository_reviews(repository, reviewer=user, strings_displayed=review_strings)
def test_0030_browse_with_other_user(self):
'''Verify that test_user_3 is blocked from browsing the review.
We are at step 4 - Have test_user_3 browse the repository and make sure they are not allowed to browse the review.
'''
self.login(email=common.test_user_3_email, username=common.test_user_3_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_not_displayed = ['Browse reviews of this repository']
self.display_manage_repository_page(repository, strings_not_displayed=strings_not_displayed)
strings_not_displayed = ['A good set of functional tests.', 'Clear and concise readme file, a true pleasure to read.']
strings_not_displayed.append('Excellent tool, easy to use.')
changeset_revision = self.get_repository_tip(repository)
review = self.test_db_util.get_repository_review_by_user_id_changeset_revision(user.id, repository.id, changeset_revision)
self.browse_component_review(review, strings_not_displayed=strings_not_displayed)
def test_0035_grant_write_access_to_other_user(self):
'''Grant write access on the filtering_0410 repository to test_user_3.
        We are at step 5 - Have test_user_1 give write permission on the repository to test_user_3.
'''
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
self.grant_write_access(repository, usernames=[common.test_user_3_name])
def test_0040_verify_test_user_3_can_browse_reviews(self):
'''Check that test_user_3 can now browse reviews.
We are at step 6 - Have test_user_3 browse the repository again and they should now have the ability to browse the review.
'''
self.login(email=common.test_user_3_email, username=common.test_user_3_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
strings_displayed = ['Browse reviews of this repository']
self.display_manage_repository_page(repository, strings_displayed=strings_displayed)
def test_0045_verify_browse_review_with_write_access(self):
'''Check that test_user_3 can now display reviews.
We are at step 7 - Have test_user_3 browse the review.
'''
self.login(email=common.test_user_3_email, username=common.test_user_3_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ['A good set of functional tests.',
'Clear and concise readme file',
'a true pleasure to read.',
'Excellent tool, easy to use.']
changeset_revision = self.get_repository_tip(repository)
review = self.test_db_util.get_repository_review_by_user_id_changeset_revision(user.id, repository.id, changeset_revision)
self.browse_component_review(review, strings_displayed=strings_displayed)
| 63.712042 | 162 | 0.689046 |
ece08db469b8b7b561c4451ec360e88729a13e65
| 1,314 |
py
|
Python
|
flask_service/tests/test_swagger.py
|
mwprog/atomist-flask-microservice
|
65a18a0f149bf30af3cb5f9eb0818aa784901ade
|
[
"Apache-2.0"
] | null | null | null |
flask_service/tests/test_swagger.py
|
mwprog/atomist-flask-microservice
|
65a18a0f149bf30af3cb5f9eb0818aa784901ade
|
[
"Apache-2.0"
] | 6 |
2018-06-06T20:00:46.000Z
|
2018-06-08T14:19:55.000Z
|
flask_service/tests/test_swagger.py
|
mwprog/atomist-flask-microservice
|
65a18a0f149bf30af3cb5f9eb0818aa784901ade
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask_service.swagger import spec
from flask_service import __version__
def test_document_meta():
api = spec.to_dict()
assert api['info']['title'] == 'flask_service'
assert api['info']['version'] == __version__
assert api['info']['description'] == 'My application'
def test_document_health_endpoint():
api = spec.to_dict()
assert '/health' in api['paths']
assert 'get' in api['paths']['/health']
assert '200' in api['paths']['/health']['get']['responses']
assert 'string' in api['paths']['/health']['get']['responses']['200']['schema']
assert '503' in api['paths']['/health']['get']['responses']
assert 'string' in api['paths']['/health']['get']['responses']['503']['schema']
def test_document_status_endpoint():
api = spec.to_dict()
assert '/status' in api['paths']
assert 'get' in api['paths']['/status']
assert '200' in api['paths']['/status']['get']['responses']
assert 'string' in api['paths']['/status']['get']['responses']['200']['schema']
def test_document_my_app_endpoint():
api = spec.to_dict()
assert '/' in api['paths']
assert 'get' in api['paths']['/']
assert '200' in api['paths']['/']['get']['responses']
assert 'string' in api['paths']['/']['get']['responses']['200']['schema']
| 36.5 | 83 | 0.617199 |
424f257e072dc59501f85a31fa24fc892b213eff
| 51 |
py
|
Python
|
tlp/django_app/oscar_apps/promotions/admin.py
|
munisisazade/create-django-app
|
f62395af2adaacacc4d3a3857c6570c9647d13a1
|
[
"MIT"
] | 14 |
2018-01-08T12:50:10.000Z
|
2021-12-26T18:38:14.000Z
|
tlp/django_app/oscar_apps/promotions/admin.py
|
munisisazade/create-django-app
|
f62395af2adaacacc4d3a3857c6570c9647d13a1
|
[
"MIT"
] | 10 |
2018-03-01T14:17:05.000Z
|
2022-03-11T23:26:11.000Z
|
tlp/django_app/oscar_apps/promotions/admin.py
|
munisisazade/create-django-app
|
f62395af2adaacacc4d3a3857c6570c9647d13a1
|
[
"MIT"
] | 4 |
2019-04-09T17:29:34.000Z
|
2020-06-07T14:46:23.000Z
|
from oscar.apps.promotions.admin import * # noqa
| 17 | 49 | 0.745098 |
8e64e54226fd48e478c3ace52062c9a4fabb648d
| 1,272 |
py
|
Python
|
config.py
|
pubggamerzytm/Telegram-Airdrop-Bot
|
f94bc81a39528794c389b9528b913e59bf3acca4
|
[
"MIT"
] | null | null | null |
config.py
|
pubggamerzytm/Telegram-Airdrop-Bot
|
f94bc81a39528794c389b9528b913e59bf3acca4
|
[
"MIT"
] | null | null | null |
config.py
|
pubggamerzytm/Telegram-Airdrop-Bot
|
f94bc81a39528794c389b9528b913e59bf3acca4
|
[
"MIT"
] | null | null | null |
# --------------------------------------------- #
# Plugin Name : TelegramAirdropBot #
# Author Name : vsnz #
# File Name : config.py #
# --------------------------------------------- #
# Enable / disable the airdrop
airdrop_live = True
# Telegram
token = '1309504600:AAEeBquYnfrdKOqGd_1kBUwsPengw42bTHo' # More: https://core.telegram.org/bots#3-how-do-i-create-a-bot
log_channel = '-412534591' # Channel ID. Example: -1001355597767
admins = [] # Telegram User ID's. Admins are able to execute command "/airdroplist"
airdrop_cap = 100 # Max airdrop submissions that are being accepted
texts = {
'start_1': 'Hi {} and welcome to our Airdrop!\n\nGet started by clicking the button below.\n\n',
'start_2': 'Hi {},\n\nYour address has been added to the airdrop list!\n\n',
'airdrop_start': 'The airdrop didn\'t start yet.',
'airdrop_address': 'Type in your $ETH address:',
'airdrop_max_cap': 'ℹ️ The airdrop reached its max cap.',
'airdrop_walletused': '⚠️ That address has already been used. Use a different one.',
'airdrop_confirmation': '✅ Your address has been added to airdrop list.',
}
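# A sketch of how these settings are typically consumed; the handler and variable
# names below are illustrative only and do not come from this repository:
#
#     import config
#     if config.airdrop_live and submissions_count < config.airdrop_cap:
#         bot.send_message(chat_id, config.texts['airdrop_address'])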
| 47.111111 | 129 | 0.568396 |
ae11e1243cb53f5f42b6ff89be2e7f16c453990c
| 23,983 |
py
|
Python
|
scripts/momentum-budget.py
|
jenfly/monsoon-onset
|
6d8651a337daa174960e716d378292452db77246
|
[
"MIT"
] | 3 |
2016-10-22T07:11:46.000Z
|
2021-05-23T12:21:21.000Z
|
scripts/momentum-budget.py
|
jenfly/monsoon-onset
|
6d8651a337daa174960e716d378292452db77246
|
[
"MIT"
] | 1 |
2022-02-23T08:28:52.000Z
|
2022-02-23T08:28:52.000Z
|
scripts/momentum-budget.py
|
jenfly/monsoon-onset
|
6d8651a337daa174960e716d378292452db77246
|
[
"MIT"
] | 2 |
2017-05-25T02:45:53.000Z
|
2021-08-07T09:30:02.000Z
|
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import numpy as np
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import collections
import atmos as atm
import merra
import indices
import utils
# ----------------------------------------------------------------------
yearstr = '1979-2014'
onset_nm = 'CHP_MFC'
ndays = 5
lon1, lon2 = 60, 100
daynm, yearnm = 'dayrel', 'year'
latname, lonname, pname = 'YDim', 'XDim', 'Height'
datadir = atm.homedir() + 'datastore/merra/analysis/'
savedir = 'figs/'
filenm = datadir + 'ubudget/merra_ubudget_dailyrel_%s_ndays%d_%dE-%dE_%s.nc'
files = {}
files['ubudget'] = filenm % (onset_nm, ndays, lon1, lon2, yearstr)
varnms = ['U', 'V']
plev_plot = 200
pmid = 500 # Pressure level to plot psi latitude-day contours
for nm in varnms:
filenm = datadir + 'merra_%s%d_dailyrel_%s_%s.nc'
files[nm] = filenm % (nm, plev_plot, onset_nm, yearstr)
filenm = datadir + 'merra_%s_sector_%dE-%dE_dailyrel_%s_%s.nc'
files[nm + '_latp'] = filenm % (nm, lon1, lon2, onset_nm, yearstr)
# ----------------------------------------------------------------------
# Read data from each year
# Zonal momentum budget components
filenm = files['ubudget']
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ubudget:
ubudget.load()
# Scaling factor for all terms in momentum budget
scale = 1e-4
ubudget = ubudget / scale
ubudget.attrs['comp_units'] = '%.0e m/s2' % scale
# Read other lat-lon variables and smooth with rolling mean
for nm in varnms:
filenm = files[nm]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
var = ds['%s%d' % (nm, plev_plot)].load()
daydim = atm.get_coord(var, coord_name=daynm, return_type='dim')
ubudget[nm] = atm.rolling_mean(var, ndays, axis=daydim, center=True)
# Read other lat-pres variables and smooth with rolling mean
data_latp = xray.Dataset()
for nm in varnms:
varnm = nm + '_latp'
filenm = files[varnm]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
daydim = atm.get_coord(var, coord_name=daynm, return_type='dim')
data_latp[nm] = atm.rolling_mean(var, ndays, axis=daydim, center=True)
# Compute streamfunction
print('Computing streamfunction')
if (lon2 - lon1) < 360:
sector_scale = (lon2 - lon1) / 360.
else:
sector_scale = None
data_latp['PSI'] = atm.streamfunction(data_latp['V'], sector_scale=sector_scale)
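# For reference, a minimal sketch of the meridional mass streamfunction that
# atm.streamfunction is assumed to compute: psi = (2*pi*a*cos(lat)/g) * integral(v dp),
# scaled by the longitude-sector fraction. It is not called below; the atmos-tools
# routine does the real work.
def streamfunction_sketch(v, plev_pa, lat_deg, sector_scale=None):
    a, g = 6.371e6, 9.81
    coslat = np.cos(np.deg2rad(lat_deg))
    # v has shape (nplev, nlat); midpoint integration in pressure, top down
    dp = np.diff(plev_pa)[:, None]
    vmid = 0.5 * (v[1:, :] + v[:-1, :])
    psi = 2 * np.pi * a * coslat * np.cumsum(vmid * dp, axis=0) / g
    if sector_scale is not None:
        psi = psi * sector_scale
    return psi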
# Additional metadata
ubudget.attrs['ndays'] = ndays
ubudget.attrs['lon1'] = lon1
ubudget.attrs['lon2'] = lon2
# Topography for lat-pres contour plots
print('Loading topography')
psfile = atm.homedir() + 'dynamics/python/atmos-tools/data/topo/ncep2_ps.nc'
with xray.open_dataset(psfile) as ds:
ps = ds['ps'] / 100
if (lon2 - lon1) < 360:
ps = atm.dim_mean(ps, 'lon', lon1, lon2)
else:
ps = atm.dim_mean(ps, 'lon')
# ----------------------------------------------------------------------
# Consolidate terms together
groups = collections.OrderedDict()
groups['ADV_AVG'] = ['ADV_AVG_AVG_X', 'ADV_AVG_AVG_Y', 'ADV_AVG_AVG_P']
groups['ADV_AVST'] = ['ADV_AVG_ST_X', 'ADV_AVG_ST_Y', 'ADV_AVG_ST_P']
groups['ADV_STAV'] = ['ADV_ST_AVG_X', 'ADV_ST_AVG_Y', 'ADV_ST_AVG_P']
groups['ADV_CRS'] = ['ADV_AVST', 'ADV_STAV']
groups['EMFC_TR'] = ['EMFC_TR_X', 'EMFC_TR_Y', 'EMFC_TR_P']
groups['EMFC_ST'] = ['EMFC_ST_X', 'EMFC_ST_Y', 'EMFC_ST_P']
groups['EMFC'] = ['EMFC_TR', 'EMFC_ST']
groups['COR'] = ['COR_AVG', 'COR_ST']
groups['ADV+COR'] = ['ADV_AVG', 'COR_AVG']
groups['SUM'] = ['ADV_AVG', 'ADV_CRS', 'EMFC', 'COR', 'PGF_ST', 'ANA']
print('Consolidating ubudget terms')
for key in groups:
nms = groups[key]
ubudget[key] = ubudget[nms[0]]
for nm in nms[1:]:
ubudget[key] = ubudget[key] + ubudget[nm]
# Tile the zonal mean values
varbig = ubudget['SUM']
for nm in ubudget.data_vars:
if lonname not in ubudget[nm].dims:
vals = atm.biggify(ubudget[nm], varbig, tile=True)
ubudget[nm] = xray.DataArray(vals, coords=varbig.coords)
# Sector mean budget
print('Computing sector mean ubudget')
ubudget_sector = atm.dim_mean(ubudget, 'lon', lon1, lon2)
# Streamfunction mean and eddy-driven decomposition
print('Computing streamfunction components')
eqbuf = 5.0
sector_scale = (lon2 - lon1) / 360.0
v = utils.v_components(ubudget_sector, scale=scale, eqbuf=eqbuf)
psi_comp = xray.Dataset()
for nm in v.data_vars:
psi_comp[nm] = atm.streamfunction(v[nm], sector_scale=sector_scale)
# Extract single pressure level for line plots
print('Extracting single pressure level for plots')
attrs = ubudget.attrs
attrs['plev'] = plev_plot
ubudget = atm.subset(ubudget, {pname: (plev_plot, plev_plot)}, squeeze=True)
ubudget_sector_plevs = ubudget_sector.copy()
ubudget_sector = atm.subset(ubudget_sector, {pname: (plev_plot, plev_plot)},
squeeze=True)
ubudget.attrs = attrs
ubudget_sector.attrs = attrs
print('Finished loading/calculating data')
# ----------------------------------------------------------------------
# Utility functions and plot formatting options
def saveclose(filestr):
atm.savefigs(filestr, ext='pdf', merge=True)
plt.close('all')
def get_daystr(plotdays):
if len(atm.makelist(plotdays)) > 1:
daystr = 'Rel Days %d to %d' % (plotdays[0], plotdays[-1])
savestr = 'reldays%d_%d' % (plotdays[0], plotdays[-1])
else:
daystr = 'Rel Day %d' % plotdays
savestr = 'relday%d' % plotdays
return daystr, savestr
# ----------------------------------------------------------------------
# Streamfunction latitude-day contours
psimid = atm.subset(data_latp['PSI'], {pname : (pmid, pmid)}, squeeze=True)
lat = atm.get_coord(data_latp, 'lat')
days = atm.get_coord(data_latp, 'dayrel')
clev = np.arange(-70, 71, 5)
ticks = np.arange(-70, 71, 10)
title='PSI%d' % pmid
plt.figure(figsize=(10, 7))
plt.contourf(days, lat, psimid.T, clev, cmap='RdBu_r', extend='both')
cb = plt.colorbar(ticks=ticks)
plt.ylim(-60, 60)
plt.xticks(np.arange(-120, 201, 30))
plt.grid()
plt.title(title)
plt.xlabel('Day Rel')
plt.ylabel('Latitude')
# ----------------------------------------------------------------------
# Streamfunction decomposition
def psi_latpres(psi, ps, cint=10, xlims=(-60, 60), xticks=range(-60, 61, 15),
title=''):
xmin, xmax = xlims
axlims = (xmin, xmax, 0, 1000)
atm.contour_latpres(psi, clev=cint, topo=ps, omitzero=True, axlims=axlims)
plt.xticks(xticks, xticks)
plt.grid()
plt.title(title, fontsize=10)
# plotdays = [-30, -15, 0, 15, 30]
# keys = ['TOT', 'MMC', 'EDDY', 'PGF', 'RESID']
plotdays = [-30, 0, 30]
keys = ['TOT', 'MMC', 'EDDY']
xlims, xticks = (-35, 35), range(-30, 31, 10)
cint = 5
nrow, ncol = len(keys), len(plotdays)
advance_by = 'col'
fig_kw = {'figsize' : (11, 7), 'sharex' : True, 'sharey' : True}
gridspec_kw = {'left' : 0.08, 'right' : 0.99, 'wspace' : 0.06, 'hspace' : 0.08,
'bottom' : 0.08, 'top' : 0.9}
# fig_kw = {'figsize' : (14, 8), 'sharex' : True, 'sharey' : True}
# gridspec_kw = {'left' : 0.06, 'right' : 0.99, 'wspace' : 0.06, 'hspace' : 0.08,
# 'bottom' : 0.06, 'top' : 0.92}
suptitle = r'%d-%dE $\psi$ components' % (lon1, lon2)
grp = atm.FigGroup(nrow, ncol, advance_by, fig_kw=fig_kw,
gridspec_kw=gridspec_kw, suptitle=suptitle)
for key in keys:
for day in plotdays:
grp.next()
if grp.row == 0:
title = 'Day %d' % day
else:
title = ''
if key == 'TOT':
psi = data_latp['PSI'].sel(dayrel=day)
else:
psi = psi_comp[key].sel(dayrel=day)
psi_latpres(psi, ps, cint, xlims, xticks, title=title)
if grp.col > 0:
plt.ylabel('')
if grp.row < grp.nrow - 1:
plt.xlabel('')
atm.text(key, (0.05, 0.88))
# ----------------------------------------------------------------------
# Lat-pres contours of ubudget components
day = 0
nm = 'COR_AVG'
var = ubudget_sector_plevs[nm].sel(dayrel=day)
plt.figure()
atm.pcolor_latpres(var)
plt.xlim(-60,60)
# ----------------------------------------------------------------------
# Lat-pres contours and line plots on individual days
def latpres(data_latp, day, ps, xlims=(-60, 60), xticks=range(-60, 61, 15),
title=None, clev_u=5, clev_psi=5, u_clr='m', u_kw={'alpha' : 0.35},
psi_kw={'alpha' : 0.7}):
"""Plot lat-pres contours of streamfunction and zonal wind.
"""
xmin, xmax = xlims
axlims = (xmin, xmax, 0, 1000)
latp_data = atm.subset(data_latp, {'dayrel' : (day, day)}, squeeze=True)
u = latp_data['U']
psi = latp_data['PSI']
atm.contour_latpres(u, clev=clev_u, topo=ps, colors=u_clr,
contour_kw=u_kw, axlims=axlims)
atm.contour_latpres(psi, clev=clev_psi, omitzero=True, axlims=axlims,
contour_kw=psi_kw)
plt.xticks(xticks, xticks)
plt.grid()
if title is not None:
plt.title(title)
def lineplot(ubudget_sector, keys, day, style, xlims=(-60, 60),
xticks=range(-60, 61, 15), title=None, ylabel=None, legend=True,
legend_kw={'fontsize' : 8, 'loc' : 'lower center', 'ncol' : 2,
'handlelength' : 2.5}):
"""Plot ubudget terms and winds vs latitude."""
subset_dict = {'dayrel' : (day, day), 'lat': xlims}
data = atm.subset(ubudget_sector[keys], subset_dict, squeeze=True)
data = data.to_dataframe()
data.plot(ax=plt.gca(), style=style, legend=False)
plt.xlim(xlims)
plt.xticks(xticks, xticks)
plt.xlabel('Latitude')
plt.grid()
if legend:
plt.legend(**legend_kw)
if ylabel is not None:
plt.ylabel(ylabel)
if title is not None:
plt.title(title)
# Summary plot of psi and u lat-pres contours for presentation
nrow, ncol = 2, 2
advance_by = 'row'
fig_kw = {'figsize' : (11, 7), 'sharex' : 'col', 'sharey' : 'row'}
gridspec_kw = {'left' : 0.1, 'right' : 0.96, 'wspace' : 0.06, 'hspace' : 0.2,
'bottom' : 0.08, 'top' : 0.95}
plotdays = [-15, 0, 15, 30]
xlims, xticks = (-35, 35), range(-30, 31, 10)
grp = atm.FigGroup(nrow, ncol,fig_kw=fig_kw, gridspec_kw=gridspec_kw)
for day in plotdays:
grp.next()
title = 'Day %d' % day
latpres(data_latp, day, ps, xlims=xlims, xticks=xticks)
plt.title(title, fontsize=11)
if grp.row < grp.nrow - 1:
plt.xlabel('')
if grp.col > 0:
plt.ylabel('')
# Lat-pres contours and line plots of 200 mb momentum budget
style = {'ADV_AVG' : 'b', 'COR_AVG' : 'b--', 'ADV+COR' : 'r',
'PGF_ST' : 'k', 'ADV_CRS' : 'g', 'ADV_AVST' : 'g--',
'ADV_STAV' : 'g-.', 'EMFC' : 'm', 'EMFC_TR' : 'm--', 'EMFC_ST' : 'm-.',
'SUM' : 'k--', 'ACCEL' : 'c', 'ANA' : 'y', 'U' : 'k', 'V' : 'k--'}
keys_dict = collections.OrderedDict()
#keys_dict['ubudget'] = ['ADV_AVG', 'COR_AVG', 'ADV+COR', 'PGF_ST',
# 'ADV_CRS', 'EMFC', 'ANA', 'SUM', 'ACCEL']
keys_dict['ubudget'] = ['ADV_AVG', 'COR_AVG', 'ADV+COR', 'PGF_ST',
'ADV_CRS', 'EMFC']
keys_dict['winds'] = ['U', 'V']
keys_dict['eddies'] = ['EMFC_TR', 'EMFC_ST', 'EMFC', 'ADV_CRS']
ylabels = {}
units = '$10^{-4}$ m s$^{-2}$'
#ylabels['ubudget'] = '%d hPa ubudget (%s)' % (plev_plot, units)
ylabels['ubudget'] = units
ylabels['eddies'] = ylabels['ubudget']
#ylabels['winds'] = '%d hPa winds (m/s)' % plev_plot
ylabels['winds'] = 'm/s'
#plotdays = [-30, -15, 0, 15, 30] + [-90, -45, 0, 45, 90]
#nrow, ncol = 4, 5
plotdays = [-30, 0, 30]
nrow, ncol = 4, 3
advance_by = 'row'
# fig_kw = {'figsize' : (18, 12), 'sharex' : 'col', 'sharey' : 'row'}
# gridspec_kw = {'left' : 0.05, 'right' : 0.99, 'wspace' : 0.06, 'hspace' : 0.08,
# 'bottom' : 0.04, 'top' : 0.92, 'height_ratios' : [1, 0.6, 1, 1]}
fig_kw = {'figsize' : (11, 9), 'sharex' : 'col', 'sharey' : 'row'}
gridspec_kw = {'left' : 0.08, 'right' : 0.99, 'wspace' : 0.09, 'hspace' : 0.1,
'bottom' : 0.05, 'top' : 0.92, 'height_ratios' : [1, 0.6, 1, 1]}
legend_kw={'fontsize' : 8, 'loc' : 'upper center', 'ncol' : 2,
'handlelength' : 2.5}
suptitle = r'%d-%d E U and $\psi$ contours, ubudget at 200 hPa' % (lon1, lon2)
#for tropics in [False, True]:
for tropics in [False]:
if tropics:
xlims, xticks = (-35, 35), range(-30, 31, 10)
else:
xlims, xticks = (-60, 60), range(-60, 61, 15)
grp = atm.FigGroup(nrow, ncol, advance_by, fig_kw=fig_kw,
gridspec_kw=gridspec_kw, suptitle=suptitle)
for day in plotdays:
grp.next()
if grp.row == 0:
title = 'Day %d' % day
else:
title = None
latpres(data_latp, day, ps, title=title, xlims=xlims, xticks=xticks)
for nm in ['winds', 'ubudget', 'eddies']:
grp.next()
if grp.col == 0:
legend = True
# if nm == 'ubudget' :
# legend_kw['loc'] = 'lower center'
# else:
# legend_kw['loc'] = 'upper center'
else:
legend = False
keys = keys_dict[nm]
lineplot(ubudget_sector, keys, day, style, xlims=xlims,
xticks=xticks, legend=legend, legend_kw=legend_kw,
ylabel=ylabels[nm])
saveclose(savedir + 'ubudget_sector_latpres_lineplots')
# ----------------------------------------------------------------------
# Plot groups together
keys_list = [['ADV_AVG', 'ADV_CRS', 'COR_AVG', 'COR_ST', 'EMFC', 'PGF_ST',
'SUM', 'ACCEL'],
['U', 'V'],
['ADV_AVG', 'ADV_AVST', 'ADV_STAV', 'ADV_CRS'],
['COR_AVG', 'COR_ST', 'COR'],
['EMFC_TR', 'EMFC_ST', 'EMFC']]
def pcolor_sector(var, daynm, clims, u=None, v=None):
days = var[daynm].values
lat = atm.get_coord(var, 'lat')
x, y = np.meshgrid(days, lat)
vals = var.values.T
vals = np.ma.masked_array(vals, mask=np.isnan(vals))
plt.pcolormesh(x, y, vals, cmap='RdBu_r')
plt.clim(clims)
plt.colorbar(extend='both')
if u is not None:
plt.contour(x, y, u.values.T, [0], colors='k', linewidths=1.5)
if v is not None:
plt.contour(x, y, v.values.T, [0], colors='k', alpha=0.5)
plt.xlim(days.min(), days.max())
plt.xlabel('Rel Day')
plt.ylabel('Latitude')
def plot_groups(ubudget, keys_list, daynm, plotdays=None, latlims=None):
"""Plot groups of lat-lon or lat-day plots.
"""
if latlims is not None:
ubudget = atm.subset(ubudget, {'lat' : latlims})
units = ubudget.attrs['comp_units']
plev = ubudget.attrs['plev']
lon1, lon2 = ubudget.attrs['lon1'], ubudget.attrs['lon2']
try:
lon = atm.get_coord(ubudget, 'lon')
sector = False
except ValueError:
sector = True
if sector:
suptitle = '%d-%d E Zonal Momentum Budget at %d hPa (%s)'
suptitle = suptitle % (lon1, lon2, plev, units)
xticks = range(-120, 201, 60)
else:
daystr, savestr = get_daystr(plotdays)
suptitle = '%s Zonal Momentum Budget at %d hPa (%s)'
suptitle = suptitle % (daystr, plev, units)
xticks = range(40, 121, 20)
nrow, ncol = 3, 4
figsize = (14, 10)
opts = {'left' : 0.05, 'right' : 0.95, 'bottom' : 0.04, 'top' : 0.92,
'wspace' : 0.1, 'hspace' : 0.1}
for i, keys in enumerate(keys_list):
if sector:
data = ubudget[keys]
else:
data = atm.subset(ubudget[keys], {daynm : (plotdays, None)})
if len(atm.makelist(plotdays)) > 1:
data = data.mean(dim=daynm)
clims = atm.climits(data, symmetric=True)
if sector:
clims = 0.9 * np.array(clims)
if i == 0 or i == 2:
isub = 0
plt.figure(figsize=figsize)
plt.suptitle(suptitle)
plt.subplots_adjust(**opts)
for j, nm in enumerate(keys):
isub += 1
if 'U' in keys:
clims = atm.climits(data[nm], symmetric=True)
plt.subplot(nrow, ncol, isub)
if sector:
pcolor_sector(data[nm], daynm, clims, ubudget['U'], ubudget['V'])
else:
atm.pcolor_latlon(data[nm], fancy=False)
plt.clim(clims)
plt.title(nm, fontsize=9)
atm.fmt_subplot(nrow, ncol, isub, xticks=xticks)
plt.grid(True)
# Skip to next row if necessary
if ncol > len(keys):
isub += ncol - len(keys)
for tropics in [True, False]:
savestr = savedir + 'ubudget_'
if tropics:
savestr = savestr + 'tropics_'
latlims = [-30, 30]
else:
latlims = None
# Lat-lon maps
for plotdays in [-90, -30, 0, 30, 60]:
plot_groups(ubudget, keys_list, daynm, plotdays, latlims)
saveclose(savestr + 'latlon')
# Sector lat-day maps
plot_groups(ubudget_sector, keys_list, daynm, None, latlims)
saveclose(savestr + 'sector_latday')
# ----------------------------------------------------------------------
# def zerocrossings(var, latmin, latmax, smoothing=30, interp_res=0.1, nkeep=3):
# var = atm.subset(var, {'lat' : (latmin, latmax)})
# if smoothing is not None:
# var = atm.rolling_mean(var, smoothing, axis=0, center=True)
# lat = atm.get_coord(var, 'lat')
# lat_i = np.arange(latmin, latmax + interp_res, interp_res)
# daynm = var.dims[0]
# days = var[daynm]
# crossings = np.nan * np.ones((nkeep, len(days)), dtype=float)
#
# for d, day in enumerate(days):
# vals = var.sel(**{daynm : day})
# if not np.isnan(vals).all():
# vals = np.interp(lat_i, lat, vals)
# icross = np.where(np.diff(np.sign(vals)))[0]
# latcross = lat_i[icross]
# n = min(nkeep, len(latcross))
# crossings[:n, d] = latcross[:n]
#
# coords = {'n' : np.arange(nkeep) + 1, daynm : var[daynm]}
# crossings = xray.DataArray(crossings, name='zerolat', dims=['n', daynm],
# coords=coords)
#
# return crossings
#
# def psimax_lat(psi, latmin=-30, latmax=10, pmin=300, pmax=700, nsmooth=5):
# days_in = psi['dayrel']
# psi = atm.subset(psi, {'lat' : (latmin, latmax), 'plev' : (pmin, pmax)},
# squeeze=True)
# psi = psi[nsmooth:-nsmooth]
# pdim = atm.get_coord(psi, 'plev', 'dim')
# psi = psi.max(axis=pdim)
#
# lat = atm.get_coord(psi, 'lat')
# latdim = atm.get_coord(psi, 'lat', 'dim')
# ilatmax = psi.argmax(axis=latdim)
# latmax = lat[ilatmax]
# days = atm.get_coord(psi, 'dayrel')
# latmax = xray.DataArray(latmax, coords={'dayrel' : days})
# latmax = latmax.reindex_like(days_in)
# return latmax
# ----------------------------------------------------------------------
# # Line plots on individual days
#
# latmin, latmax = -40, 50
# smoothing = None
# nkeep = {'U' : 2, 'V' : 3}
# zerolats = xray.Dataset()
# for nm in nkeep:
# n = nkeep[nm]
# crossings = zerocrossings(ubudget_sector[nm], latmin, latmax, nkeep=n,
# smoothing=smoothing)
# for i in crossings['n'].values:
# key = nm + '%d' % i
# zerolats[key] = crossings.sel(n=i).drop('n')
#
# check_zerolats = False
# if check_zerolats:
# plt.figure()
# for nm in zerolats.data_vars:
# plt.plot(zerolats[daynm], zerolats[nm], label=nm)
# plt.legend()
#
#
# style = {'ADV_AVG' : 'b', 'COR_AVG' : 'b--', 'ADV+COR' : 'r',
# 'PGF_ST' : 'k', 'ADV_CRS' : 'g', 'ADV_AVST' : 'g--',
# 'ADV_STAV' : 'g-.', 'EMFC' : 'm', 'EMFC_TR' : 'm--', 'EMFC_ST' : 'm-.',
# 'SUM' : 'k--', 'ACCEL' : 'c', 'ANA' : 'y', 'U' : 'k', 'V' : 'k--'}
#
# keys_dict = collections.OrderedDict()
# keys_dict['ubudget'] = ['ADV_AVG', 'COR_AVG', 'ADV+COR', 'PGF_ST',
# 'ADV_CRS', 'EMFC', 'ANA', 'SUM', 'ACCEL']
# keys_dict['winds'] = ['U', 'V']
# keys_dict['eddies'] = ['EMFC_TR', 'EMFC_ST', 'EMFC', 'ADV_AVST', 'ADV_STAV',
# 'ADV_CRS']
# suptitle = '%d-%d E %s at %d hPa'
# suptitles = {}
# suptitles['ubudget'] = suptitle % (lon1, lon2, 'Zonal Momentum Budget', plev)
# suptitles['eddies'] = suptitles['ubudget']
# suptitles['winds'] = suptitle % (lon1, lon2, 'Winds', plev)
# ylabels = {}
# ylabels['ubudget'] = 'ubudget (%s)' % ubudget.attrs['comp_units']
# ylabels['eddies'] = ylabels['ubudget']
# ylabels['winds'] = 'winds (m/s)'
#
# plotdays = [-90, -30, -15, 0, 15, 30, 60, 90]
# nrow, ncol = 2, 4
# figsize = (14, 10)
# lat = atm.get_coord(ubudget, 'lat')
# latname = atm.get_coord(ubudget, 'lat', 'name')
# opts = {'left' : 0.05, 'right' : 0.95, 'bottom' : 0.06, 'top' : 0.94,
# 'wspace' : 0.1, 'hspace' : 0.1}
# lg_row, lg_col, lg_loc, lg_ncol = 2, 1, 'upper center', 2
# zlat_opts = {'U1' : {'label' : 'U=0'}, 'U2' : {},
# 'V1' : {'linestyle' : 'dashed', 'label' : 'V=0'},
# 'V2' : {'linestyle' : 'dashed'}, 'V3' : {'linestyle' : 'dashed'}}
#
# for nm in keys_dict:
# keys = keys_dict[nm]
# suptitle, ylabel = suptitles[nm], ylabels[nm]
# for latlims in [(-60, 60), (-35, 35)]:
# fig, axes = plt.subplots(nrow, ncol, figsize=figsize, sharex=True,
# sharey=True)
# plt.subplots_adjust(**opts)
# plt.autoscale(tight=True)
# plt.suptitle(suptitle)
# for i, day in enumerate(plotdays):
# row, col = atm.subplot_index(nrow, ncol, i + 1)
# ax = axes[row - 1, col - 1]
# subset_dict = {daynm : (day, day), latname: latlims}
# data = atm.subset(ubudget_sector[keys], subset_dict, squeeze=True)
# #data = data.drop(daynm).to_dataframe()
# data = data.to_dataframe()
# data.plot(ax=ax, style=style, legend=False)
# # Plot vertical lines for U=0 and V=0
# zlats = zerolats.sel(**{daynm : day})
# for nm in zlats.data_vars:
# ax.axvline(zlats[nm], color='k', alpha=0.5, linewidth=1.5,
# **zlat_opts[nm])
# ax.set_title('Day %d' % day, fontsize=10)
# ax.grid(True)
# if row == lg_row and col == lg_col:
# ax.legend(fontsize=9, loc=lg_loc, ncol=lg_ncol, handlelength=3)
# if row == nrow:
# ax.set_xlabel('Lat')
# if col == 1:
# ax.set_ylabel(ylabel)
#
# saveclose(savedir + 'ubudget_sector_lineplots')
# ----------------------------------------------------------------------
# latmax = psimax_lat(data_latp['PSI'], nsmooth=ndays, pmin=600, pmax=700)
#
# # Ubudget terms at latitude of psimax
# print('Computing ubudget terms at latitude of psimax for each day')
# days = latmax['dayrel']
# days = days[np.isfinite(latmax)]
# ubudget_psimax = xray.Dataset()
# for d, day in enumerate(days):
# lat0 = latmax.sel(dayrel=day).values
# ds = atm.subset(ubudget_sector, {'lat' : (lat0, lat0)}, squeeze=True)
# ds = atm.subset(ds, {'dayrel' : (day, day)}, squeeze=False)
# if d == 0:
# ubudget_psimax = ds
# else:
# ubudget_psimax = xray.concat([ubudget_psimax, ds], dim='dayrel')
#
# keys = ['ADV_AVG', 'COR_AVG', 'ADV+COR_AVG', 'PGF_ST',
# 'ADV_CRS', 'EMFC', 'ANA', 'SUM', 'ACCEL']
#
# xticks = range(-120, 201, 30)
# xlims = [-120, 200]
# plt.figure(figsize=(8, 12))
# plt.subplot(2, 1, 1)
# plt.plot(latmax['dayrel'], latmax)
# plt.xticks(xticks)
# plt.xlim(xlims)
# plt.grid(True)
# plt.subplot(2, 1, 2)
# ubudget_psimax[keys].to_dataframe().plot(ax=plt.gca(), style=style, legend=False)
# plt.legend(fontsize=8, ncol=3)
# plt.xticks(xticks)
# plt.xlim(xlims)
# plt.grid(True)
| 36.896923 | 83 | 0.562315 |
b32e27cbd86ecb26002c422478e64cd189ca3599
| 3,428 |
py
|
Python
|
src/urh/controller/SpectrumDialogController.py
|
awesome-archive/urh
|
c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7
|
[
"Apache-2.0"
] | null | null | null |
src/urh/controller/SpectrumDialogController.py
|
awesome-archive/urh
|
c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7
|
[
"Apache-2.0"
] | null | null | null |
src/urh/controller/SpectrumDialogController.py
|
awesome-archive/urh
|
c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7
|
[
"Apache-2.0"
] | null | null | null |
from PyQt5.QtCore import pyqtSlot
from urh.FFTSceneManager import FFTSceneManager
from urh.controller.SendRecvDialogController import SendRecvDialogController
from urh.dev.VirtualDevice import VirtualDevice, Mode
from urh.plugins.NetworkSDRInterface.NetworkSDRInterfacePlugin import NetworkSDRInterfacePlugin
class SpectrumDialogController(SendRecvDialogController):
def __init__(self, freq, samp_rate, bw, gain, device: str, parent=None, testing_mode=False):
self.is_rx = True
super().__init__(freq, samp_rate, bw, gain, device, parent=parent, testing_mode=testing_mode)
self.graphics_view = self.ui.graphicsViewReceive
self.update_interval = 1
self.ui.stackedWidget.setCurrentIndex(0)
self.hide_receive_ui_items()
self.hide_send_ui_items()
self.setWindowTitle("Spectrum analyzer")
self.scene_manager = FFTSceneManager(parent=self, graphic_view=self.graphics_view)
self.graphics_view.setScene(self.scene_manager.scene)
self.graphics_view.scene_manager = self.scene_manager
# do not use network sdr plugin for spectrum analysis
index = next((i for i in range(self.ui.cbDevice.count())
if self.ui.cbDevice.itemText(i) == NetworkSDRInterfacePlugin.NETWORK_SDR_NAME), None)
if index is not None:
self.ui.cbDevice.removeItem(index)
self.init_device()
self.create_connects()
def create_connects(self):
super().create_connects()
self.graphics_view.freq_clicked.connect(self.on_graphics_view_freq_clicked)
def update_view(self):
if super().update_view():
x, y = self.device.spectrum
if x is None or y is None:
return
self.scene_manager.scene.frequencies = x
self.scene_manager.plot_data = y
self.scene_manager.init_scene()
self.scene_manager.show_full_scene()
self.graphics_view.update()
def init_device(self):
device_name = self.ui.cbDevice.currentText()
if self.device:
self.device.free_data()
# Can't perform gc.collect() here, because the dialog itself would be deleted
# see https://github.com/jopohl/urh/issues/83
# gc.collect()
self.device = VirtualDevice(self.backend_handler, device_name, Mode.spectrum, bw=1e6,
freq=433.92e6, gain=40, samp_rate=1e6,
device_ip="192.168.10.2", parent=self)
self._create_device_connects()
@pyqtSlot(float)
def on_graphics_view_freq_clicked(self, freq: float):
self.ui.spinBoxFreq.setValue(freq)
self.ui.spinBoxFreq.editingFinished.emit()
@pyqtSlot()
def on_freq_changed(self):
self.device.frequency = self.ui.spinBoxFreq.value()
self.scene_manager.scene.center_freq = self.ui.spinBoxFreq.value()
self.scene_manager.clear_path()
self.scene_manager.clear_peak()
@pyqtSlot()
def on_start_clicked(self):
super().on_start_clicked()
self.device.start()
@pyqtSlot()
def on_device_started(self):
super().on_device_started()
self.ui.btnClear.setEnabled(False)
self.ui.btnStart.setEnabled(False)
@pyqtSlot()
def on_clear_clicked(self):
self.scene_manager.clear_path()
self.scene_manager.clear_peak()
| 37.67033 | 107 | 0.672695 |
127485b72707708fecb395d8a73d7ab032cda0c1
| 1,580 |
py
|
Python
|
src/conll/ProcessConllFormat.py
|
vikasbahirwani/SequenceTagging
|
b4e0dc2a71f869a27ada003c9276fd1f269e230d
|
[
"Apache-2.0"
] | null | null | null |
src/conll/ProcessConllFormat.py
|
vikasbahirwani/SequenceTagging
|
b4e0dc2a71f869a27ada003c9276fd1f269e230d
|
[
"Apache-2.0"
] | null | null | null |
src/conll/ProcessConllFormat.py
|
vikasbahirwani/SequenceTagging
|
b4e0dc2a71f869a27ada003c9276fd1f269e230d
|
[
"Apache-2.0"
] | null | null | null |
__author__ = "Vikas Bahirwani"
DATADIR = "../../data/conll"
from pathlib import Path
def convertToSentences(conll_filename):
with Path(conll_filename).open('r') as f:
words = []
pos_tags = []
chunk_tags = []
ner_tags = []
for i, line in enumerate(f):
if line != '\n':
splits = line.strip().split()
words.append(splits[0])
pos_tags.append(splits[1])
chunk_tags.append(splits[2])
ner_tags.append(splits[3])
else:
yield ' '.join(words), ' '.join(pos_tags), ' '.join(chunk_tags), ' '.join(ner_tags)
words = []
pos_tags = []
chunk_tags = []
ner_tags = []
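# Expected input, for reference: the standard CoNLL-2003 layout, one token per line
# as "word POS chunk NER", with a blank line between sentences, e.g.
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
#   (blank line)
#
# convertToSentences() yields one (words, pos_tags, chunk_tags, ner_tags) tuple of
# space-joined strings per sentence.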
if __name__ == '__main__':
file_prefixes = ["train", "testa", "testb"]
for prefix in file_prefixes:
words_filename = str(Path(DATADIR, "{}.words.txt".format(prefix)))
tags_filename = str(Path(DATADIR, "{}.tags.txt".format(prefix)))
with Path(words_filename).open('w') as fwords, Path(tags_filename).open('w') as ftags:
for i, processed in enumerate(convertToSentences(str(Path(DATADIR, '{}.txt'.format(prefix))))):
sentence, _, _, ner_tags = processed
fwords.write("{}\n".format(sentence))
ftags.write("{}\n".format(ner_tags))
if i % 100 == 0:
print("{} {} lines processed".format(prefix, i))
print("{} A total of {} lines processed".format(prefix, i))
| 35.111111 | 107 | 0.525316 |
eeaa3f2ccbf9e024379137b6319c2582d30e5277
| 19,891 |
py
|
Python
|
concept_formation/structure_mapper.py
|
ThomasHoppe/concept_formation
|
2468fea78ba46804bf44228519eb33ebc5780d31
|
[
"MIT"
] | 47 |
2015-06-08T20:34:18.000Z
|
2021-09-26T17:59:06.000Z
|
concept_formation/structure_mapper.py
|
ThomasHoppe/concept_formation
|
2468fea78ba46804bf44228519eb33ebc5780d31
|
[
"MIT"
] | 65 |
2015-07-27T18:16:31.000Z
|
2021-10-04T14:02:51.000Z
|
concept_formation/structure_mapper.py
|
ThomasHoppe/concept_formation
|
2468fea78ba46804bf44228519eb33ebc5780d31
|
[
"MIT"
] | 13 |
2015-07-27T13:27:03.000Z
|
2022-03-15T02:18:10.000Z
|
"""
This module contains the
:class:`StructureMapper<concept_formation.structure_mapper.StructureMapper>`
class, which is used to rename variable attributes to improve the category utility
on instances.
It is an instance of a
:class:`preprocessor<concept_formation.preprocessor.Preprocessor>` with a
:func:`transform` and :func:`undo_tranform` methods.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from random import choice
from random import random
from itertools import combinations
from munkres import Munkres
# from scipy.optimize import linear_sum_assignment
from py_search.base import Problem
from py_search.base import Node
from py_search.optimization import hill_climbing
from concept_formation.preprocessor import Preprocessor
from concept_formation.preprocessor import rename_relation
from concept_formation.preprocessor import get_attribute_components
from concept_formation.cobweb3 import Cobweb3Node
from concept_formation.cobweb3 import cv_key
def get_component_names(instance, vars_only=True):
"""
Given an instance or a concept's probability table return a list of all of
the component names. If vars_only is false, than all constants and
variables are returned.
:param instance: An instance or a concept's probability table.
:type instance: an instance
:param vars_only: Whether or not to return only variables (i.e., strings
with a names with a '?' at the beginning) or both variables and
constants.
:type vars_only: boolean
:return: A frozenset of all of the component names present in the instance
:rtype: frozenset
>>> instance = {('a', ('sub1', 'c1')): 0, ('a', 'c2'): 0,
... ('_', '_a', 'c3'): 0}
>>> names = get_component_names(instance, False)
>>> frozenset(names) == frozenset({'c3', 'c2', ('sub1', 'c1'), 'sub1', 'a',
... ('a', ('sub1', 'c1')), ('a', 'c2'),
... 'c1'})
True
>>> names = get_component_names(instance, True)
>>> frozenset(names) == frozenset()
True
>>> instance = {('relation1', ('sub1', 'c1'), 'o3'): True}
>>> names = get_component_names(instance, False)
>>> frozenset(names) == frozenset({'o3', ('relation1', ('sub1', 'c1'),
... 'o3'), 'sub1', ('sub1', 'c1'),
... 'c1', 'relation1'})
True
"""
names = set()
for attr in instance:
for name in get_attribute_components(attr, vars_only):
names.add(name)
return names
def rename_flat(target, mapping):
"""
Given an instance and a mapping rename the components and relations and
return the renamed instance.
    :param target: An instance to be renamed according to a mapping
    :type target: instance
    :param mapping: A dictionary of mappings between component names
:type mapping: dict
:return: A copy of the instance with components and relations renamed
:rtype: instance
>>> import pprint
>>> instance = {('a', '?c1'): 1, ('good', '?c1'): True}
>>> mapping = {'?c1': '?o1'}
>>> renamed = rename_flat(instance,mapping)
>>> pprint.pprint(renamed)
{('a', '?o1'): 1, ('good', '?o1'): True}
"""
temp_instance = {}
for attr in target:
if attr in mapping:
temp_instance[mapping[attr]] = target[attr]
elif isinstance(attr, tuple):
temp_instance[rename_relation(attr, mapping)] = target[attr]
else:
temp_instance[attr] = target[attr]
return temp_instance
def bind_flat_attr(attr, mapping):
"""
Renames an attribute given a mapping.
:param attr: The attribute to be renamed
:type attr: str or tuple
:param mapping: A dictionary of mappings between component names
:type mapping: dict
    :return: The attribute with mapped components renamed; components missing from
        the mapping are left unchanged
    :rtype: tuple
>>> attr = ('before', '?c1', '?c2')
>>> mapping = {'?c1': '?o1', '?c2':'?o2'}
>>> bind_flat_attr(attr, mapping)
('before', '?o1', '?o2')
>>> attr = ('ordered-list', ('cells', '?obj12'), '?obj10', '?obj11')
>>> mapping = {'?obj12': '?o1', '?obj10':'?o2', '?obj11': '?o3'}
>>> bind_flat_attr(attr, mapping)
('ordered-list', ('cells', '?o1'), '?o2', '?o3')
If the mapping is incomplete then returns partially mapped attributes
>>> attr = ('before', '?c1', '?c2')
>>> mapping = {'?c1': 'o1'}
>>> bind_flat_attr(attr, mapping)
('before', 'o1', '?c2')
>>> bind_flat_attr(('<', ('a', '?o2'), ('a', '?o1')), {'?o1': '?c1'})
('<', ('a', '?o2'), ('a', '?c1'))
>>> bind_flat_attr(('<', ('a', '?o2'), ('a', '?o1')),
... {'?o1': '?c1', '?o2': '?c2'})
('<', ('a', '?c2'), ('a', '?c1'))
"""
return tuple([bind_flat_attr(ele, mapping) if isinstance(ele, tuple)
else mapping[ele] if ele in mapping else ele for ele in
attr])
def contains_component(component, attr):
"""
Return ``True`` if the given component name is in the attribute, either as
part of a hierarchical name or within a relations otherwise ``False``.
:param component: A component name
:type component: str
:param attr: An attribute name
    :type attr: str
:return: ``True`` if the component name exists inside the attribute name
``False`` otherwise
:rtype: bool
>>> contains_component('?c1', ('relation', '?c2', ('a', '?c1')))
True
>>> contains_component('?c3', ('before', '?c1', '?c2'))
False
"""
if isinstance(attr, tuple):
for ele in attr:
if contains_component(component, ele) is True:
return True
return attr == component
def flat_match(target, base, initial_mapping=None):
"""
Given a base (usually concept) and target (instance or concept av table)
this function returns a mapping that can be used to rename components in
the target. Search is used to find a mapping that maximizes the expected
number of correct guesses in the concept after incorporating the instance.
The current approach is to refine the initially provided mapping using a
local hill-climbing search. If no initial mapping is provided then one is
generated using the Munkres / Hungarian matching on object-to-object
assignment (no relations). This initialization approach is polynomial in
the size of the base.
:param target: An instance or concept.av_counts object to be mapped to the
base concept.
:type target: :ref:`Instance<instance-rep>` or av_counts obj from concept
:param base: A concept to map the target to
:type base: TrestleNode
:param initial_mapping: An initial mapping to seed the local search
:type initial_mapping: A mapping dict
:return: a mapping for renaming components in the instance.
:rtype: dict
"""
inames = frozenset(get_component_names(target))
cnames = frozenset(get_component_names(base.av_counts))
if(len(inames) == 0 or len(cnames) == 0):
return {}
if len(inames.intersection(cnames)) > 0:
raise Exception("Objects in target and base must not collide. "
"Consider running NameStandardizer first.")
# TODO consider flipping target and base when one is larger than the other.
if initial_mapping is None:
initial_mapping = hungarian_mapping(inames, cnames, target, base)
else:
initial_mapping = frozenset([(a, v) for a, v in initial_mapping if a in
inames and v in cnames])
unmapped = cnames - frozenset(dict(initial_mapping).values())
# print("MATCHING", initial_mapping, target, base)
initial_cost = mapping_cost(initial_mapping, target, base)
op_problem = StructureMappingOptProblem((initial_mapping, unmapped),
initial_cost=initial_cost,
extra=(target, base))
solution = next(hill_climbing(op_problem))
return dict(solution.state_node.state[0])
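# Illustrative usage sketch (added for clarity; `instance` and `concept` are
# hypothetical stand-ins for a preprocessed instance and a TrestleNode, and
# this helper is not part of the original API).
def _example_flat_match(instance, concept):
    # Find the renaming of the instance's objects that best aligns it with
    # the concept, then apply that renaming before incorporating the instance.
    mapping = flat_match(instance, concept)
    return rename_flat(instance, mapping)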
def hungarian_mapping(inames, cnames, target, base):
"""
Utilizes the hungarian/munkres matching algorithm to compute an initial
mapping of inames to cnames. The base cost is the expected correct guesses
if each object is matched to itself (i.e., a new object). Then the cost of
each object-object match is evaluated by setting each individual object and
computing the expected correct guesses.
:param inames: the target component names
:type inames: collection
:param cnames: the base component names
:type cnames: collection
:param target: An instance or concept.av_counts object to be mapped to the
base concept.
:type target: :ref:`Instance<instance-rep>` or av_counts obj from concept
:param base: A concept to map the target to
:type base: TrestleNode
:return: a mapping for renaming components in the instance.
:rtype: frozenset
"""
cnames = list(cnames)
inames = list(inames)
cost_matrix = []
for o in inames:
row = []
for c in cnames:
nm = {}
nm[o] = c
cost = mapping_cost({o: c}, target, base)
row.append(cost)
unmapped_cost = mapping_cost({}, target, base)
for other_o in inames:
if other_o == o:
row.append(unmapped_cost)
else:
row.append(float('inf'))
cost_matrix.append(row)
m = Munkres()
indices = m.compute(cost_matrix)
# comments for using scipy hungarian
# indices = linear_sum_assignment(cost_matrix)
mapping = {}
# for i in range(len(indices[0])):
# row = indices[0][i]
# col = indices[1][i]
for row, col in indices:
if col >= len(cnames):
mapping[inames[row]] = inames[row]
else:
mapping[inames[row]] = cnames[col]
return frozenset(mapping.items())
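# Worked sketch of the cost matrix built above (illustrative): with
# inames = [o1, o2] and cnames = [c1], the matrix has one column per concept
# object plus one "leave unmapped" column per instance object. Off-diagonal
# unmapped cells are +inf so each object can only take its own unmapped slot:
#
#                  c1                 o1 unmapped        o2 unmapped
#     o1   mapping_cost({o1: c1})   mapping_cost({})         inf
#     o2   mapping_cost({o2: c1})         inf          mapping_cost({})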
def mapping_cost(mapping, target, base):
"""
Used to evaluate a mapping between a target and a base. This is performed
by renaming the target using the mapping, adding it to the base and
evaluating the expected number of correct guesses in the newly updated
concept.
:param mapping: the mapping of target items to base items
:type mapping: frozenset or dict
:param target: the target
:type target: an instance or concept.av_counts
:param base: the base
:type base: a concept
"""
if isinstance(mapping, frozenset):
mapping = dict(mapping)
if not isinstance(mapping, dict):
raise Exception("mapping must be dict or frozenset")
renamed_target = rename_flat(target, mapping)
# Need to ensure structure mapping is not used internally here.
# (i.e., there is no infinite recursion)
temp_base = Cobweb3Node()
temp_base.update_counts_from_node(base)
temp_base.tree = base.tree
# check if it is an av_counts table, then create concept to deal with it.
if isinstance(next(iter(renamed_target.values())), dict):
temp_target = Cobweb3Node()
temp_target.av_counts = renamed_target
temp_target.count = max([sum([renamed_target[attr][val].num if val ==
cv_key else renamed_target[attr][val] for
val in renamed_target[attr]]) for attr in
renamed_target])
temp_base.update_counts_from_node(temp_target)
else:
temp_base.increment_counts(renamed_target)
return -temp_base.expected_correct_guesses()
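# Illustrative sketch (hypothetical names, not part of the original module):
# mapping_cost returns the negated expected correct guesses, so a lower value
# means a better alignment between target and base.
def _example_pick_better_mapping(target, base, mapping_a, mapping_b):
    # Keep whichever candidate mapping yields the lower (better) cost.
    if mapping_cost(mapping_a, target, base) <= mapping_cost(mapping_b, target, base):
        return mapping_a
    return mapping_b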
class StructureMappingOptProblem(Problem):
"""
A class for describing a structure mapping problem to be solved using the
`py_search <http://py-search.readthedocs.io/>`_ library. This class defines
the node_value, the successor, and goal_test methods used by the search
library.
Unlike StructureMappingProblem, this class uses a local search approach;
i.e., given an initial mapping it tries to improve the mapping by permuting
it.
"""
def goal_test(self, node, goal=None):
"""
This should always return False, so it never terminates early.
"""
return False
def node_value(self, node):
"""
The value of a node (based on mapping_cost).
"""
# return node.cost()
mapping, unmapped_cnames = node.state
target, base = node.extra
return mapping_cost(mapping, target, base)
def swap_two(self, o1, o2, mapping, unmapped_cnames, target, base, node):
"""
returns the child node generated from swapping two mappings.
"""
new_mapping = {a: mapping[a] for a in mapping}
if mapping[o2] == o2:
new_mapping[o1] = o1
else:
new_mapping[o1] = mapping[o2]
if mapping[o1] == o1:
new_mapping[o2] = o2
else:
new_mapping[o2] = mapping[o1]
new_mapping = frozenset(new_mapping.items())
return Node((new_mapping, unmapped_cnames), extra=node.extra)
def swap_unnamed(self, o1, o2, mapping, unmapped_cnames, target, base,
node):
"""
Returns the child node generated from assigning an unmapped component
object to one of the instance objects.
"""
new_mapping = {a: mapping[a] for a in mapping}
new_unmapped_cnames = set(unmapped_cnames)
new_unmapped_cnames.remove(o2)
if mapping[o1] != o1:
new_unmapped_cnames.add(new_mapping[o1])
new_mapping[o1] = o2
new_mapping = frozenset(new_mapping.items())
return Node((new_mapping,
frozenset(new_unmapped_cnames)), extra=node.extra)
def random_successor(self, node):
"""
Similar to the successor function, but generates only a single random
successor.
"""
mapping, unmapped_cnames = node.state
target, base = node.extra
mapping = dict(mapping)
o1 = choice(list(mapping))
while mapping[o1] == o1 and len(unmapped_cnames) == 0:
o1 = choice(list(mapping))
possible_flips = [v for v in mapping if (v != o1 and
not (mapping[o1] == o1 or
mapping[v] == v))]
if random() <= len(possible_flips) / (len(possible_flips) +
len(unmapped_cnames)):
o2 = choice(possible_flips)
return self.swap_two(o1, o2, mapping, unmapped_cnames, target,
base, node)
else:
o2 = choice(list(unmapped_cnames))
return self.swap_unnamed(o1, o2, mapping, unmapped_cnames, target,
base, node)
def successors(self, node):
"""
An iterator that returns all successors.
"""
mapping, unmapped_cnames = node.state
target, base = node.extra
mapping = dict(mapping)
for o1, o2 in combinations(mapping, 2):
if o1 == o2 or (mapping[o1] == o1 and mapping[o2] == o2):
continue
yield self.swap_two(o1, o2, mapping, unmapped_cnames, target,
base, node)
for o1 in mapping:
for o2 in unmapped_cnames:
yield self.swap_unnamed(o1, o2, mapping, unmapped_cnames,
target, base, node)
def is_partial_match(iAttr, cAttr, mapping):
"""
Returns True if the instance attribute (iAttr) partially matches the
concept attribute (cAttr) given the mapping.
:param iAttr: An attribute in an instance
:type iAttr: str or tuple
:param cAttr: An attribute in a concept
:type cAttr: str or tuple
:param mapping: A mapping between between attribute names
:type mapping: dict
:return: ``True`` if the instance attribute matches the concept attribute
in the mapping otherwise ``False``
:rtype: bool
>>> is_partial_match(('<', ('a', '?o2'), ('a', '?o1')),
... ('<', ('a', '?c2'), ('b', '?c1')), {'?o1': '?c1'})
False
>>> is_partial_match(('<', ('a', '?o2'), ('a', '?o1')),
... ('<', ('a', '?c2'), ('a', '?c1')), {'?o1': '?c1'})
True
>>> is_partial_match(('<', ('a', '?o2'), ('a', '?o1')),
... ('<', ('a', '?c2'), ('a', '?c1')),
... {'?o1': '?c1', '?o2': '?c2'})
True
"""
if type(iAttr) != type(cAttr):
return False
if isinstance(iAttr, tuple) and len(iAttr) != len(cAttr):
return False
if isinstance(iAttr, tuple):
for i, v in enumerate(iAttr):
if not is_partial_match(iAttr[i], cAttr[i], mapping):
return False
return True
if iAttr[0] == '?' and iAttr in mapping:
return mapping[iAttr] == cAttr
if iAttr[0] == '?' and cAttr[0] == '?' and iAttr not in mapping:
return True
return iAttr == cAttr
class StructureMapper(Preprocessor):
"""
Structure maps an instance that has been appropriately preprocessed (i.e.,
standardized apart, flattened, subcomponent processed, and lists processed
out). Transform renames the instance based on this structure mapping and
returns the renamed instance.
:param base: A concept to structure map the instance to
:type base: TrestleNode
:param gensym: a function that returns unique object names (str) on each
call
:type gensym: function
:return: A flattened and mapped copy of the instance
:rtype: instance
"""
def __init__(self, base):
self.base = base
self.mapping = None
self.reverse_mapping = None
def get_mapping(self):
"""
Returns the currently established mapping.
:return: The current mapping.
:rtype: dict
"""
return {self.reverse_mapping[o]: o for o in self.reverse_mapping}
def transform(self, target, initial_mapping=None):
"""
Transforms a provided target (either an instance or an av_counts table
from a CobwebNode or Cobweb3Node).
:param target: An instance or av_counts table to rename to bring into
alignment with the provided base.
:type target: instance or av_counts table (from CobwebNode or
Cobweb3Node).
:return: The renamed instance or av_counts table
:rtype: instance or av_counts table
"""
self.mapping = flat_match(target, self.base, initial_mapping)
self.reverse_mapping = {self.mapping[o]: o for o in self.mapping}
return rename_flat(target, self.mapping)
def undo_transform(self, target):
"""
Takes a transformed target and reverses the structure mapping using the
mapping discovered by transform.
:param target: A previously renamed instance or av_counts table to
reverse the structure mapping on.
:type target: previously structure mapped instance or av_counts table
(from CobwebNode or Cobweb3Node).
:return: An instance or concept av_counts table with original object
names
:rtype: dict
"""
if self.reverse_mapping is None:
raise Exception("Must transform before undoing transform")
return rename_flat(target, self.reverse_mapping)
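# Illustrative usage sketch (hypothetical `concept` and preprocessed
# `instance`; not part of the original API): transform renames the instance
# into the concept's object namespace, and undo_transform restores the
# instance's own object names afterwards.
def _example_structure_mapper(concept, instance):
    sm = StructureMapper(concept)
    mapped = sm.transform(instance)       # renamed to the concept's objects
    restored = sm.undo_transform(mapped)  # back to the original object names
    return mapped, restored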
| 36.099819 | 79 | 0.613896 |
f184ff2b8cb318f713241ef5bd309aed1f147681
| 1,847 |
py
|
Python
|
battery.py
|
xenbyte/i3blocks-battery
|
63d890a5983febb48f87c4778564f04878c7e767
|
[
"MIT"
] | null | null | null |
battery.py
|
xenbyte/i3blocks-battery
|
63d890a5983febb48f87c4778564f04878c7e767
|
[
"MIT"
] | null | null | null |
battery.py
|
xenbyte/i3blocks-battery
|
63d890a5983febb48f87c4778564f04878c7e767
|
[
"MIT"
] | 1 |
2018-09-06T05:52:05.000Z
|
2018-09-06T05:52:05.000Z
|
#!/usr/bin/env python3
import subprocess
battery = subprocess.check_output(["acpi"], universal_newlines=True)
if not battery:
output = '<span font="FontAwesome" color="#fff">\uf12a Error</span>'  # assumed fix: FontAwesome glyph \uf12a rather than a form-feed escape
else:
battery_information = battery.split("\n")[0].split(", ")
state = battery_information[0].split(":")[1]
percent = int(battery_information[1].rstrip("%"))
if state != " Full":
time = battery_information[2].split(" ")[0]
else:
time = "FULL"
class Status(object):
color = ""
icon = ""
percent = ""
time = ""
def __init__(self, color, icon, percent, time):
self.color = color
self.icon = icon
self.percent = str(percent) + "% "
self.time = time
def status_view(percent):
if percent <= 3:
return Status("#F30", "\uf244 ", percent, time)
if percent <= 5:
return Status("#F30", "\uf244 ", percent, time)
if percent <= 15:
return Status("#F30", "\uf243 ", percent, time)
if percent <= 25:
return Status("#FC0", "\uf243 ", percent, time)
if percent <= 50:
return Status("#FFF", "\uf242 ", percent, time)
if percent <= 75:
return Status("#FFF", "\uf241 ", percent, time)
return Status("#FFF", "\uf240 ", percent, time)
def state_view(state):
if state == " Discharging":
return ""
else:
return "\uf1e6"
status = status_view(percent)
template = '''<span font="FontAwesome"><span>{}</span><span color="{}">{}</span><span font="Roboto">{}<span>[{}]</span></span></span>'''
output = template.format(
state_view(state),
status.color,
status.icon,
str(status.percent),
status.time)
print(output)
| 29.790323 | 140 | 0.537087 |
46fb6a0c92cf540ff01c051570736f0d412e859e
| 1,315 |
py
|
Python
|
api_version.py
|
AinsteinAI/wayv_air_api
|
dcda0ada9801ecc18f06cdcf5588b4a599dae327
|
[
"MIT"
] | 1 |
2021-05-14T17:29:31.000Z
|
2021-05-14T17:29:31.000Z
|
api_version.py
|
AinsteinAI/wayv_air_api
|
dcda0ada9801ecc18f06cdcf5588b4a599dae327
|
[
"MIT"
] | 1 |
2021-05-26T13:27:33.000Z
|
2021-05-26T14:56:53.000Z
|
api_version.py
|
AinsteinAI/wayv_air_api
|
dcda0ada9801ecc18f06cdcf5588b4a599dae327
|
[
"MIT"
] | null | null | null |
'''
api_version.py
Copyright 2020, Ainstein Inc. All Rights Reserved
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact: [email protected]
This version number is specific to the code that is stored in the wayv_air_api
repo, and NOT in the wayv_air. This version number should increment for any PR
that changes code in the wayv_air_api repo.
'''
VERSION_MAJOR = '1' # increment this when non-backwards-compatible changes are made
VERSION_MINOR = '2' # increment this when backwards-compatible changes are made
VERSION_BF = '0' # increment this when bugs are fixed
class api_version():
def __init__(self):
self.version = (VERSION_MAJOR + '.'
+ VERSION_MINOR + '.'
+ VERSION_BF)
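# Illustrative note (follows directly from the constants above): with the
# values defined in this file, api_version().version evaluates to "1.2.0".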
| 37.571429 | 84 | 0.739924 |
a85104da172675d057533dfd154feae3347f1d7c
| 3,236 |
py
|
Python
|
configs/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb.py
|
Naoki-Wake/mmaction2
|
a2032605db82509744a18d993c94a06feb1efd15
|
[
"Apache-2.0"
] | 648 |
2021-06-24T19:33:09.000Z
|
2022-03-31T06:27:24.000Z
|
configs/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb.py
|
jayleicn/mmaction2-1
|
0a6fde1abb8403f1f68b568f5b4694c6f828e27e
|
[
"Apache-2.0"
] | 53 |
2021-07-01T03:07:52.000Z
|
2022-03-27T16:15:29.000Z
|
configs/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb.py
|
jayleicn/mmaction2-1
|
0a6fde1abb8403f1f68b568f5b4694c6f828e27e
|
[
"Apache-2.0"
] | 117 |
2021-06-25T01:22:32.000Z
|
2022-03-31T08:33:55.000Z
|
_base_ = [
'../../_base_/models/tsm_r50.py', '../../_base_/schedules/sgd_tsm_50e.py',
'../../_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(
non_local=((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)),
non_local_cfg=dict(
sub_sample=True,
use_scale=False,
norm_cfg=dict(type='BN3d', requires_grad=True),
mode='dot_product')))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(
type='MultiScaleCrop',
input_size=224,
scales=(1, 0.875, 0.75, 0.66),
random_crop=False,
max_wh_scale_gap=1,
num_fixed_crops=13),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=8,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=8,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# runtime settings
work_dir = './work_dirs/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb/'
| 33.020408 | 78 | 0.637824 |
379f320cc8d5231addd140113393c1e4c9758dbd
| 7,335 |
py
|
Python
|
neural_compressor/ux/utils/workload/tuning.py
|
intel/neural-compressor
|
16a4a12045fcb468da4d33769aff2c1a5e2ba6ba
|
[
"Apache-2.0"
] | 172 |
2021-09-14T18:34:17.000Z
|
2022-03-30T06:49:53.000Z
|
neural_compressor/ux/utils/workload/tuning.py
|
intel/lp-opt-tool
|
130eefa3586b38df6c0ff78cc8807ae273f6a63f
|
[
"Apache-2.0"
] | 40 |
2021-09-14T02:26:12.000Z
|
2022-03-29T08:34:04.000Z
|
neural_compressor/ux/utils/workload/tuning.py
|
intel/neural-compressor
|
16a4a12045fcb468da4d33769aff2c1a5e2ba6ba
|
[
"Apache-2.0"
] | 33 |
2021-09-15T07:27:25.000Z
|
2022-03-25T08:30:57.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration tuning module."""
from typing import Any, Dict, List, Optional, Union
from neural_compressor.ux.utils.exceptions import ClientErrorException
from neural_compressor.ux.utils.json_serializer import JsonSerializer
from neural_compressor.ux.utils.utils import (
parse_bool_value,
parse_to_float_list,
parse_to_string_list,
)
class Strategy(JsonSerializer):
"""Configuration Strategy class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize configuration Strategy class."""
super().__init__()
# [Required] One of neural_compressor.strategy.STRATEGIES
self.name: str = data.get("name", "basic")
self.sigopt_api_token: Optional[str] = data.get("sigopt_api_token", None)
self.accuracy_weight: Optional[float] = data.get("accuracy_weight", None)
self.latency_weight: Optional[float] = data.get("latency_weight", None)
class MultiObjectives(JsonSerializer):
"""Configuration MultiObjectives class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize configuration MultiObjectives class."""
super().__init__()
self._objective: List[str] = data.get("objective", [])
self._weight: List[float] = data.get("weight", [])
@property
def objective(self) -> List[str]:
"""Get objectives."""
return self._objective
@objective.setter
def objective(self, value: Union[None, str, List[str]]) -> None:
"""Set inputs value."""
self._objective = parse_to_string_list(value)
@property
def weight(self) -> List[float]:
"""Get weights."""
return self._weight
@weight.setter
def weight(self, value: Union[None, float, List[float]]) -> None:
"""Set weights value."""
self._weight = parse_to_float_list(value)
class AccCriterion(JsonSerializer):
"""Configuration AccCriterion class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize configuration AccCriterion class."""
super().__init__()
self.relative: Optional[float] = data.get(
"relative",
None,
) # [Optional] (INT8-FP32)/FP32
self.absolute: Optional[float] = data.get(
"absolute",
None,
) # [Optional] INT8-FP32
# Set default accuracy criterion to relative
if self.relative is None and self.absolute is None:
self.relative = 0.1
class ExitPolicy(JsonSerializer):
"""Configuration ExitPolicy class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize Configuration ExitPolicy class."""
super().__init__()
self.timeout: Optional[int] = data.get("timeout", None)
self.max_trials: Optional[int] = data.get("max_trials", None)
self.performance_only: Optional[bool] = data.get("performance_only", None)
class Workspace(JsonSerializer):
"""Configuration Workspace class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize Configuration Workspace class."""
super().__init__()
self.path: Optional[str] = data.get("path", None) # [Optional]
self.resume: Optional[str] = data.get("resume", None) # [Optional]
class Tuning(JsonSerializer):
"""Configuration Tuning class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize Configuration Tuning class."""
super().__init__()
self.strategy: Strategy = Strategy()
if data.get("strategy"):
self.strategy = Strategy(data.get("strategy", {}))
self.accuracy_criterion: AccCriterion = AccCriterion(
data.get("accuracy_criterion", {}),
)
self.multi_objectives: Optional[MultiObjectives] = None
if data.get("multi_objectives"):
self.multi_objectives = MultiObjectives(data.get("multi_objectives", {}))
self.exit_policy: Optional[ExitPolicy] = None
if data.get("exit_policy"):
self.exit_policy = ExitPolicy(data.get("exit_policy", {}))
self.random_seed: Optional[int] = data.get("random_seed", None)
self.tensorboard: Optional[bool] = data.get("tensorboard", None)
self.workspace: Optional[Workspace] = None
if data.get("workspace", {}):
self.workspace = Workspace(data.get("workspace", {}))
def set_timeout(self, timeout: int) -> None:
"""Update tuning timeout in config."""
try:
timeout = int(timeout)
if timeout < 0:
raise ValueError
except ValueError:
raise ClientErrorException(
"The timeout value is not valid. " "Timeout should be non negative integer.",
)
if self.exit_policy:
self.exit_policy.timeout = timeout
else:
self.exit_policy = ExitPolicy({"timeout": timeout})
def set_max_trials(self, max_trials: int) -> None:
"""Update max tuning trials in config."""
try:
max_trials = int(max_trials)
if max_trials < 0:
raise ValueError
except ValueError:
raise ClientErrorException(
"The max trials value is not valid. " "Max trials should be non negative integer.",
)
if self.exit_policy:
self.exit_policy.max_trials = max_trials
else:
self.exit_policy = ExitPolicy({"max_trials": max_trials})
def set_performance_only(self, performance_only: Any) -> None:
"""Update performance only flag in config."""
try:
performance_only = parse_bool_value(performance_only)
except ValueError:
raise ClientErrorException(
"The performance_only flag value is not valid. "
"Performance_ony should be a boolean.",
)
if self.exit_policy:
self.exit_policy.performance_only = performance_only
else:
self.exit_policy = ExitPolicy({"performance_only": performance_only})
def set_random_seed(self, random_seed: int) -> None:
"""Update random seed value in config."""
try:
random_seed = int(random_seed)
except ValueError:
raise ClientErrorException(
"The random seed value is not valid. " "Random seed should be an integer.",
)
self.random_seed = random_seed
def set_workspace(self, path: str) -> None:
"""Update tuning workspace path in config."""
if self.workspace is None:
self.workspace = Workspace()
self.workspace.path = path
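# Illustrative sketch (hypothetical values, not part of the module): building a
# Tuning section from a plain dict and tightening the exit policy afterwards.
def _example_tuning_section() -> Tuning:
    tuning = Tuning({"accuracy_criterion": {"relative": 0.01}})
    tuning.set_timeout(3600)   # stop tuning after an hour ...
    tuning.set_max_trials(50)  # ... or after 50 trials, whichever comes first
    return tuning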
| 35.780488 | 99 | 0.626994 |
c134de5205ad1bff48d4c2ab82d27fd640743916
| 3,239 |
py
|
Python
|
icees_api/trapi.py
|
davefol/icees-api
|
d2eb26a29f319efd56ad5216e661fa9ed62407b0
|
[
"MIT"
] | null | null | null |
icees_api/trapi.py
|
davefol/icees-api
|
d2eb26a29f319efd56ad5216e661fa9ed62407b0
|
[
"MIT"
] | null | null | null |
icees_api/trapi.py
|
davefol/icees-api
|
d2eb26a29f319efd56ad5216e661fa9ed62407b0
|
[
"MIT"
] | null | null | null |
"""TRAPI FastAPI wrapper."""
import os
from typing import Any, Dict, List, Optional
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from starlette.middleware.cors import CORSMiddleware
class TRAPI(FastAPI):
"""Translator Reasoner API - wrapper for FastAPI."""
required_tags = [
{"name": "translator"},
{"name": "trapi"},
]
def __init__(
self,
*args,
contact: Optional[Dict[str, Any]] = None,
terms_of_service: Optional[str] = None,
translator_component: Optional[str] = None,
translator_teams: Optional[List[str]] = None,
trapi_operations: Optional[List[str]] = None,
**kwargs,
):
super().__init__(
*args,
root_path_in_servers=False,
**kwargs,
)
self.contact = contact
self.terms_of_service = terms_of_service
self.translator_component = translator_component
self.translator_teams = translator_teams
self.trapi_operations = trapi_operations
CORS_OPTIONS = dict(
allow_origins=['*'],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
self.add_middleware(
CORSMiddleware,
**CORS_OPTIONS,
)
def openapi(self) -> Dict[str, Any]:
"""Build custom OpenAPI schema."""
if self.openapi_schema:
return self.openapi_schema
tags = self.required_tags
if self.openapi_tags:
tags += self.openapi_tags
openapi_schema = get_openapi(
title=self.title,
version=self.version,
openapi_version=self.openapi_version,
description=self.description,
routes=self.routes,
tags=tags,
)
openapi_schema["servers"] = self.servers
openapi_schema["info"]["x-translator"] = {
"component": self.translator_component,
"team": self.translator_teams,
"externalDocs": {
"description": "The values for component and team are restricted according to this external JSON schema. See schema and examples at url",
"url": "https://github.com/NCATSTranslator/translator_extensions/blob/production/x-translator/",
},
"infores": os.getenv("ICEES_INFORES_CURIE", "infores:icees")
}
openapi_schema["info"]["x-trapi"] = {
"version": "1.2.0",
"externalDocs": {
"description": "The values for version are restricted according to the regex in this external JSON schema. See schema and examples at url",
"url": "https://github.com/NCATSTranslator/translator_extensions/blob/production/x-trapi/",
},
"operations": [
"lookup",
],
}
if self.trapi_operations:
openapi_schema["info"]["x-trapi"]["operations"] = self.trapi_operations
openapi_schema["info"]["contact"] = self.contact
openapi_schema["info"]["termsOfService"] = self.terms_of_service
self.openapi_schema = openapi_schema
return self.openapi_schema
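# Illustrative sketch (hypothetical metadata values, not part of the module):
# the wrapper is instantiated like a plain FastAPI app, with the extra
# Translator fields passed as keyword arguments.
def _example_app() -> TRAPI:
    return TRAPI(
        title="Example TRAPI service",
        version="0.0.0",
        translator_component="KP",
        translator_teams=["Example Team"],
    )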
| 34.094737 | 155 | 0.590923 |
ea7ac069b2fffdf21d2f341b488415c31c9e9888
| 7,650 |
py
|
Python
|
dynamix/correlator/cuda.py
|
kif/dynamix
|
163323ccb083bb069d88f3b51c0dfb3141f5fd37
|
[
"MIT"
] | 3 |
2021-01-12T05:46:51.000Z
|
2021-04-13T15:05:42.000Z
|
dynamix/correlator/cuda.py
|
kif/dynamix
|
163323ccb083bb069d88f3b51c0dfb3141f5fd37
|
[
"MIT"
] | 18 |
2019-08-23T08:42:36.000Z
|
2021-09-13T15:14:38.000Z
|
dynamix/correlator/cuda.py
|
kif/dynamix
|
163323ccb083bb069d88f3b51c0dfb3141f5fd37
|
[
"MIT"
] | 6 |
2019-08-01T12:10:52.000Z
|
2021-12-09T14:55:02.000Z
|
import numpy as np
from ..utils import nextpow2, updiv, get_next_power
from .dense import MatMulCorrelator, FFTCorrelator
try:
from silx.math.fft.cufft import CUFFT
import pycuda.gpuarray as garray
from pycuda.compiler import SourceModule
from pycuda.driver import Memcpy2D, Memcpy3D
except ImportError:
CUFFT = None
try:
import skcuda.linalg as cublas
import skcuda.misc as skmisc
except ImportError:
cublas = None
class CublasMatMulCorrelator(MatMulCorrelator):
"""
The CublasMatMulCorrelator is a CUDA-accelerated version of MatMulCorrelator.
"""
def __init__(self, shape, nframes,
qmask=None,
scale_factor=None,
extra_options={}):
"""
Initialize a CUBLAS matrix multiplication correlator.
Please refer to the documentation of BaseCorrelator for the documentation
of each parameter.
Extra options
--------------
cublas_handle: int
If provided, use this cublas handle instead of creating a new one.
"""
if cublas is None:
raise ImportError("scikit-cuda is needed to use this correlator")
super().__init__(
shape, nframes,
qmask=qmask, scale_factor=scale_factor, extra_options=extra_options
)
self._init_cublas()
self._compile_kernels()
def _init_cublas(self):
import pycuda.autoinit
if "cublas_handle" in self.extra_options:
handle = self.extra_options["cublas_handle"]
else:
handle = skmisc._global_cublas_handle
if handle is None:
cublas.init() # cublas handle + allocator
handle = skmisc._global_cublas_handle
self.cublas_handle = handle
def _compile_kernels(self):
mod = SourceModule(
"""
// Extract the upper diagonals of a square (N, N) matrix.
__global__ void extract_upper_diags(float* matrix, float* diags, int N) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if ((x >= N) || (y >= N) || (y > x)) return;
int pos = y*N+x;
int my_diag = x-y;
diags[my_diag * N + x] = matrix[pos];
}
"""
)
self.extract_diags_kernel = mod.get_function("extract_upper_diags")
self._blocks = (32, 32, 1)
self._grid = (
updiv(self.nframes, self._blocks[0]),
updiv(self.nframes, self._blocks[1]),
1
)
self.d_diags = garray.zeros((self.nframes, self.nframes), dtype=np.float32)
self.d_sumdiags1 = garray.zeros(self.nframes, dtype=np.float32)
self.d_sumdiags2 = garray.zeros_like(self.d_sumdiags1)
self._kern_args = [
None,
self.d_diags,
np.int32(self.nframes),
]
def sum_diagonals(self, d_arr, d_out):
self.d_diags.fill(0)
self._kern_args[0] = d_arr.gpudata
self.extract_diags_kernel(*self._kern_args, grid=self._grid, block=self._blocks)
skmisc.sum(self.d_diags, axis=1, out=d_out)
def _correlate_matmul_cublas(self, frames_flat, mask):
arr = np.ascontiguousarray(frames_flat[:, mask], dtype=np.float32)
npix = arr.shape[1]
# Pre-allocating memory for all bins might save a bit of time,
# but would take more memory
d_arr = garray.to_gpu(arr)
d_outer = cublas.dot(d_arr, d_arr, transb="T", handle=self.cublas_handle)
d_means = skmisc.mean(d_arr, axis=1, keepdims=True)
d_denom_mat = cublas.dot(d_means, d_means, transb="T", handle=self.cublas_handle)
self.sum_diagonals(d_outer, self.d_sumdiags1)
self.sum_diagonals(d_denom_mat, self.d_sumdiags2)
self.d_sumdiags1 /= self.d_sumdiags2
self.d_sumdiags1 /= npix
return self.d_sumdiags1.get()
def correlate(self, frames):
res = np.zeros((self.n_bins, self.nframes), dtype=np.float32)
frames_flat = frames.reshape((self.nframes, -1))
for i, bin_val in enumerate(self.bins):
mask = (self.qmask.ravel() == bin_val)
res[i] = self._correlate_matmul_cublas(frames_flat, mask)
return res
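# Illustrative usage sketch (hypothetical shapes; assumes a CUDA device with
# pycuda/scikit-cuda installed and that the base-class defaults for qmask and
# scale_factor are acceptable for the data at hand).
def _example_cublas_correlator():
    nframes, shape = 16, (64, 64)
    frames = np.random.rand(nframes, *shape).astype(np.float32)
    correlator = CublasMatMulCorrelator(shape, nframes)
    return correlator.correlate(frames)  # one g2 curve per q-bin, shape (n_bins, nframes)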
class CUFFTCorrelator(FFTCorrelator):
def __init__(self, shape, nframes,
qmask=None,
weights=None,
scale_factor=None,
precompute_fft_plans=False,
extra_options={}):
super().__init__(
shape, nframes, qmask=qmask,
weights=weights, scale_factor=scale_factor,
precompute_fft_plans=precompute_fft_plans, extra_options=extra_options
)
if CUFFT is None:
raise ImportError("pycuda and scikit-cuda need to be installed")
if skmisc._global_cublas_handle is None:
cublas.init()
self._allocate_cuda_arrays()
self._compile_kernels()
def _create_fft_plan(self, npix):
return CUFFT(shape=(npix, self.Nf), dtype=np.float32, axes=(-1,))
def _allocate_cuda_arrays(self):
self.d_sums = garray.zeros(self.Nf, np.float32)
self.d_numerator = self.d_sums[self.nframes-1:self.nframes-1 + self.nframes] # view
self.d_sums_denom_tmp = garray.zeros(self.Nf, np.float32)
self.d_denom = garray.zeros(self.nframes, np.float32)
def _compile_kernels(self):
mod = SourceModule(
"""
// 1D correlation of a N samples array
__global__ void corr1D(float* arr, float* out, int N, float scale_factor) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N) return;
float s = 0.0f;
for (int j = i; j < N; j++) s += arr[j] * arr[j-i];
out[i] = s/scale_factor;
}
"""
)
self.corr1D_kernel = mod.get_function("corr1D")
self._blocks = (32, 1, 1) # tune ?
self._grid = (updiv(self.nframes, self._blocks[0]), 1, 1)
def _correlate_denom(self, npix):
scale_factor = np.float32(npix)
self.corr1D_kernel(
self.d_sums_denom_tmp,
self.d_denom,
np.int32(self.nframes),
scale_factor,
grid=self._grid, block=self._blocks
)
def _correlate_fft(self, frames_flat, cufft_plan):
npix = frames_flat.shape[1]
d_in = cufft_plan.data_in
d_in.fill(0)
f_out1 = cufft_plan.data_out
f_out2 = garray.zeros_like(cufft_plan.data_out)
# fft(pad(frames_flat), axis=1)
d_in[:, :self.nframes] = frames_flat.T.astype("f")
f_out1 = cufft_plan.fft(d_in, output=f_out1)
# frames_flat.sum(axis=1)
# skmisc.sum() only works on base data, not gpuarray views,
# so we sum on the whole array and then extract the right subset.
skmisc.sum(d_in, axis=0, out=self.d_sums_denom_tmp)
# fft(pad(frames_flat[::-1]), axis=1)
d_in.fill(0)
d_in[:, :self.nframes] = frames_flat.T[:, ::-1].astype("f")
f_out2 = cufft_plan.fft(d_in, output=f_out2)
# product, ifft
f_out1 *= f_out2
num = cufft_plan.ifft(f_out1, output=d_in)
# numerator of g_2
skmisc.sum(num, axis=0, out=self.d_sums)
# denominator of g_2: correlate(d_sums_denom)
self._correlate_denom(npix)
self.d_numerator /= self.d_denom
res = self.d_numerator.get()
return res
| 34.615385 | 91 | 0.597255 |
c8c2b1b8bb4f2e19ccaf5e090680b1f272e2a475
| 4,780 |
py
|
Python
|
libartipy/IO/input_output.py
|
Artisense-ai/libartipy
|
7a6a7736637c106e7e9c6763ec8c1dea64db4b01
|
[
"MIT"
] | 5 |
2021-06-15T13:01:51.000Z
|
2021-12-04T04:26:08.000Z
|
libartipy/IO/input_output.py
|
Artisense-ai/libartipy
|
7a6a7736637c106e7e9c6763ec8c1dea64db4b01
|
[
"MIT"
] | null | null | null |
libartipy/IO/input_output.py
|
Artisense-ai/libartipy
|
7a6a7736637c106e7e9c6763ec8c1dea64db4b01
|
[
"MIT"
] | 3 |
2021-06-18T10:37:04.000Z
|
2021-11-15T05:55:29.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Dedicated to reading/writing to files.
# Author Dmytro Bobkov, 2019
import argparse
import os
from typing import List, Tuple, Any
import matplotlib.pyplot as plt
import numpy as np
import cv2
def get_timestamp_from_keyframe_filename(filename: str) -> int:
"""
Extract timestamp from keyframe filename based on some naming conventions
:param filename:
:return:
"""
_, ext = os.path.splitext(filename)
assert ext == ".txt"
ts = get_timestamp_from_filename(filename)
return ts
def filename_sort(file_list: List[str]) -> List[str]:
"""
checks first two elements for same word prefix and sorts keyframe files by their
timestamps whilst maintaining their absolute path
:param file_list: list of filenames
:return: sorted list of filenames
"""
assert len(file_list) > 1, 'for one element, cannot sort.'
# split filename in the form KeyFrame_123.txt into KeyFrame and 123.txt
filename = os.path.basename(file_list[0])
word_prefix = filename.split('_')[0]
assert word_prefix in file_list[1]
sorted_list = sorted(file_list,
key=lambda fname: get_timestamp_from_keyframe_filename(fname))
assert len(sorted_list) == len(file_list)
return sorted_list
def convert_color_from_hex_to_rgb(value: str) -> List[int]:
"""
converts a hex encoded colors to rgb encoded colors
:param value: hex encoded color
:return: rgb value
"""
value = value.lstrip('#')
lv = len(value)
return list(tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3)))
def get_timestamp_from_filename(filename: str) -> int:
"""
:param filename: filepath
:return: timestamp
:raise AssertionError if conversion fails
"""
base = os.path.basename(filename)
file_only, _ = os.path.splitext(base)
parts = file_only.split("_")
assert len(parts) > 0, "No TS in {}".format(file_only)
numerical_part = parts[-1] # Timestamp on the last position in filename
assert numerical_part.isdigit(), 'File {} does not contain number on the last position'.format(file_only)
ts = int(numerical_part)
# TODO(Dmytro): handle conversion failures (ValueError) with a dedicated exception
return ts
def data_to_16bit_png(filepath: str, img_array: np.array) -> str:
"""
Saves arrays to file as 16 bit png
:param filepath:
:param img_array: expects img_array to be np.uint16
:return: path to the file where it is written
"""
assert img_array.dtype == np.uint16, "Input array type is not np.uint16!"
filepath = filepath + ".png"
cv2.imwrite(filepath, img_array.astype(np.uint16))
return filepath
def encode_data_image_to_16bit(data_image: np.ndarray, max_data_value: int = 120) -> np.ndarray:
"""
this method sets all data values above max_data_value to zero, scales it by the max_data_value
and rescales the depth image to the uint16 range.
:param data_image:
:param max_data_value:
:return: image data in 16-bit format
"""
# only consider depth values within max distance and normalize on that
data_image[data_image > max_data_value] = 0
data_image = data_image / max_data_value
# scale depth image to uint16 range
data_image = data_image * np.iinfo(np.uint16).max
return data_image.astype(np.uint16)
def decode_data_image_from_16bit_file(fpath: str, max_data_value: int = 120) -> np.ndarray:
"""
this method performs inverse operation of encode_data_image, by unscaling uint16 range,
and rescaling the data_image with max_data_value.
:param fpath: filename
:param max_data_value: maximum value of range
:return: image data as array
"""
# read uint16 depth values unchanged from file
data_image = cv2.imread(fpath, cv2.IMREAD_UNCHANGED)
# undo uint16 range and scale with max depth
data_image = data_image / np.iinfo(np.uint16).max
data_image *= max_data_value
return data_image
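# Illustrative round trip (hypothetical output path; not part of the module):
# encoding zeroes depths above max_data_value, scales into the uint16 range and
# writes a 16-bit PNG; decoding reverses both scalings.
def _example_depth_roundtrip(depth_m: np.ndarray, out_path: str = "/tmp/depth") -> np.ndarray:
    encoded = encode_data_image_to_16bit(depth_m.copy(), max_data_value=120)
    png_path = data_to_16bit_png(out_path, encoded)  # writes "<out_path>.png"
    return decode_data_image_from_16bit_file(png_path, max_data_value=120)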
def image_to_plasma_png(fname: str, img: np.array) -> None:
"""
saves images to file using the plasma color scheme
:param fname:
:param img:
"""
plt.imsave(fname + '.png', img, cmap='plasma')
def str2bool(v: Any) -> bool:
"""
Converts multiple possible boolean string representations to pythonic True and False.
:param v: string or boolean
:return: true or false depending on the given value
:raise ArgumentTypeError if conversion fails
"""
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
| 29.325153 | 109 | 0.683264 |
c4c8a4bc92dde7a48dfda4091995c104303397a4
| 2,456 |
py
|
Python
|
launcher/config.py
|
jt6562/XX-Net
|
7b78e4820a3c78c3ba3e75b3917129d17f00e9fc
|
[
"BSD-2-Clause"
] | 2 |
2021-07-09T03:41:31.000Z
|
2021-10-04T17:54:36.000Z
|
launcher/config.py
|
jt6562/XX-Net
|
7b78e4820a3c78c3ba3e75b3917129d17f00e9fc
|
[
"BSD-2-Clause"
] | null | null | null |
launcher/config.py
|
jt6562/XX-Net
|
7b78e4820a3c78c3ba3e75b3917129d17f00e9fc
|
[
"BSD-2-Clause"
] | 1 |
2019-06-13T06:33:37.000Z
|
2019-06-13T06:33:37.000Z
|
import os
from instances import xlog
import yaml
from distutils.version import LooseVersion
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir))
data_path = os.path.join(root_path, 'data')
config_path = os.path.join(data_path, 'launcher', 'config.yaml')
config = {}
def load():
global config, config_path
try:
config = yaml.load(file(config_path, 'r'))
#print yaml.dump(config)
except Exception as exc:
print "Error in configuration file:", exc
def save():
global config, config_path
try:
yaml.dump(config, file(config_path, "w"))
except Exception as e:
xlog.warn("save config %s fail %s", config_path, e)
def get(path, default_val=""):
global config
try:
value = default_val
cmd = "config"
for p in path:
cmd += '["%s"]' % p
value = eval(cmd)
return value
except:
return default_val
def _set(m, k_list, v):
k0 = k_list[0]
if len(k_list) == 1:
m[k0] = v
return
if k0 not in m:
m[k0] = {}
_set(m[k0], k_list[1:], v)
def set(path, val):
global config
_set(config, path, val)
def recheck_module_path():
global config
need_save_config = False
modules = ["gae_proxy", "launcher", "php_proxy", "x_tunnel"]
for module in modules:
if module not in ["launcher", "php_proxy", "x_tunnel"]:
if not os.path.isdir(os.path.join(root_path, module)):
del config[module]
continue
if get(["modules", module, "auto_start"], -1) == -1:
set(["modules", module, "auto_start"], 1)
if get(["modules", "launcher", "control_port"], 0) == 0:
set(["modules", "launcher", "control_port"], 8085)
set(["modules", "launcher", "allow_remote_connect"], 0)
if get(["modules", "launcher", "proxy"], 0) == 0:
# default enable PAC on startup.
set(["modules", "launcher", "proxy"], "pac")
#if get(["modules", "gae_proxy", "control_port"], 0) == 0:
# set(["modules", "gae_proxy", "control_port"], 8084)
if get(["modules", "php_proxy", "control_port"], 0) == 0:
set(["modules", "php_proxy", "control_port"], 8083)
return need_save_config
def init():
if os.path.isfile(config_path):
load()
if recheck_module_path():
save()
init()
| 25.319588 | 67 | 0.585098 |
82819dface5824078c96fbef0e10da078e11e5b2
| 11,987 |
py
|
Python
|
vumi/transports/mtech_ussd/tests/test_mtech_ussd.py
|
apopheniac/vumi
|
e04bf32a0cf09292f03dfe8628798adff512b709
|
[
"BSD-3-Clause"
] | null | null | null |
vumi/transports/mtech_ussd/tests/test_mtech_ussd.py
|
apopheniac/vumi
|
e04bf32a0cf09292f03dfe8628798adff512b709
|
[
"BSD-3-Clause"
] | null | null | null |
vumi/transports/mtech_ussd/tests/test_mtech_ussd.py
|
apopheniac/vumi
|
e04bf32a0cf09292f03dfe8628798adff512b709
|
[
"BSD-3-Clause"
] | null | null | null |
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from vumi.transports.tests.utils import TransportTestCase
from vumi.utils import http_request_full
from vumi.message import TransportUserMessage
from vumi.transports.mtech_ussd import MtechUssdTransport
from vumi.transports.mtech_ussd.mtech_ussd import MtechUssdResponse
class TestMtechUssdTransport(TransportTestCase):
timeout = 1
transport_name = 'mtech_ussd'
transport_class = MtechUssdTransport
@inlineCallbacks
def setUp(self):
yield super(TestMtechUssdTransport, self).setUp()
self.config = {
'transport_type': 'ussd',
'ussd_string_prefix': '*120*666#',
'web_path': "/foo",
'web_host': "localhost",
'web_port': 0,
'username': 'testuser',
'password': 'testpass',
}
self.transport = yield self.get_transport(self.config)
self.transport_url = self.transport.get_transport_url().rstrip('/')
self.url = "%s%s" % (self.transport_url, self.config['web_path'])
yield self.transport.session_manager.redis._purge_all() # just in case
def make_ussd_request_full(self, session_id, **kwargs):
lines = [
'<?xml version="1.0" encoding="UTF-8"?>',
'<page version="2.0">',
' <session_id>%s</session_id>' % (session_id,),
]
for k, v in kwargs.items():
lines.append(' <%s>%s</%s>' % (k, v, k))
lines.append('</page>')
data = '\n'.join(lines)
return http_request_full(self.url, data, method='POST')
def make_ussd_request(self, session_id, **kwargs):
return self.make_ussd_request_full(session_id, **kwargs).addCallback(
lambda r: r.delivered_body)
def reply_to_message(self, *args, **kw):
d = self.wait_for_dispatched_messages(1)
def reply(r):
msg = TransportUserMessage(**r[0].payload)
self.dispatch(msg.reply(*args, **kw))
return msg
return d.addCallback(reply)
@inlineCallbacks
def test_empty_request(self):
response = yield http_request_full(self.url, "", method='POST')
self.assertEqual(response.code, 400)
@inlineCallbacks
def test_bad_request(self):
response = yield http_request_full(self.url, "blah", method='POST')
self.assertEqual(response.code, 400)
@inlineCallbacks
def test_inbound_new_continue(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
response_d = self.make_ussd_request(
sid, mobile_number='2348085832481', page_id='0',
data='testmenu', gate='gateid')
msg = yield self.reply_to_message("OK\n1 < 2")
self.assertEqual(msg['transport_name'], self.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_NEW)
self.assertEqual(msg['from_addr'], '2348085832481')
# self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'testmenu')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK<br />1 < 2</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_inbound_resume_continue(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
yield self.transport.save_session(sid, '2348085832481', '*120*666#')
response_d = self.make_ussd_request(sid, page_id="indexX", data="foo")
msg = yield self.reply_to_message("OK")
self.assertEqual(msg['transport_name'], self.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['from_addr'], '2348085832481')
self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'foo')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_nack(self):
msg = self.mkmsg_out()
yield self.dispatch(msg)
[nack] = yield self.wait_for_dispatched_events(1)
self.assertEqual(nack['user_message_id'], msg['message_id'])
self.assertEqual(nack['sent_message_id'], msg['message_id'])
self.assertEqual(nack['nack_reason'],
'Missing in_reply_to, content or session_id')
@inlineCallbacks
def test_inbound_missing_session(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
response = yield self.make_ussd_request_full(
sid, page_id="indexX", data="foo")
self.assertEqual(400, response.code)
self.assertEqual('', response.delivered_body)
@inlineCallbacks
def test_inbound_new_and_resume(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
response_d = self.make_ussd_request(
sid, mobile_number='2348085832481', page_id='0',
data='testmenu', gate='gateid')
msg = yield self.reply_to_message("OK\n1 < 2")
self.assertEqual(msg['transport_name'], self.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_NEW)
self.assertEqual(msg['from_addr'], '2348085832481')
# self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'testmenu')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK<br />1 < 2</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
self.clear_all_dispatched()
response_d = self.make_ussd_request(sid, page_id="indexX", data="foo")
msg = yield self.reply_to_message("OK")
self.assertEqual(msg['transport_name'], self.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['from_addr'], '2348085832481')
self.assertEqual(msg['to_addr'], 'gateid')
self.assertEqual(msg['content'], 'foo')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_inbound_resume_close(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
yield self.transport.save_session(sid, '2348085832481', '*120*666#')
response_d = self.make_ussd_request(sid, page_id="indexX", data="foo")
msg = yield self.reply_to_message("OK", False)
self.assertEqual(msg['transport_name'], self.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['from_addr'], '2348085832481')
self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'foo')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK</div>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_inbound_cancel(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
yield self.transport.save_session(sid, '2348085832481', '*120*666#')
response = yield self.make_ussd_request(sid, status="1")
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'</page>',
])
self.assertEqual(response, correct_response)
class TestMtechUssdResponse(TestCase):
def setUp(self):
self.mur = MtechUssdResponse("sid123")
def assert_message_xml(self, *lines):
xml_str = ''.join(
["<?xml version='1.0' encoding='UTF-8'?>"] + list(lines))
self.assertEqual(self.mur.to_xml(), xml_str)
def test_empty_response(self):
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'</page>')
def test_free_text(self):
self.mur.add_text("Please enter your name")
self.mur.add_freetext_option()
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'<div>Please enter your name</div>',
'<navigation><link accesskey="*" pageId="indexX" /></navigation>',
'</page>')
def test_menu_options(self):
self.mur.add_text("Please choose:")
self.mur.add_menu_item('chicken', '1')
self.mur.add_menu_item('beef', '2')
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'<div>Please choose:</div>',
'<navigation>',
'<link accesskey="1" pageId="index1">chicken</link>',
'<link accesskey="2" pageId="index2">beef</link>',
'</navigation>',
'</page>')
def test_menu_options_title(self):
self.mur.add_title("LUNCH")
self.mur.add_text("Please choose:")
self.mur.add_menu_item('chicken', '1')
self.mur.add_menu_item('beef', '2')
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'<title>LUNCH</title>',
'<div>Please choose:</div>',
'<navigation>',
'<link accesskey="1" pageId="index1">chicken</link>',
'<link accesskey="2" pageId="index2">beef</link>',
'</navigation>',
'</page>')
| 39.301639 | 79 | 0.586719 |
e60c91968a1fb9fa82bc36be22677bc037376821
| 2,320 |
py
|
Python
|
tests/test_integration_unet_2d.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | 3 |
2020-06-22T20:59:14.000Z
|
2021-04-09T21:24:45.000Z
|
tests/test_integration_unet_2d.py
|
scf819/MONAI
|
2fef7ff5c064a9ff6b6d6b4f2323180afed99934
|
[
"Apache-2.0"
] | null | null | null |
tests/test_integration_unet_2d.py
|
scf819/MONAI
|
2fef7ff5c064a9ff6b6d6b4f2323180afed99934
|
[
"Apache-2.0"
] | 1 |
2020-06-22T19:22:59.000Z
|
2020-06-22T19:22:59.000Z
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from ignite.engine import create_supervised_trainer
from torch.utils.data import DataLoader, Dataset
from monai.data import create_test_image_2d
from monai.losses import DiceLoss
from monai.networks.nets import BasicUNet, UNet
from tests.utils import DistTestCase, TimedCall, skip_if_quick
def run_test(net_name="basicunet", batch_size=64, train_steps=100, device="cuda:0"):
class _TestBatch(Dataset):
def __getitem__(self, _unused_id):
im, seg = create_test_image_2d(128, 128, noise_max=1, num_objs=4, num_seg_classes=1)
return im[None], seg[None].astype(np.float32)
def __len__(self):
return train_steps
if net_name == "basicunet":
net = BasicUNet(spatial_dims=2, in_channels=1, out_channels=1, features=(4, 8, 8, 16, 16, 32))
elif net_name == "unet":
net = UNet(
spatial_dims=2, in_channels=1, out_channels=1, channels=(4, 8, 16, 32), strides=(2, 2, 2), num_res_units=2
)
net.to(device)
loss = DiceLoss(sigmoid=True)
opt = torch.optim.Adam(net.parameters(), 1e-4)
src = DataLoader(_TestBatch(), batch_size=batch_size)
trainer = create_supervised_trainer(net, opt, loss, device, False)
trainer.run(src, 1)
loss = trainer.state.output
return loss
@skip_if_quick
class TestIntegrationUnet2D(DistTestCase):
@TimedCall(seconds=20, daemon=False)
def test_unet_training(self):
for n in ["basicunet", "unet"]:
loss = run_test(net_name=n, device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0"))
print(loss)
self.assertGreaterEqual(0.85, loss)
if __name__ == "__main__":
unittest.main()
| 35.692308 | 118 | 0.705603 |