| column | dtype | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3-1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3-972 |
| max_stars_repo_name | string | length 6-130 |
| max_stars_repo_head_hexsha | string | length 40-78 |
| max_stars_repo_licenses | list | length 1-10 |
| max_stars_count | int64 | 1-191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3-972 |
| max_issues_repo_name | string | length 6-130 |
| max_issues_repo_head_hexsha | string | length 40-78 |
| max_issues_repo_licenses | list | length 1-10 |
| max_issues_count | int64 | 1-116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3-972 |
| max_forks_repo_name | string | length 6-130 |
| max_forks_repo_head_hexsha | string | length 40-78 |
| max_forks_repo_licenses | list | length 1-10 |
| max_forks_count | int64 | 1-105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3-1.03M |
| avg_line_length | float64 | 1.13-941k |
| max_line_length | int64 | 2-941k |
| alphanum_fraction | float64 | 0-1 |
7cd7090a5fd5c70d3b7355b87412addc028795b9 | 3,539 | py | Python | bindings/python/ensmallen/datasets/string/trichinellaspiralis.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/trichinellaspiralis.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/trichinellaspiralis.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z |
"""
This file offers the methods to automatically retrieve the graph Trichinella spiralis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def TrichinellaSpiralis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Trichinella spiralis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
    cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Trichinella spiralis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="TrichinellaSpiralis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
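# Usage sketch (illustrative only): assuming the package layout implied by the file
# path above, the retriever could be called with its defaults. `report()` is an
# assumed Graph method and is not defined in this file.
#
#     from ensmallen.datasets.string import TrichinellaSpiralis
#     graph = TrichinellaSpiralis(directed=False, version="links.v11.5")
#     print(graph.report())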
| 32.768519 | 223 | 0.676745 |
8c217c4b4ca7d564cfb3d1430a6726998a9e8e0d | 7,607 | py | Python | textattack/commands/train_model/train_args_helpers.py | ashwani-bhat/TextAttack | 9f5c0794b95779f11bf2a120642db00da2bc4928 | ["MIT"] | null | null | null | textattack/commands/train_model/train_args_helpers.py | ashwani-bhat/TextAttack | 9f5c0794b95779f11bf2a120642db00da2bc4928 | ["MIT"] | null | null | null | textattack/commands/train_model/train_args_helpers.py | ashwani-bhat/TextAttack | 9f5c0794b95779f11bf2a120642db00da2bc4928 | ["MIT"] | null | null | null |
import os
import textattack
from textattack.commands.attack.attack_args import ATTACK_RECIPE_NAMES
from textattack.commands.augment import AUGMENTATION_RECIPE_NAMES
logger = textattack.shared.logger
def prepare_dataset_for_training(nlp_dataset):
"""Changes an `nlp` dataset into the proper format for tokenization."""
def prepare_example_dict(ex):
"""Returns the values in order corresponding to the data.
ex:
'Some text input'
or in the case of multi-sequence inputs:
('The premise', 'the hypothesis',)
etc.
"""
values = list(ex.values())
if len(values) == 1:
return values[0]
return tuple(values)
text, outputs = zip(*((prepare_example_dict(x[0]), x[1]) for x in nlp_dataset))
return list(text), list(outputs)
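# Worked example (illustrative, with made-up rows): for a dataset of
# ({column: text}, label) pairs such as
#     [({"text": "good movie"}, 1), ({"text": "bad plot"}, 0)]
# the helper above returns (["good movie", "bad plot"], [1, 0]); multi-column
# examples like ({"premise": p, "hypothesis": h}, label) yield (p, h) tuples instead.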
def dataset_from_args(args):
"""Returns a tuple of ``HuggingFaceNlpDataset`` for the train and test
datasets for ``args.dataset``."""
dataset_args = args.dataset.split(":")
# TODO `HuggingFaceNlpDataset` -> `HuggingFaceDataset`
if args.dataset_train_split:
train_dataset = textattack.datasets.HuggingFaceNlpDataset(
*dataset_args, split=args.dataset_train_split
)
else:
try:
train_dataset = textattack.datasets.HuggingFaceNlpDataset(
*dataset_args, split="train"
)
args.dataset_train_split = "train"
except KeyError:
raise KeyError(f"Error: no `train` split found in `{args.dataset}` dataset")
train_text, train_labels = prepare_dataset_for_training(train_dataset)
if args.dataset_dev_split:
eval_dataset = textattack.datasets.HuggingFaceNlpDataset(
*dataset_args, split=args.dataset_dev_split
)
    else:
        # try common dev split names
        eval_dataset = None
        for split_name in ("dev", "eval", "validation", "test"):
            try:
                eval_dataset = textattack.datasets.HuggingFaceNlpDataset(
                    *dataset_args, split=split_name
                )
                args.dataset_dev_split = split_name
                break
            except KeyError:
                continue
        if eval_dataset is None:
            raise KeyError(
                f"Could not find `dev`, `eval`, `validation`, or `test` split in dataset {args.dataset}."
            )
eval_text, eval_labels = prepare_dataset_for_training(eval_dataset)
return train_text, train_labels, eval_text, eval_labels
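# Usage sketch (illustrative; the dataset name is an assumption, not taken from this
# file): with args.dataset == "glue:sst2" and no explicit splits configured, the
# fallback logic above loads the "train" split and then probes "dev", "eval",
# "validation" and "test" in that order for the evaluation split.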
def model_from_args(train_args, num_labels, model_path=None):
"""Constructs a model from its `train_args.json`.
If huggingface model, loads from model hub address. If TextAttack
lstm/cnn, loads from disk (and `model_path` provides the path to the
model).
"""
if train_args.model == "lstm":
textattack.shared.logger.info("Loading textattack model: LSTMForClassification")
model = textattack.models.helpers.LSTMForClassification(
max_seq_length=train_args.max_length,
num_labels=num_labels,
emb_layer_trainable=False,
)
if model_path:
model.load_from_disk(model_path)
model = textattack.models.wrappers.PyTorchModelWrapper(model, model.tokenizer)
elif train_args.model == "cnn":
textattack.shared.logger.info(
"Loading textattack model: WordCNNForClassification"
)
model = textattack.models.helpers.WordCNNForClassification(
max_seq_length=train_args.max_length,
num_labels=num_labels,
emb_layer_trainable=False,
)
if model_path:
model.load_from_disk(model_path)
model = textattack.models.wrappers.PyTorchModelWrapper(model, model.tokenizer)
else:
import transformers
textattack.shared.logger.info(
f"Loading transformers AutoModelForSequenceClassification: {train_args.model}"
)
config = transformers.AutoConfig.from_pretrained(
train_args.model, num_labels=num_labels, finetuning_task=train_args.dataset
)
model = transformers.AutoModelForSequenceClassification.from_pretrained(
train_args.model, config=config,
)
tokenizer = textattack.models.tokenizers.AutoTokenizer(
train_args.model, use_fast=True, max_length=train_args.max_length
)
model = textattack.models.wrappers.HuggingFaceModelWrapper(model, tokenizer)
return model
def attack_from_args(args):
# note that this returns a recipe type, not an object
# (we need to wait to have access to the model to initialize)
attackCls = None
if args.attack:
if args.attack in ATTACK_RECIPE_NAMES:
attackCls = eval(ATTACK_RECIPE_NAMES[args.attack])
else:
raise ValueError(f"Unrecognized attack recipe: {args.attack}")
# check attack-related args
assert args.num_clean_epochs > 0, "--num-clean-epochs must be > 0"
return attackCls
def augmenter_from_args(args):
augmenter = None
if args.augment:
if args.augment in AUGMENTATION_RECIPE_NAMES:
augmenter = eval(AUGMENTATION_RECIPE_NAMES[args.augment])(
pct_words_to_swap=args.pct_words_to_swap,
transformations_per_example=args.transformations_per_example,
)
else:
raise ValueError(f"Unrecognized augmentation recipe: {args.augment}")
return augmenter
def write_readme(args, best_eval_score, best_eval_score_epoch):
# Save args to file
readme_save_path = os.path.join(args.output_dir, "README.md")
dataset_name = args.dataset.split(":")[0] if ":" in args.dataset else args.dataset
task_name = "regression" if args.do_regression else "classification"
loss_func = "mean squared error" if args.do_regression else "cross-entropy"
metric_name = "pearson correlation" if args.do_regression else "accuracy"
epoch_info = f"{best_eval_score_epoch} epoch" + (
"s" if best_eval_score_epoch > 1 else ""
)
readme_text = f"""
## TextAttack Model Card
This `{args.model}` model was fine-tuned for sequence classification using TextAttack
and the {dataset_name} dataset loaded using the `nlp` library. The model was fine-tuned
for {args.num_train_epochs} epochs with a batch size of {args.batch_size}, a learning
rate of {args.learning_rate}, and a maximum sequence length of {args.max_length}.
Since this was a {task_name} task, the model was trained with a {loss_func} loss function.
The best score the model achieved on this task was {best_eval_score}, as measured by the
eval set {metric_name}, found after {epoch_info}.
For more information, check out [TextAttack on Github](https://github.com/QData/TextAttack).
"""
with open(readme_save_path, "w", encoding="utf-8") as f:
f.write(readme_text.strip() + "\n")
logger.info(f"Wrote README to {readme_save_path}.")
| 38.811224 | 117 | 0.652951 |
abe12f27f2c3cd8ec5b4e9d8dbee7cb648677b97 | 4,042 | py | Python | sdk/tests/batch/config/config_test.py | Omrisnyk/turing | f2ea45af2f49114aff304be2585ba6095fb8d3bd | ["Apache-2.0"] | null | null | null | sdk/tests/batch/config/config_test.py | Omrisnyk/turing | f2ea45af2f49114aff304be2585ba6095fb8d3bd | ["Apache-2.0"] | null | null | null | sdk/tests/batch/config/config_test.py | Omrisnyk/turing | f2ea45af2f49114aff304be2585ba6095fb8d3bd | ["Apache-2.0"] | null | null | null |
import pytest
import turing.batch.config.source
import turing.batch.config.sink
import turing.generated.models
@pytest.mark.parametrize(
"source,predictions,result_config,sink,expected_fn", [
pytest.param(
turing.batch.config.source.BigQueryDataset(
table="project.table.dataset_1",
features=["feature_1", "feature_2", "feature_3"]
).join_on(columns=["feature_2", "feature_3"]),
{
"model_a":
turing.batch.config.source.BigQueryDataset(
table="project.table.model_a_results",
features=["feature_2", "feature_3", "prediction"]
).join_on(["feature_2", "feature_3"]).select(["prediction"])
},
turing.batch.config.ResultConfig(
type=turing.batch.config.ResultType.FLOAT,
column_name="ensembling_result"
),
(turing.batch.config.sink.BigQuerySink(
table="project.table.ensembling_results",
staging_bucket="staging_bucket",
options={})
.select(["feature_1", "ensembling_result"])
.save_mode(turing.batch.config.sink.SaveMode.IGNORE)),
lambda source, predictions, result_config, sink:
turing.generated.models.EnsemblingJobSpec(
source=source.to_open_api(),
predictions={name: source.to_open_api() for name, source in predictions.items()},
ensembler=turing.generated.models.EnsemblingJobEnsemblerSpec(
result=result_config.to_open_api()
),
sink=sink.to_open_api()
),
id="Initialize ensembling job spec"
)
]
)
def test_job_spec(source, predictions, result_config, sink, expected_fn):
job_config = turing.batch.config.EnsemblingJobConfig(
source=source,
predictions=predictions,
result_config=result_config,
sink=sink,
service_account="")
expected = expected_fn(source, predictions, result_config, sink)
assert job_config.job_spec() == expected
@pytest.mark.parametrize(
"service_account,resource_request,env_vars,expected_fn", [
pytest.param(
"[email protected]",
turing.batch.config.ResourceRequest(
driver_cpu_request="1",
driver_memory_request="1G",
executor_replica=5,
executor_cpu_request="500Mi",
executor_memory_request="800M"
),
{
"SOME_VAR": "SOME_VALUE"
},
lambda service_account, resource_request, env_vars:
turing.generated.models.EnsemblerInfraConfig(
service_account_name=service_account,
resources=resource_request
),
id="Initialize ensembling job infra spec"
),
pytest.param(
"[email protected]",
None,
{
"SOME_VAR": "SOME_VALUE"
},
lambda service_account, resource_request, env_vars:
turing.generated.models.EnsemblerInfraConfig(
service_account_name=service_account,
resources=resource_request
),
id="Initialize ensembling job with default resource request"
)
]
)
def test_infra_spec(service_account, resource_request, env_vars, expected_fn):
job_config = turing.batch.config.EnsemblingJobConfig(
source=None,
predictions={},
result_config=None,
sink=None,
service_account=service_account,
resource_request=resource_request,
env_vars=env_vars)
expected = expected_fn(service_account, resource_request, env_vars)
assert job_config.infra_spec() == expected
| 39.627451 | 101 | 0.58758 |
051a4b82378deb02b0391128639cee9154596f18 | 2,725 | py | Python | bin/vmcheckerpaths.py | ironmissy/vmchecker | b5c241c959e8f297d632a6364f8d341c89e79a61 | ["MIT"] | 1 | 2016-05-09T13:23:57.000Z | 2016-05-09T13:23:57.000Z | bin/vmcheckerpaths.py | ironmissy/vmchecker | b5c241c959e8f297d632a6364f8d341c89e79a61 | ["MIT"] | null | null | null | bin/vmcheckerpaths.py | ironmissy/vmchecker | b5c241c959e8f297d632a6364f8d341c89e79a61 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""All paths related to vmchecker."""
import os
_STORER_CONFIG_FILE = 'vmchecker_storer.ini'
_TESTER_CONFIG_FILE = 'vmchecker_tester.ini'
GRADE_FILENAME = 'results/job_results'
root = None
repository = None
def set_root(root_):
"""Sets vmchecker root path"""
global root
root = os.path.expanduser(root_)
assert os.path.isabs(root)
root = os.path.normpath(root)
def set_repository(repository_):
"""Sets the repository path"""
global repository
repository = os.path.expanduser(repository_)
repository = os.path.join(root, repository)
repository = os.path.normpath(repository)
def abspath(*segments):
"""Joins the path segments of path with VMChecker's root path"""
return os.path.normpath(os.path.join(root, *segments))
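# Illustrative sketch (paths are hypothetical): after set_root('~/vmchecker'),
# abspath('tests') resolves to something like '/home/<user>/vmchecker/tests', and
# the dir_* helpers below all build on that same root.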
def tester_paths():
"""A list of all the paths relevant to the tester machine."""
return [dir_queue(), dir_tester_unzip_tmp()]
def storer_paths():
"""A list of all the paths relevant to the storer machine."""
return [dir_unchecked(), dir_checked(),
dir_backup(), dir_tests()]
def dir_unchecked():
"""The absolute path of the unchecked homeworks.
This path is valid on the storer machine."""
return abspath('unchecked')
def dir_checked():
"""The absolute path of the checked homeworks.
This path is valid on the storer machine."""
return abspath('checked')
def dir_tests():
"""The absolute path of the test archives.
This path is valid on the storer machine."""
return abspath('tests')
def dir_queue():
"""The absolute path of the task queue directory.
This path is valid on the tester machine."""
return abspath('queue')
def dir_tester_unzip_tmp():
"""The absolute path of the directory where submission
archives are unzipped.
This path is valid on the tester machine."""
return abspath('tmpunzip')
def dir_backup():
"""The absolute path of the directory where backups
of tasks are kept.
This path is valid on the storer machine."""
return abspath('back')
def db_file():
"""The absolute path of the database file """
return abspath('vmchecker.db')
def dir_bin():
"""Returns absolute path for the bin/ directory"""
return abspath('bin')
def dir_assignment(assignment):
"""Returns path to all assignment submissions"""
return os.path.join(repository, assignment)
def dir_user(assignment, user):
"""Returns path to last user's assignment submission"""
return os.path.join(repository, assignment, user)
def dir_results(assignment, user):
"""Returns path to user's results on assignment"""
return os.path.join(repository, assignment, user, 'results')
| 24.115044 | 68 | 0.691009 |
5d5ddff564398de4b31c14e02f4cef8256a36630 | 1,945 | py | Python | tensorflow/examples/learn/boston.py | gnoses/TensorFlow | 63a21e054007d86269ed1ad0145ebce04ee57a81 | ["Apache-2.0"] | 65 | 2016-09-26T01:30:40.000Z | 2021-08-11T17:00:41.000Z | tensorflow/examples/learn/boston.py | gnoses/TensorFlow | 63a21e054007d86269ed1ad0145ebce04ee57a81 | ["Apache-2.0"] | 5 | 2017-02-21T08:37:52.000Z | 2017-03-29T05:46:05.000Z | tensorflow/examples/learn/boston.py | gnoses/TensorFlow | 63a21e054007d86269ed1ad0145ebce04ee57a81 | ["Apache-2.0"] | 10 | 2017-03-28T06:16:10.000Z | 2020-08-25T09:03:44.000Z |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
# Load dataset
boston = tf.contrib.learn.datasets.load_dataset('boston')
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Fit
regressor.fit(x_train, y_train, steps=5000, batch_size=1)
# Predict and score
y_predicted = list(
regressor.predict(
scaler.transform(x_test), as_iterable=True))
score = metrics.mean_squared_error(y_predicted, y_test)
print('MSE: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| 33.534483 | 75 | 0.753213 |
6f17906c054c61b6f31b3bc5074169b77b23dc2f | 1,067 | py | Python | observing/gather_test.py | AdamAbate-6/boatos | e31d62bdbebcb0c062793a2ebf119fe33e2b5eed | ["MIT"] | null | null | null | observing/gather_test.py | AdamAbate-6/boatos | e31d62bdbebcb0c062793a2ebf119fe33e2b5eed | ["MIT"] | null | null | null | observing/gather_test.py | AdamAbate-6/boatos | e31d62bdbebcb0c062793a2ebf119fe33e2b5eed | ["MIT"] | null | null | null |
# Simple demo of the FXOS8700 accelerometer and magnetometer.
# Will print the acceleration and magnetometer values every second.
import time
import board
import busio
import adafruit_fxos8700
import csv
import threading
class DataGathererTest(threading.Thread):
def __init__(self, delay):
# Initialize I2C bus and device.
self.i2c = busio.I2C(board.SCL, board.SDA)
self.sensor = adafruit_fxos8700.FXOS8700(self.i2c)
self.delay = delay
threading.Thread.__init__(self)
def run(self):
with open('/home/pi/boat-os/observing/mag_data_test.csv', 'w') as csvfile:
csvWriter = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
fieldnames = ['9dof_m_x', '9dof_m_y', '9dof_m_z']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
while 1:
mag_x, mag_y, mag_z = self.sensor.magnetometer
csvWriter.writerow([mag_x, mag_y, mag_z])
time.sleep(self.delay)
| 36.793103 | 100 | 0.657919 |
8428231843b6e3a59895f51bf0c918a395d47577 | 1,682 | py | Python | morse.py | Sondosissa18/se-q3-morse-code | 2f5689a1beeb6c4dba3217b20831f73122398b21 | ["MIT"] | null | null | null | morse.py | Sondosissa18/se-q3-morse-code | 2f5689a1beeb6c4dba3217b20831f73122398b21 | ["MIT"] | null | null | null | morse.py | Sondosissa18/se-q3-morse-code | 2f5689a1beeb6c4dba3217b20831f73122398b21 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Morse Code Decoder
"Dot" – is 1 time unit long.
"Dash" – is 3 time units long.
Pause between dots and dashes in a character – is 1 time unit long.
Pause between characters inside a word – is 3 time units long.
Pause between words – is 7 time units long.
"""
__author__ = "sondos with help from Gabby and piero"
from morse_dict import MORSE_2_ASCII
import re
def decode_bits(bits):
bits = bits.strip("0")
time_unit = min([len(g) for g in re.findall(r"1+|0+", bits)])
morse_code_pits = bits.replace("0000000"*time_unit, " ")
morse_code_pits = morse_code_pits.replace("000"*time_unit, " ")
morse_code_pits = morse_code_pits.replace("111"*time_unit, "-")
morse_code_pits = morse_code_pits.replace("1"*time_unit, ".")
morse_code_pits = morse_code_pits.replace("0"*time_unit, "")
return morse_code_pits
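# Worked example (illustrative): with one sample per time unit, "10111" strips to
# the runs 1 / 0 / 111, so time_unit == 1 and the replacements yield ".-";
# decode_morse(".-") then maps that to "A" through MORSE_2_ASCII. The same signal
# sampled at two units per element, "1100111111", also decodes to ".-".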
def decode_morse(morse):
code = ''
for word in morse.strip().split(" "):
for char in word.split(" "):
code += MORSE_2_ASCII[char]
code += " "
return code.strip()
if __name__ == '__main__':
hey_jude_morse = ".... . -.-- .--- ..- -.. ."
hey_jude_bits = "11001100110011000000110000001111110011001111110011111100000000000000000000011001111110011111100111111000000110011001111110000001111110011001100000011" # noqa
# Be sure to run all included unit tests, not just this one.
print("Morse Code decoder test")
print("Part A:")
print(f"'{hey_jude_morse}' -> {decode_morse(hey_jude_morse)}")
print()
print("Part B:")
print(f"'{hey_jude_bits}' -> {decode_morse(decode_bits(hey_jude_bits))}")
print("\nCompleted.")
| 32.980392 | 179 | 0.675386 |
eba210539f9c956c63aadab68e41dee6dfb8c777 | 934 | py | Python | main.py | gidoBOSSftw5731/mini-obd2 | ecdb8aa3a0f6de6af8d6ec6d6b653d9bb05bc756 | ["Apache-2.0"] | null | null | null | main.py | gidoBOSSftw5731/mini-obd2 | ecdb8aa3a0f6de6af8d6ec6d6b653d9bb05bc756 | ["Apache-2.0"] | null | null | null | main.py | gidoBOSSftw5731/mini-obd2 | ecdb8aa3a0f6de6af8d6ec6d6b653d9bb05bc756 | ["Apache-2.0"] | null | null | null |
import time
import datetime
from adafruit_ht16k33 import segments
import board
import busio
import obd
import re
#setup connection to bt obd crap
obd.logger.setLevel(obd.logging.DEBUG)
ports = obd.scan_serial()
if len(ports) == 0:
time.sleep(1)
raise Exception("no bt things!")
#guess we found a serial thing now
#open it up for real
connection = obd.OBD(ports[0])
# Create the I2C interface.
i2c = busio.I2C(board.SCL, board.SDA)
# Create the LED segment class.
# This creates a 7 segment 4 character display:
display = segments.Seg7x4(i2c)
# clear display
display.fill(0)
while True:
val = connection.query(obd.commands["SPEED"]).value
print(val)
    out = re.findall(r"\d+\.\d+", str(val))
if len(out) == 0:
display.print(str(" 0ff")[::-1])
time.sleep(.2)
continue
display.print("0000" + str(float(str(out[0]))*0.621371)[::-1])
# print(round(x,0))
# print(x)
# x+=1
time.sleep(0.1)
# input("enter for next")
| 17.961538 | 63 | 0.694861 |
042e6de53c4d8b8f911345354c808d459f56ef56 | 6,610 | py | Python | ethereum/hybrid_casper/casper_initiating_transactions.py | IIIIllllIIIIllllIIIIllllIIIIllllIIIIll/pyethereum | d962694be03686a8e5c1d7459ae272b70a5c9f77 | ["MIT"] | 1 | 2019-01-18T03:20:01.000Z | 2019-01-18T03:20:01.000Z | ethereum/hybrid_casper/casper_initiating_transactions.py | IIIIllllIIIIllllIIIIllllIIIIllllIIIIll/pyethereum | d962694be03686a8e5c1d7459ae272b70a5c9f77 | ["MIT"] | 8 | 2020-06-05T21:36:23.000Z | 2022-02-12T12:24:00.000Z | ethereum/hybrid_casper/casper_initiating_transactions.py | IIIIllllIIIIllllIIIIllllIIIIllllIIIIll/pyethereum | d962694be03686a8e5c1d7459ae272b70a5c9f77 | ["MIT"] | null | null | null |
import pkg_resources
from vyper import compiler
from ethereum.transactions import Transaction
from ethereum.abi import ContractTranslator
import rlp
from ethereum.utils import decode_hex
gasprice = 25 * 10**9
viper_rlp_decoder_tx = decode_hex("0xf90237808506fc23ac00830330888080b902246102128061000e60003961022056600060007f010000000000000000000000000000000000000000000000000000000000000060003504600060c082121515585760f882121561004d5760bf820336141558576001905061006e565b600181013560f783036020035260005160f6830301361415585760f6820390505b5b368112156101c2577f010000000000000000000000000000000000000000000000000000000000000081350483602086026040015260018501945060808112156100d55760018461044001526001828561046001376001820191506021840193506101bc565b60b881121561014357608081038461044001526080810360018301856104600137608181141561012e5760807f010000000000000000000000000000000000000000000000000000000000000060018401350412151558575b607f81038201915060608103840193506101bb565b60c08112156101b857600182013560b782036020035260005160388112157f010000000000000000000000000000000000000000000000000000000000000060018501350402155857808561044001528060b6838501038661046001378060b6830301830192506020810185019450506101ba565bfe5b5b5b5061006f565b601f841315155857602060208502016020810391505b6000821215156101fc578082604001510182826104400301526020820391506101d8565b808401610420528381018161044003f350505050505b6000f31b2d4f", Transaction) # noqa: E501
sig_hasher_tx = decode_hex("0xf9016d808506fc23ac0083026a508080b9015a6101488061000e6000396101565660007f01000000000000000000000000000000000000000000000000000000000000006000350460f8811215610038576001915061003f565b60f6810391505b508060005b368312156100c8577f01000000000000000000000000000000000000000000000000000000000000008335048391506080811215610087576001840193506100c2565b60b881121561009d57607f8103840193506100c1565b60c08112156100c05760b68103600185013560b783036020035260005101840193505b5b5b50610044565b81810360388112156100f4578060c00160005380836001378060010160002060e052602060e0f3610143565b61010081121561010557600161011b565b6201000081121561011757600261011a565b60035b5b8160005280601f038160f701815382856020378282600101018120610140526020610140f350505b505050505b6000f31b2d4f", Transaction) # noqa: E501
purity_checker_tx = decode_hex("0xf90467808506fc23ac00830583c88080b904546104428061000e60003961045056600061033f537c0100000000000000000000000000000000000000000000000000000000600035047f80010000000000000000000000000000000000000030ffff1c0e00000000000060205263a1903eab8114156103f7573659905901600090523660048237600435608052506080513b806020015990590160009052818152602081019050905060a0526080513b600060a0516080513c6080513b8060200260200159905901600090528181526020810190509050610100526080513b806020026020015990590160009052818152602081019050905061016052600060005b602060a05103518212156103c957610100601f8360a051010351066020518160020a161561010a57fe5b80606013151561011e57607f811315610121565b60005b1561014f5780607f036101000a60018460a0510101510482602002610160510152605e8103830192506103b2565b60f18114801561015f5780610164565b60f282145b905080156101725780610177565b60f482145b9050156103aa5760028212151561019e5760606001830360200261010051015112156101a1565b60005b156101bc57607f6001830360200261010051015113156101bf565b60005b156101d157600282036102605261031e565b6004821215156101f057600360018303602002610100510151146101f3565b60005b1561020d57605a6002830360200261010051015114610210565b60005b1561022b57606060038303602002610100510151121561022e565b60005b1561024957607f60038303602002610100510151131561024c565b60005b1561025e57600482036102605261031d565b60028212151561027d57605a6001830360200261010051015114610280565b60005b1561029257600282036102605261031c565b6002821215156102b157609060018303602002610100510151146102b4565b60005b156102c657600282036102605261031b565b6002821215156102e65760806001830360200261010051015112156102e9565b60005b156103035760906001830360200261010051015112610306565b60005b1561031857600282036102605261031a565bfe5b5b5b5b5b604060405990590160009052600081526102605160200261016051015181602001528090502054156103555760016102a052610393565b60306102605160200261010051015114156103755760016102a052610392565b60606102605160200261010051015114156103915760016102a0525b5b5b6102a051151561039f57fe5b6001830192506103b1565b6001830192505b5b8082602002610100510152600182019150506100e0565b50506001604060405990590160009052600081526080518160200152809050205560016102e05260206102e0f35b63c23697a8811415610440573659905901600090523660048237600435608052506040604059905901600090526000815260805181602001528090502054610300526020610300f35b505b6000f31b2d4f", Transaction) # noqa: E501
purity_checker_address = purity_checker_tx.creates
purity_checker_abi = [{'name': 'check(address)', 'type': 'function', 'constant': True, 'inputs': [{'name': 'addr', 'type': 'address'}], 'outputs': [{'name': 'out', 'type': 'bool'}]}, {'name': 'submit(address)', 'type': 'function', 'constant': False, 'inputs': [{'name': 'addr', 'type': 'address'}], 'outputs': [{'name': 'out', 'type': 'bool'}]}] # noqa: E501
viper_rlp_decoder_address = viper_rlp_decoder_tx.creates
sig_hasher_address = sig_hasher_tx.creates
_casper_contract_path = '/'.join(('..', 'casper', 'casper', 'contracts', 'simple_casper.v.py'))
casper_code = str(pkg_resources.resource_string('ethereum', _casper_contract_path), 'utf-8')
casper_bytecode = compiler.compile(casper_code)
casper_abi = compiler.mk_full_signature(casper_code)
casper_ct = ContractTranslator(casper_abi)
def mk_initializers(config, sender_privkey, starting_nonce=0):
o = []
nonce = starting_nonce
# Create transactions for instantiating RLP decoder, sig hasher and purity checker, plus transactions for feeding the
# one-time accounts that generate those transactions
for tx in (viper_rlp_decoder_tx, sig_hasher_tx, purity_checker_tx):
o.append(Transaction(nonce, gasprice, 90000, tx.sender, tx.startgas * tx.gasprice + tx.value, '').sign(sender_privkey))
o.append(tx)
nonce += 1
# Casper initialization transaction
casper_tx = Transaction(nonce, gasprice, 5000000, b'', 0, casper_bytecode).sign(sender_privkey)
# Casper initiate call (separate from initialization to save gas)
initiate_args = casper_ct.encode('initiate', [
config["EPOCH_LENGTH"], config["WITHDRAWAL_DELAY"], config["OWNER"], sig_hasher_address,
purity_checker_address, config["BASE_INTEREST_FACTOR"], config["BASE_PENALTY_FACTOR"]
])
casper_initiate_tx = Transaction(nonce + 1, gasprice, 1000000, casper_tx.creates, 0, initiate_args).sign(sender_privkey)
# Return list of transactions and Casper address
return o + [casper_tx, casper_initiate_tx], casper_tx.creates
| 143.695652 | 2,323 | 0.901059 |
f91288364d645c2ef64b4edf1a7742d645400bb9 | 19,149 | py | Python | build/env/lib/python2.7/site-packages/windmill-1.6-py2.7.egg/windmill/server/forwardmanager.py | bopopescu/myhue | 5f566970a5a1fa5af9f01832c9e9808c47634bc7 | ["Apache-2.0"] | 61 | 2015-03-16T18:36:06.000Z | 2021-12-02T10:08:17.000Z | windmill/server/forwardmanager.py | admc/windmill | 4304ee7258eb0c2814f215d8ce90abf02b1f737f | ["Apache-2.0"] | 8 | 2015-03-10T10:01:26.000Z | 2020-05-18T10:51:24.000Z | windmill/server/forwardmanager.py | admc/windmill | 4304ee7258eb0c2814f215d8ce90abf02b1f737f | ["Apache-2.0"] | 14 | 2015-01-29T16:28:33.000Z | 2021-09-04T11:19:48.000Z |
# Copyright (c) 2009 Canonical Ltd.
# Copyright (c) 2009 Mikeal Rogers <[email protected]>
# Copyright (c) 2009 Domen Kozar <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributor: Anthony Lenton <[email protected]>
import proxy
import time
import sys
if not sys.version.startswith('2.4'):
from urlparse import urlparse
else:
# python 2.4
from windmill.tools.urlparse_25 import urlparse
def normalize(scheme, netloc):
if scheme == '':
scheme = 'http'
if scheme == 'https' and netloc.endswith(':443'):
netloc = netloc[:-4]
if scheme == 'http' and netloc.endswith(':80'):
netloc = netloc[:-3]
return scheme, netloc
def urlmatch(a, b):
""" Returns True if urls a and b use the same scheme, netloc and port """
return normalize(a.scheme, a.netloc) == normalize(b.scheme, b.netloc)
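# Illustrative sketch (example hosts are made up): urlmatch treats
# "https://example.com:443/a" and "https://example.com/b" as the same origin,
# because normalize() strips the default :443/:80 ports and falls back to "http"
# when the scheme is empty.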
class ForwardManager(object):
""" Handles and remembers forwarded domains, maps and unmaps urls and
modifies environment variables and cookies so they work. """
def __init__(self, base_url):
self.forwarded = {} # Maps str->tuple(str,str) forwarded URL-> original scheme, netloc
self.static = {} # Maps str->tuple(str,str)
parsed_url = urlparse(base_url)
self.base_url = "%s://%s" % (parsed_url.scheme, parsed_url.netloc)
self.cookies = {parsed_url.netloc: {}}
def forward_map(self, url):
""" Return a ParseResult useful after forwarding """
path = url.path
if path.startswith('/'):
path = path.lstrip('/')
redirect_url = "%s/%s" % (self.base_url, path)
if url.query:
redirect_url += "?" + url.query
return urlparse(redirect_url)
def forward_unmap(self, url):
""" Return a ParseResult for the unforwarded URL """
if not self.is_forward_mapped(url):
return None
scheme, netloc = self.forwarded[url.geturl()]
path = url.path
if path.startswith('/'):
path = path.lstrip('/')
orig_url = "%s://%s/%s" % (scheme, netloc, path)
if len(url.query) > 0:
orig_url += "?" + url.query
return urlparse(orig_url)
def is_forward_mapped(self, url):
return url.geturl() in self.forwarded
def is_static_forwarded(self, url):
return url.netloc in self.static
def change_environ_domain(self, srcUrl, dstUrl, environ):
""" """
newEnv = environ.copy()
dst_host = "%s://%s" % (dstUrl.scheme, dstUrl.netloc)
src_host = "%s://%s" % (srcUrl.scheme, srcUrl.netloc)
for key in newEnv:
if type(newEnv[key]) is str:
if src_host in newEnv[key]:
newEnv[key] = newEnv[key].replace(src_host, dst_host)
elif srcUrl.netloc in newEnv[key]:
newEnv[key] = newEnv[key].replace(srcUrl.netloc, dstUrl.netloc)
elif srcUrl.scheme in ['http', 'https'] and srcUrl.scheme == newEnv[key]:
newEnv[key] = dstUrl.scheme
self.set_cookies(dstUrl.netloc, environ)
return newEnv
def forward(self, url, environ):
if self.is_static_forwarded(url):
fwdurl = url.geturl().replace(url.netloc, self.static[url.netloc])
else:
fwdurl = self.forward_map(url).geturl()
origdata = normalize(url.scheme, url.netloc)
self.forwarded[fwdurl] = origdata
newEnv = self.change_environ_domain(url, urlparse(fwdurl), environ)
return newEnv
def known_hosts(self):
def split(domain):
if not domain.startswith('http'):
return 'http', domain
else:
return tuple(domain.split('://'))
def getDomains(domain_list):
result = []
for domain in domain_list:
if not domain.startswith('http'):
result.append(('http', domain,))
result.append(('https', domain,))
else:
result.append(tuple(domain.split('://')))
return result
first = getDomains(proxy.first_forward_domains)
exclude = getDomains(proxy.exclude_from_retry)
forwarded = list(set(self.forwarded.values()))
result = list(urlparse("%s://%s/" % host)
for host in first + forwarded
if not host in exclude)
return result
def forward_to(self, url, host):
forwarded_url = "%s://%s%s" % (host.scheme, host.netloc, url.path)
if url.query:
forwarded_url += "?" + url.query
return urlparse(forwarded_url)
def parse_headers(self, headers, domain):
""" Store cookies for the given domain, or forget them if they're marked
for expiration. Then remove the cookie header, as it'll be
incorrectly stored in the browser as corresponding to the test
domain. """
if domain not in self.cookies:
self.cookies[domain] = {}
for header in headers:
key, value = header
if key == 'set-cookie':
cookiekey = None
cookieval = None
parts = value.split(';')
expired = False
dom = domain
for part in parts:
token = [p.strip() for p in part.split('=', 1)]
if len(token) == 1:
continue
k, v = token
if cookiekey is None:
cookiekey = k
cookieval = v
elif k.lower() == 'domain':
dom = v
elif k.lower() == 'expires':
now = time.time()
formats = ['%a, %d-%b-%Y %H:%M:%S GMT',
'%a, %d %b %Y %H:%M:%S GMT',
'%a, %d-%b-%y %H:%M:%S GMT',
'%a, %d %b %y %H:%M:%S GMT']
found = False
for f in formats:
try:
expires = time.strptime(v, f)
found = True
break
except ValueError:
pass # Continue with next format
if found and time.time() > time.mktime(expires):
expired = True
if not dom in self.cookies:
self.cookies[dom] = {}
if expired:
if cookiekey in self.cookies[dom]:
del self.cookies[dom][cookiekey]
else:
self.cookies[dom][cookiekey] = cookieval
def cookies_for(self, domain):
cookies = []
for d in self.cookies:
if domain.endswith(d):
cookies += ["%s=%s" % c for c in self.cookies[d].items()]
result = '; '.join(cookies)
return result
def set_cookies(self, domain, environ):
return # Remove this line to enable cookie handling
environ['HTTP_COOKIE'] = self.cookies_for(domain)
if len(environ['HTTP_COOKIE']) == 0:
del environ['HTTP_COOKIE']
def clear(self):
self.forwarded = {}
if __name__ == '__main__':
import unittest
class TestManager(unittest.TestCase):
def setUp(self):
self.aUrl = urlparse('http://otherurl/bla')
self.bUrl = urlparse('https://testurl/yadda')
self.cUrl = urlparse('https://otherurl/foobar')
self.dUrl = urlparse('https://otherurl:8000/foobar')
self.eUrl = urlparse('http://testurl/forwarded')
proxy.first_forward_domains = []
proxy.exclude_from_retry = []
def testStaticMap(self):
mgr = ForwardManager('http://testurl/')
self.assertFalse(mgr.is_static_forwarded(self.aUrl))
mgr.static['otherurl'] = ('http', 'testurl')
self.assertTrue(mgr.is_static_forwarded(self.aUrl))
def testForwardMap(self):
mgr = ForwardManager('http://testurl/')
expected = urlparse('http://testurl/bla')
self.assertEquals(expected, mgr.forward_map(self.aUrl))
expected = urlparse('http://testurl/yadda')
self.assertEquals(expected, mgr.forward_map(self.bUrl))
expected = urlparse('http://testurl/foobar')
self.assertEquals(expected, mgr.forward_map(self.cUrl))
expected = urlparse('http://testurl/foobar')
self.assertEquals(expected, mgr.forward_map(self.dUrl))
def testUnmapEnviron(self):
testEnv = {'HTTP_ACCEPT': 'text/html,application/xhtml+xml',
'CONTENT_TYPE': 'text/plain',
'SCRIPT_NAME': '',
'REQUEST_METHOD': 'GET',
'HTTP_HOST': 'testurl',
'PATH_INFO': 'http://testurl/bla/',
'SERVER_PROTOCOL': 'HTTP/1.1',
'QUERY_STRING': '',
'CONTENT_LENGTH': '',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'reconstructed_url': 'http://testurl/bla/',
'SERVER_NAME': '127.0.0.1',
'GATEWAY_INTERFACE': 'CGI/1.1',
'HTTP_PROXY_CONNECTION': 'keep-alive',
'REMOTE_ADDR': '127.0.0.1',
'HTTP_ACCEPT_LANGUAGE': 'en-gb,en;q=0.5',
'wsgi.url_scheme': 'http',
'SERVER_PORT': 4444,
'REMOTE_HOST': '127.0.0.1',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate',
'HTTP_KEEP_ALIVE': '300'}
origUrl = urlparse('https://otherurl/bla/')
mgr = ForwardManager('http://testurl/')
newEnv = mgr.change_environ_domain(mgr.forward_map(origUrl), origUrl, testEnv)
self.assertEquals(newEnv['PATH_INFO'], 'https://otherurl/bla/')
self.assertEquals(newEnv['HTTP_HOST'], 'otherurl')
self.assertEquals(newEnv['reconstructed_url'], 'https://otherurl/bla/')
self.assertEquals(newEnv['wsgi.url_scheme'], 'https')
def testMapEnviron(self):
testEnv = {'HTTP_ACCEPT': 'text/html,application/xhtml+xml',
'CONTENT_TYPE': 'text/plain',
'SCRIPT_NAME': '',
'REQUEST_METHOD': 'GET',
'HTTP_HOST': 'otherurl',
'PATH_INFO': 'https://otherurl/bla/',
'SERVER_PROTOCOL': 'HTTP/1.1',
'QUERY_STRING': '',
'CONTENT_LENGTH': '',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'reconstructed_url': 'https://otherurl/bla/',
'SERVER_NAME': '127.0.0.1',
'GATEWAY_INTERFACE': 'CGI/1.1',
'HTTP_PROXY_CONNECTION': 'keep-alive',
'REMOTE_ADDR': '127.0.0.1',
'HTTP_ACCEPT_LANGUAGE': 'en-gb,en;q=0.5',
'wsgi.url_scheme': 'https',
'SERVER_PORT': 4444,
'REMOTE_HOST': '127.0.0.1',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate',
'HTTP_KEEP_ALIVE': '300'}
fwdUrl = urlparse('http://testurl/bla/')
origUrl = urlparse('https://otherurl/bla/')
mgr = ForwardManager('http://testurl/')
newEnv = mgr.change_environ_domain(origUrl, fwdUrl, testEnv)
self.assertEquals(newEnv['PATH_INFO'], 'http://testurl/bla/')
self.assertEquals(newEnv['HTTP_HOST'], 'testurl')
self.assertEquals(newEnv['reconstructed_url'], 'http://testurl/bla/')
self.assertEquals(newEnv['wsgi.url_scheme'], 'http')
def testUnmap(self):
mgr = ForwardManager('http://testurl/')
orig_url = urlparse('http://otherurl/bla')
mgr.forward(orig_url, {})
result = mgr.forward_unmap(urlparse('http://testurl/bla'))
self.assertEquals(orig_url, result)
def testForward(self):
testEnv = {'HTTP_ACCEPT': 'text/html,application/xhtml+xml',
'CONTENT_TYPE': 'text/plain',
'SCRIPT_NAME': '',
'REQUEST_METHOD': 'GET',
'HTTP_HOST': 'otherurl',
'PATH_INFO': 'https://otherurl/bla/',
'SERVER_PROTOCOL': 'HTTP/1.1',
'QUERY_STRING': '',
'CONTENT_LENGTH': '',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'reconstructed_url': 'https://otherurl/bla/',
'SERVER_NAME': '127.0.0.1',
'GATEWAY_INTERFACE': 'CGI/1.1',
'HTTP_PROXY_CONNECTION': 'keep-alive',
'REMOTE_ADDR': '127.0.0.1',
'HTTP_ACCEPT_LANGUAGE': 'en-gb,en;q=0.5',
'wsgi.url_scheme': 'https',
'SERVER_PORT': 4444,
'REMOTE_HOST': '127.0.0.1',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate',
'HTTP_KEEP_ALIVE': '300'}
fwdUrl = urlparse('http://testurl/bla/')
origUrl = urlparse('https://otherurl/bla/')
mgr = ForwardManager('http://testurl/')
newEnv = mgr.forward(origUrl, testEnv)
newUrl = mgr.forward_map(origUrl)
self.assertEquals(newEnv['PATH_INFO'], 'http://testurl/bla/')
self.assertEquals(newEnv['HTTP_HOST'], 'testurl')
self.assertEquals(newEnv['reconstructed_url'], 'http://testurl/bla/')
self.assertEquals(newEnv['wsgi.url_scheme'], 'http')
self.assertEquals(newUrl, fwdUrl)
self.assertTrue(mgr.is_forward_mapped(newUrl))
def testForwardTo(self):
mgr = ForwardManager('http://testurl/')
self.assertEquals(urlparse("http://otherurl/forwarded"), mgr.forward_to(self.eUrl, self.aUrl))
def testForwardToDoesntDropQuery(self):
mgr = ForwardManager('http://testurl/')
query_url = urlparse("https://something/forwarded?foo=bar")
self.assertEquals(urlparse("http://otherurl/forwarded?foo=bar"),
mgr.forward_to(query_url, self.aUrl))
def testForwardMapDoesntDropQuery(self):
mgr = ForwardManager('http://testurl/')
query_url = urlparse("https://something/forwarded?foo=bar")
self.assertEquals(urlparse("http://testurl/forwarded?foo=bar"),
mgr.forward_map(query_url))
def testTestSiteWithPathDoesntBreakRedirects(self):
mgr = ForwardManager('http://testurl/path/')
fwdUrl = urlparse('http://testurl/bla/')
origUrl = urlparse('https://otherurl/bla/')
mgr.forward(origUrl, {})
newUrl = mgr.forward_map(origUrl)
self.assertEquals(newUrl, fwdUrl)
def testClear(self):
mgr = ForwardManager('http://testurl/path/')
origUrl = urlparse('https://otherurl/bla/')
fwdUrl = mgr.forward_map(origUrl)
mgr.forward(origUrl, {})
self.assertTrue(mgr.is_forward_mapped(fwdUrl))
mgr.clear()
self.assertFalse(mgr.is_forward_mapped(fwdUrl))
def testFirstForwardDomains(self):
proxy.first_forward_domains.append('goodurl.com')
proxy.first_forward_domains.append('https://greaturl.com')
mgr = ForwardManager('http://testurl/path/')
first = urlparse('http://goodurl.com/')
self.assertTrue(first in mgr.known_hosts())
second = urlparse('https://greaturl.com/')
self.assertTrue(second in mgr.known_hosts())
# Check that they're reported in order
hosts = mgr.known_hosts()
self.assertTrue(hosts.index(first) < hosts.index(second))
def testExcludeFromRetry(self):
proxy.exclude_from_retry.append('badurl.com')
mgr = ForwardManager('http://testurl/path/')
mgr.forward(self.aUrl, {})
mgr.forward(urlparse('http://badurl.com/sarasa'), {})
self.assertTrue(len(mgr.known_hosts()) == 1)
def testExcludeTakesPrecedence(self):
""" Test that exclude_from_retry takes precedence over
first_forward_domains -- i.e. if a domain is added to both
lists, it's *not* reported in known_hosts
"""
proxy.first_forward_domains.append('goodurl.com')
proxy.exclude_from_retry.append('goodurl.com')
mgr = ForwardManager('http://testurl/path/')
self.assertTrue(len(mgr.known_hosts()) == 0)
def testParseCookies(self):
headers = [('server', ' '), ('cache-control', ' no-cache'),
('content-encoding', ' gzip'),
('set-cookie', ' a=42; path=/; secure'),
('set-cookie', ' b=test; path=/; secure'),
('set-cookie', ' c=1; domain=.b.c; path=/'),
('set-cookie', ' e=1; path=/'),
('set-cookie', ' f=0; path=/; secure'),
('set-cookie', ' g=Nj; domain=.c; path=/;'),
('content-type', ' text/html'),
('content-length', ' 510'),
('date', ' sarasa')]
mgr = ForwardManager('http://testurl/path/')
mgr.parse_headers(headers, 'a.b.c')
self.assertEquals('g=Nj; a=42; b=test; e=1; f=0; c=1',
mgr.cookies_for('a.b.c'))
headers = [('set-cookie', 'e=; expires=Thu, 01-Dec-1994 16:00:00 GMT'),
('set-cookie', 'a=; expires=Thu, 01 Dec 1994 16:00:00 GMT'),
]
mgr.parse_headers(headers, 'a.b.c')
self.assertEquals('g=Nj; b=test; f=0; c=1',
mgr.cookies_for('a.b.c'))
unittest.main()
| 45.376777 | 106 | 0.523578 |
5825fd58cb45f6d30664a3f64f8e345e61843a94 | 414 | py | Python | shipWithinDays.py | hazardinho/LeetcodeSolutions | 3f7fee882c1cdc83ecf7c9fd05d2a7f1afb130e6 | ["MIT"] | 1 | 2020-05-21T09:29:48.000Z | 2020-05-21T09:29:48.000Z | shipWithinDays.py | Ich1goSan/LeetcodeSolutions | 3f7fee882c1cdc83ecf7c9fd05d2a7f1afb130e6 | ["MIT"] | null | null | null | shipWithinDays.py | Ich1goSan/LeetcodeSolutions | 3f7fee882c1cdc83ecf7c9fd05d2a7f1afb130e6 | ["MIT"] | null | null | null |
def shipWithinDays(self, weights: List[int], D: int) -> int:
l = max(weights)
r = sum(weights)
while l < r:
mid = (l + r) // 2
if self.days(weights, mid) >= D:
l = mid + 1
else:
r = mid
return l
def days(self, weights, k):
s = 0
d = 0
for i in weights:
s += i
if s > k:
d += 1
s = i
return d
| 18.818182 | 60 | 0.413043 |
b3358d99832937d60e9de8568bb44b010c5add8f | 105,931 | py | Python | Lib/site-packages/astroid/nodes/scoped_nodes/scoped_nodes.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | ["bzip2-1.0.6"] | null | null | null | Lib/site-packages/astroid/nodes/scoped_nodes/scoped_nodes.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | ["bzip2-1.0.6"] | null | null | null | Lib/site-packages/astroid/nodes/scoped_nodes/scoped_nodes.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | ["bzip2-1.0.6"] | null | null | null |
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt
"""
This module contains the classes for "scoped" node, i.e. which are opening a
new local scope in the language definition : Module, ClassDef, FunctionDef (and
Lambda, GeneratorExp, DictComp and SetComp to some extent).
"""
import io
import itertools
import os
import sys
import typing
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Set, TypeVar, Union, overload
from astroid import bases
from astroid import decorators as decorators_mod
from astroid import mixins, util
from astroid.const import IS_PYPY, PY38, PY38_PLUS, PY39_PLUS
from astroid.context import (
CallContext,
InferenceContext,
bind_context_to_node,
copy_context,
)
from astroid.exceptions import (
AstroidBuildingError,
AstroidTypeError,
AttributeInferenceError,
DuplicateBasesError,
InconsistentMroError,
InferenceError,
MroError,
StatementMissing,
TooManyLevelsError,
)
from astroid.interpreter.dunder_lookup import lookup
from astroid.interpreter.objectmodel import ClassModel, FunctionModel, ModuleModel
from astroid.manager import AstroidManager
from astroid.nodes import Arguments, Const, NodeNG, node_classes
from astroid.nodes.scoped_nodes.mixin import ComprehensionScope, LocalsDictNodeNG
from astroid.nodes.scoped_nodes.utils import builtin_lookup
from astroid.nodes.utils import Position
if sys.version_info >= (3, 6, 2):
from typing import NoReturn
else:
from typing_extensions import NoReturn
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
if sys.version_info >= (3, 8) or TYPE_CHECKING:
from functools import cached_property
else:
# pylint: disable-next=ungrouped-imports
from astroid.decorators import cachedproperty as cached_property
ITER_METHODS = ("__iter__", "__getitem__")
EXCEPTION_BASE_CLASSES = frozenset({"Exception", "BaseException"})
objects = util.lazy_import("objects")
BUILTIN_DESCRIPTORS = frozenset(
{"classmethod", "staticmethod", "builtins.classmethod", "builtins.staticmethod"}
)
T = TypeVar("T")
def _c3_merge(sequences, cls, context):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if not candidate:
# Show all the remaining bases, which were considered as
# candidates for the next mro sequence.
raise InconsistentMroError(
message="Cannot create a consistent method resolution order "
"for MROs {mros} of class {cls!r}.",
mros=sequences,
cls=cls,
context=context,
)
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0]
return None
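# Worked example (illustrative, using hypothetical classes): for the classic diamond
# D(B, C) with B(A) and C(A), the merge input is
#     [[D], [B, A, object], [C, A, object], [B, C]]
# and the result is [D, B, C, A, object]; A cannot be chosen before C because it
# still appears in the tail of C's linearization.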
def clean_typing_generic_mro(sequences: List[List["ClassDef"]]) -> None:
"""A class can inherit from typing.Generic directly, as base,
and as base of bases. The merged MRO must however only contain the last entry.
To prepare for _c3_merge, remove some typing.Generic entries from
sequences if multiple are present.
This method will check if Generic is in inferred_bases and also
part of bases_mro. If true, remove it from inferred_bases
as well as its entry the bases_mro.
Format sequences: [[self]] + bases_mro + [inferred_bases]
"""
bases_mro = sequences[1:-1]
inferred_bases = sequences[-1]
# Check if Generic is part of inferred_bases
for i, base in enumerate(inferred_bases):
if base.qname() == "typing.Generic":
position_in_inferred_bases = i
break
else:
return
# Check if also part of bases_mro
# Ignore entry for typing.Generic
for i, seq in enumerate(bases_mro):
if i == position_in_inferred_bases:
continue
if any(base.qname() == "typing.Generic" for base in seq):
break
else:
return
# Found multiple Generics in mro, remove entry from inferred_bases
# and the corresponding one from bases_mro
inferred_bases.pop(position_in_inferred_bases)
bases_mro.pop(position_in_inferred_bases)
def clean_duplicates_mro(sequences, cls, context):
for sequence in sequences:
names = [
(node.lineno, node.qname()) if node.name else None for node in sequence
]
last_index = dict(map(reversed, enumerate(names)))
if names and names[0] is not None and last_index[names[0]] != 0:
raise DuplicateBasesError(
message="Duplicates found in MROs {mros} for {cls!r}.",
mros=sequences,
cls=cls,
context=context,
)
yield [
node
for i, (node, name) in enumerate(zip(sequence, names))
if name is None or last_index[name] == i
]
def function_to_method(n, klass):
if isinstance(n, FunctionDef):
if n.type == "classmethod":
return bases.BoundMethod(n, klass)
if n.type == "property":
return n
if n.type != "staticmethod":
return bases.UnboundMethod(n)
return n
class Module(LocalsDictNodeNG):
"""Class representing an :class:`ast.Module` node.
>>> import astroid
>>> node = astroid.extract_node('import astroid')
>>> node
<Import l.1 at 0x7f23b2e4e5c0>
>>> node.parent
<Module l.0 at 0x7f23b2e4eda0>
"""
_astroid_fields = ("doc_node", "body")
fromlineno: Literal[0] = 0
"""The first line that this node appears on in the source code."""
lineno: Literal[0] = 0
"""The line that this node appears on in the source code."""
# attributes below are set by the builder module or by raw factories
file_bytes: Union[str, bytes, None] = None
"""The string/bytes that this ast was built from."""
file_encoding: Optional[str] = None
"""The encoding of the source file.
This is used to get unicode out of a source file.
Python 2 only.
"""
special_attributes = ModuleModel()
"""The names of special attributes that this module has."""
# names of module attributes available through the global scope
scope_attrs = {"__name__", "__doc__", "__file__", "__path__", "__package__"}
"""The names of module attributes available through the global scope."""
_other_fields = (
"name",
"doc",
"file",
"path",
"package",
"pure_python",
"future_imports",
)
_other_other_fields = ("locals", "globals")
col_offset: None
end_lineno: None
end_col_offset: None
parent: None
@decorators_mod.deprecate_arguments(doc="Use the postinit arg 'doc_node' instead")
def __init__(
self,
name: str,
doc: Optional[str] = None,
file: Optional[str] = None,
path: Optional[List[str]] = None,
package: Optional[bool] = None,
parent: None = None,
pure_python: Optional[bool] = True,
) -> None:
"""
:param name: The name of the module.
:param doc: The module docstring.
:param file: The path to the file that this ast has been extracted from.
:param path:
:param package: Whether the node represents a package or a module.
:param parent: The parent node in the syntax tree.
:param pure_python: Whether the ast was built from source.
"""
self.name = name
"""The name of the module."""
self._doc = doc
"""The module docstring."""
self.file = file
"""The path to the file that this ast has been extracted from.
This will be ``None`` when the representation has been built from a
built-in module.
"""
self.path = path
self.package = package
"""Whether the node represents a package or a module."""
self.pure_python = pure_python
"""Whether the ast was built from source."""
self.globals: Dict[str, List[node_classes.NodeNG]]
"""A map of the name of a global variable to the node defining the global."""
self.locals = self.globals = {}
"""A map of the name of a local variable to the node defining the local."""
self.body: Optional[List[node_classes.NodeNG]] = []
"""The contents of the module."""
self.doc_node: Optional[Const] = None
"""The doc node associated with this node."""
self.future_imports: Set[str] = set()
"""The imports from ``__future__``."""
super().__init__(lineno=0, parent=parent)
# pylint: enable=redefined-builtin
def postinit(self, body=None, *, doc_node: Optional[Const] = None):
"""Do some setup after initialisation.
:param body: The contents of the module.
:type body: list(NodeNG) or None
:param doc_node: The doc node associated with this node.
"""
self.body = body
self.doc_node = doc_node
if doc_node:
self._doc = doc_node.value
@property
def doc(self) -> Optional[str]:
"""The module docstring."""
warnings.warn(
"The 'Module.doc' attribute is deprecated, "
"use 'Module.doc_node' instead.",
DeprecationWarning,
)
return self._doc
@doc.setter
def doc(self, value: Optional[str]) -> None:
warnings.warn(
"Setting the 'Module.doc' attribute is deprecated, "
"use 'Module.doc_node' instead.",
DeprecationWarning,
)
self._doc = value
def _get_stream(self):
if self.file_bytes is not None:
return io.BytesIO(self.file_bytes)
if self.file is not None:
# pylint: disable=consider-using-with
stream = open(self.file, "rb")
return stream
return None
def stream(self):
"""Get a stream to the underlying file or bytes.
:type: file or io.BytesIO or None
"""
return self._get_stream()
def block_range(self, lineno):
"""Get a range from where this node starts to where this node ends.
:param lineno: Unused.
:type lineno: int
:returns: The range of line numbers that this node belongs to.
:rtype: tuple(int, int)
"""
return self.fromlineno, self.tolineno
def scope_lookup(self, node, name, offset=0):
"""Lookup where the given variable is assigned.
:param node: The node to look for assignments up to.
Any assignments after the given node are ignored.
:type node: NodeNG
:param name: The name of the variable to find assignments for.
:type name: str
:param offset: The line offset to filter statements up to.
:type offset: int
:returns: This scope node and the list of assignments associated to the
given name according to the scope where it has been found (locals,
globals or builtin).
:rtype: tuple(str, list(NodeNG))
"""
if name in self.scope_attrs and name not in self.locals:
try:
return self, self.getattr(name)
except AttributeInferenceError:
return self, ()
return self._scope_lookup(node, name, offset)
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
return "builtins.module"
def display_type(self):
"""A human readable type of this node.
:returns: The type of this node.
:rtype: str
"""
return "Module"
def getattr(self, name, context=None, ignore_locals=False):
if not name:
raise AttributeInferenceError(target=self, attribute=name, context=context)
result = []
name_in_locals = name in self.locals
if name in self.special_attributes and not ignore_locals and not name_in_locals:
result = [self.special_attributes.lookup(name)]
elif not ignore_locals and name_in_locals:
result = self.locals[name]
elif self.package:
try:
result = [self.import_module(name, relative_only=True)]
except (AstroidBuildingError, SyntaxError) as exc:
raise AttributeInferenceError(
target=self, attribute=name, context=context
) from exc
result = [n for n in result if not isinstance(n, node_classes.DelName)]
if result:
return result
raise AttributeInferenceError(target=self, attribute=name, context=context)
def igetattr(self, name, context=None):
"""Infer the possible values of the given variable.
:param name: The name of the variable to infer.
:type name: str
:returns: The inferred possible values.
:rtype: iterable(NodeNG) or None
"""
# set lookup name since this is necessary to infer on import nodes for
# instance
context = copy_context(context)
context.lookupname = name
try:
return bases._infer_stmts(self.getattr(name, context), context, frame=self)
except AttributeInferenceError as error:
raise InferenceError(
str(error), target=self, attribute=name, context=context
) from error
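# A rough usage sketch of the getattr/igetattr pair above (the module source
# and names are illustrative assumptions, not taken from this file):
#
#     import astroid
#     mod = astroid.parse("x = 1\ndef f(): return x")
#     mod.getattr("f")           # -> [<FunctionDef.f ...>]
#     next(mod.igetattr("x"))    # -> <Const.int ...> (the inferred value 1)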
def fully_defined(self):
"""Check if this module has been build from a .py file.
If so, the module contains a complete representation,
including the code.
:returns: True if the module has been built from a .py file.
:rtype: bool
"""
return self.file is not None and self.file.endswith(".py")
@overload
def statement(self, *, future: None = ...) -> "Module":
...
@overload
def statement(self, *, future: Literal[True]) -> NoReturn:
...
def statement(
self, *, future: Literal[None, True] = None
) -> Union["NoReturn", "Module"]:
"""The first parent node, including self, marked as statement node.
When called on a :class:`Module` with the future parameter this raises an error.
TODO: Deprecate the future parameter and only raise StatementMissing
:raises StatementMissing: If future is True, since a Module is not a statement.
"""
if future:
raise StatementMissing(target=self)
warnings.warn(
"In astroid 3.0.0 NodeNG.statement() will return either a nodes.Statement "
"or raise a StatementMissing exception. nodes.Module will no longer be "
"considered a statement. This behaviour can already be triggered "
"by passing 'future=True' to a statement() call.",
DeprecationWarning,
)
return self
def previous_sibling(self):
"""The previous sibling statement.
:returns: The previous sibling statement node.
:rtype: NodeNG or None
"""
def next_sibling(self):
"""The next sibling statement node.
:returns: The next sibling statement node.
:rtype: NodeNG or None
"""
_absolute_import_activated = True
def absolute_import_activated(self):
"""Whether :pep:`328` absolute import behaviour has been enabled.
:returns: True if :pep:`328` has been enabled, False otherwise.
:rtype: bool
"""
return self._absolute_import_activated
def import_module(self, modname, relative_only=False, level=None):
"""Get the ast for a given module as if imported from this module.
:param modname: The name of the module to "import".
:type modname: str
:param relative_only: Whether to only consider relative imports.
:type relative_only: bool
:param level: The level of relative import.
:type level: int or None
:returns: The imported module ast.
:rtype: NodeNG
"""
if relative_only and level is None:
level = 0
absmodname = self.relative_to_absolute_name(modname, level)
try:
return AstroidManager().ast_from_module_name(absmodname)
except AstroidBuildingError:
# we only want to import a sub module or package of this module,
# skip here
if relative_only:
raise
return AstroidManager().ast_from_module_name(modname)
def relative_to_absolute_name(self, modname: str, level: int) -> str:
"""Get the absolute module name for a relative import.
The relative import can be implicit or explicit.
:param modname: The module name to convert.
:param level: The level of relative import.
:returns: The absolute module name.
:raises TooManyLevelsError: When the relative import refers to a
module too far above this one.
"""
# XXX this returns nonsense when called on an absolute import
# like 'pylint.checkers.astroid.utils'
# XXX doesn't return absolute name if self.name isn't absolute name
if self.absolute_import_activated() and level is None:
return modname
if level:
if self.package:
level = level - 1
package_name = self.name.rsplit(".", level)[0]
elif (
self.path
and not os.path.exists(os.path.dirname(self.path[0]) + "/__init__.py")
and os.path.exists(
os.path.dirname(self.path[0]) + "/" + modname.split(".")[0]
)
):
level = level - 1
package_name = ""
else:
package_name = self.name.rsplit(".", level)[0]
if level and self.name.count(".") < level:
raise TooManyLevelsError(level=level, name=self.name)
elif self.package:
package_name = self.name
else:
package_name = self.name.rsplit(".", 1)[0]
if package_name:
if not modname:
return package_name
return f"{package_name}.{modname}"
return modname
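# A minimal sketch of how relative names are resolved (the module and package
# names below are hypothetical):
#
#     # For a module named "pkg.sub.mod" (package=False):
#     #   relative_to_absolute_name("x", level=1)  ->  "pkg.sub.x"
#     #   relative_to_absolute_name("",  level=2)  ->  "pkg"
#     # For the package "pkg.sub" itself (package=True), the level is first
#     # decremented, so level=1 resolves against "pkg.sub" instead.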
def wildcard_import_names(self):
"""The list of imported names when this module is 'wildcard imported'.
It doesn't include the '__builtins__' name which is added by the
current CPython implementation of wildcard imports.
:returns: The list of imported names.
:rtype: list(str)
"""
# We separate the different steps of lookup in try/excepts
# to avoid catching too many Exceptions
default = [name for name in self.keys() if not name.startswith("_")]
try:
all_values = self["__all__"]
except KeyError:
return default
try:
explicit = next(all_values.assigned_stmts())
except (InferenceError, StopIteration):
return default
except AttributeError:
# not an assignment node
# XXX infer?
return default
# Try our best to detect the exported names.
inferred = []
try:
explicit = next(explicit.infer())
except (InferenceError, StopIteration):
return default
if not isinstance(explicit, (node_classes.Tuple, node_classes.List)):
return default
def str_const(node):
return isinstance(node, node_classes.Const) and isinstance(node.value, str)
for node in explicit.elts:
if str_const(node):
inferred.append(node.value)
else:
try:
inferred_node = next(node.infer())
except (InferenceError, StopIteration):
continue
if str_const(inferred_node):
inferred.append(inferred_node.value)
return inferred
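# A rough sketch of the behaviour documented above (the source text is an
# illustrative assumption):
#
#     import astroid
#     mod = astroid.parse("__all__ = ['a']\na = 1\nb = 2\n_c = 3")
#     mod.wildcard_import_names()   # -> ['a']  (falls back to the public
#                                   #    names if __all__ cannot be inferred)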
def public_names(self):
"""The list of the names that are publicly available in this module.
:returns: The list of public names.
:rtype: list(str)
"""
return [name for name in self.keys() if not name.startswith("_")]
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
For a :class:`Module` this is always ``True``.
:rtype: bool
"""
return True
def get_children(self):
yield from self.body
def frame(self: T, *, future: Literal[None, True] = None) -> T:
"""The node's frame node.
A frame node is a :class:`Module`, :class:`FunctionDef`,
:class:`ClassDef` or :class:`Lambda`.
:returns: The node itself.
"""
return self
class GeneratorExp(ComprehensionScope):
"""Class representing an :class:`ast.GeneratorExp` node.
>>> import astroid
>>> node = astroid.extract_node('(thing for thing in things if thing)')
>>> node
<GeneratorExp l.1 at 0x7f23b2e4e400>
"""
_astroid_fields = ("elt", "generators")
_other_other_fields = ("locals",)
elt = None
"""The element that forms the output of the expression.
:type: NodeNG or None
"""
generators = None
"""The generators that are looped through.
:type: list(Comprehension) or None
"""
def __init__(
self,
lineno=None,
col_offset=None,
parent=None,
*,
end_lineno=None,
end_col_offset=None,
):
"""
:param lineno: The line that this node appears on in the source code.
:type lineno: int or None
:param col_offset: The column that this node appears on in the
source code.
:type col_offset: int or None
:param parent: The parent node in the syntax tree.
:type parent: NodeNG or None
:param end_lineno: The last line this node appears on in the source code.
:type end_lineno: Optional[int]
:param end_col_offset: The end column this node appears on in the
source code. Note: This is after the last symbol.
:type end_col_offset: Optional[int]
"""
self.locals = {}
"""A map of the name of a local variable to the node defining the local.
:type: dict(str, NodeNG)
"""
super().__init__(
lineno=lineno,
col_offset=col_offset,
end_lineno=end_lineno,
end_col_offset=end_col_offset,
parent=parent,
)
def postinit(self, elt=None, generators=None):
"""Do some setup after initialisation.
:param elt: The element that forms the output of the expression.
:type elt: NodeNG or None
:param generators: The generators that are looped through.
:type generators: list(Comprehension) or None
"""
self.elt = elt
if generators is None:
self.generators = []
else:
self.generators = generators
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
For a :class:`GeneratorExp` this is always ``True``.
:rtype: bool
"""
return True
def get_children(self):
yield self.elt
yield from self.generators
class DictComp(ComprehensionScope):
"""Class representing an :class:`ast.DictComp` node.
>>> import astroid
>>> node = astroid.extract_node('{k:v for k, v in things if k > v}')
>>> node
<DictComp l.1 at 0x7f23b2e41d68>
"""
_astroid_fields = ("key", "value", "generators")
_other_other_fields = ("locals",)
key = None
"""What produces the keys.
:type: NodeNG or None
"""
value = None
"""What produces the values.
:type: NodeNG or None
"""
generators = None
"""The generators that are looped through.
:type: list(Comprehension) or None
"""
def __init__(
self,
lineno=None,
col_offset=None,
parent=None,
*,
end_lineno=None,
end_col_offset=None,
):
"""
:param lineno: The line that this node appears on in the source code.
:type lineno: int or None
:param col_offset: The column that this node appears on in the
source code.
:type col_offset: int or None
:param parent: The parent node in the syntax tree.
:type parent: NodeNG or None
:param end_lineno: The last line this node appears on in the source code.
:type end_lineno: Optional[int]
:param end_col_offset: The end column this node appears on in the
source code. Note: This is after the last symbol.
:type end_col_offset: Optional[int]
"""
self.locals = {}
"""A map of the name of a local variable to the node defining the local.
:type: dict(str, NodeNG)
"""
super().__init__(
lineno=lineno,
col_offset=col_offset,
end_lineno=end_lineno,
end_col_offset=end_col_offset,
parent=parent,
)
def postinit(self, key=None, value=None, generators=None):
"""Do some setup after initialisation.
:param key: What produces the keys.
:type key: NodeNG or None
:param value: What produces the values.
:type value: NodeNG or None
:param generators: The generators that are looped through.
:type generators: list(Comprehension) or None
"""
self.key = key
self.value = value
if generators is None:
self.generators = []
else:
self.generators = generators
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
For a :class:`DictComp` this is always :class:`Uninferable`.
:rtype: Uninferable
"""
return util.Uninferable
def get_children(self):
yield self.key
yield self.value
yield from self.generators
class SetComp(ComprehensionScope):
"""Class representing an :class:`ast.SetComp` node.
>>> import astroid
>>> node = astroid.extract_node('{thing for thing in things if thing}')
>>> node
<SetComp l.1 at 0x7f23b2e41898>
"""
_astroid_fields = ("elt", "generators")
_other_other_fields = ("locals",)
elt = None
"""The element that forms the output of the expression.
:type: NodeNG or None
"""
generators = None
"""The generators that are looped through.
:type: list(Comprehension) or None
"""
def __init__(
self,
lineno=None,
col_offset=None,
parent=None,
*,
end_lineno=None,
end_col_offset=None,
):
"""
:param lineno: The line that this node appears on in the source code.
:type lineno: int or None
:param col_offset: The column that this node appears on in the
source code.
:type col_offset: int or None
:param parent: The parent node in the syntax tree.
:type parent: NodeNG or None
:param end_lineno: The last line this node appears on in the source code.
:type end_lineno: Optional[int]
:param end_col_offset: The end column this node appears on in the
source code. Note: This is after the last symbol.
:type end_col_offset: Optional[int]
"""
self.locals = {}
"""A map of the name of a local variable to the node defining the local.
:type: dict(str, NodeNG)
"""
super().__init__(
lineno=lineno,
col_offset=col_offset,
end_lineno=end_lineno,
end_col_offset=end_col_offset,
parent=parent,
)
def postinit(self, elt=None, generators=None):
"""Do some setup after initialisation.
:param elt: The element that forms the output of the expression.
:type elt: NodeNG or None
:param generators: The generators that are looped through.
:type generators: list(Comprehension) or None
"""
self.elt = elt
if generators is None:
self.generators = []
else:
self.generators = generators
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
For a :class:`SetComp` this is always :class:`Uninferable`.
:rtype: Uninferable
"""
return util.Uninferable
def get_children(self):
yield self.elt
yield from self.generators
class ListComp(ComprehensionScope):
"""Class representing an :class:`ast.ListComp` node.
>>> import astroid
>>> node = astroid.extract_node('[thing for thing in things if thing]')
>>> node
<ListComp l.1 at 0x7f23b2e418d0>
"""
_astroid_fields = ("elt", "generators")
_other_other_fields = ("locals",)
elt = None
"""The element that forms the output of the expression.
:type: NodeNG or None
"""
generators = None
"""The generators that are looped through.
:type: list(Comprehension) or None
"""
def __init__(
self,
lineno=None,
col_offset=None,
parent=None,
*,
end_lineno=None,
end_col_offset=None,
):
self.locals = {}
"""A map of the name of a local variable to the node defining it.
:type: dict(str, NodeNG)
"""
super().__init__(
lineno=lineno,
col_offset=col_offset,
end_lineno=end_lineno,
end_col_offset=end_col_offset,
parent=parent,
)
def postinit(self, elt=None, generators=None):
"""Do some setup after initialisation.
:param elt: The element that forms the output of the expression.
:type elt: NodeNG or None
:param generators: The generators that are looped through.
:type generators: list(Comprehension) or None
"""
self.elt = elt
self.generators = generators
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
For a :class:`ListComp` this is always :class:`Uninferable`.
:rtype: Uninferable
"""
return util.Uninferable
def get_children(self):
yield self.elt
yield from self.generators
def _infer_decorator_callchain(node):
"""Detect decorator call chaining and see if the end result is a
static or a classmethod.
"""
if not isinstance(node, FunctionDef):
return None
if not node.parent:
return None
try:
result = next(node.infer_call_result(node.parent), None)
except InferenceError:
return None
if isinstance(result, bases.Instance):
result = result._proxied
if isinstance(result, ClassDef):
if result.is_subtype_of("builtins.classmethod"):
return "classmethod"
if result.is_subtype_of("builtins.staticmethod"):
return "staticmethod"
if isinstance(result, FunctionDef):
if not result.decorators:
return None
# Determine if this function is decorated with one of the builtin descriptors we want.
for decorator in result.decorators.nodes:
if isinstance(decorator, node_classes.Name):
if decorator.name in BUILTIN_DESCRIPTORS:
return decorator.name
if (
isinstance(decorator, node_classes.Attribute)
and isinstance(decorator.expr, node_classes.Name)
and decorator.expr.name == "builtins"
and decorator.attrname in BUILTIN_DESCRIPTORS
):
return decorator.attrname
return None
class Lambda(mixins.FilterStmtsMixin, LocalsDictNodeNG):
"""Class representing an :class:`ast.Lambda` node.
>>> import astroid
>>> node = astroid.extract_node('lambda arg: arg + 1')
>>> node
<Lambda.<lambda> l.1 at 0x7f23b2e41518>
"""
_astroid_fields = ("args", "body")
_other_other_fields = ("locals",)
name = "<lambda>"
is_lambda = True
special_attributes = FunctionModel()
"""The names of special attributes that this function has."""
def implicit_parameters(self):
return 0
# function's type, 'function' | 'method' | 'staticmethod' | 'classmethod'
@property
def type(self):
"""Whether this is a method or function.
:returns: 'method' if this is a method, 'function' otherwise.
:rtype: str
"""
if self.args.arguments and self.args.arguments[0].name == "self":
if isinstance(self.parent.scope(), ClassDef):
return "method"
return "function"
def __init__(
self,
lineno=None,
col_offset=None,
parent=None,
*,
end_lineno=None,
end_col_offset=None,
):
"""
:param lineno: The line that this node appears on in the source code.
:type lineno: int or None
:param col_offset: The column that this node appears on in the
source code.
:type col_offset: int or None
:param parent: The parent node in the syntax tree.
:type parent: NodeNG or None
:param end_lineno: The last line this node appears on in the source code.
:type end_lineno: Optional[int]
:param end_col_offset: The end column this node appears on in the
source code. Note: This is after the last symbol.
:type end_col_offset: Optional[int]
"""
self.locals = {}
"""A map of the name of a local variable to the node defining it.
:type: dict(str, NodeNG)
"""
self.args: Arguments
"""The arguments that the function takes."""
self.body = []
"""The contents of the function body.
:type: list(NodeNG)
"""
self.instance_attrs: Dict[str, List[NodeNG]] = {}
super().__init__(
lineno=lineno,
col_offset=col_offset,
end_lineno=end_lineno,
end_col_offset=end_col_offset,
parent=parent,
)
def postinit(self, args: Arguments, body):
"""Do some setup after initialisation.
:param args: The arguments that the function takes.
:param body: The contents of the function body.
:type body: list(NodeNG)
"""
self.args = args
self.body = body
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
if "method" in self.type:
return "builtins.instancemethod"
return "builtins.function"
def display_type(self):
"""A human readable type of this node.
:returns: The type of this node.
:rtype: str
"""
if "method" in self.type:
return "Method"
return "Function"
def callable(self):
"""Whether this node defines something that is callable.
:returns: True if this defines something that is callable,
False otherwise.
For a :class:`Lambda` this is always ``True``.
:rtype: bool
"""
return True
def argnames(self) -> List[str]:
"""Get the names of each of the arguments, including that
of the collections of variable-length arguments ("args", "kwargs",
etc.), as well as positional-only and keyword-only arguments.
:returns: The names of the arguments.
:rtype: list(str)
"""
if self.args.arguments: # maybe None with builtin functions
names = _rec_get_names(self.args.arguments)
else:
names = []
if self.args.vararg:
names.append(self.args.vararg)
names += [elt.name for elt in self.args.kwonlyargs]
if self.args.kwarg:
names.append(self.args.kwarg)
return names
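# A small sketch of argnames() (the function below is a made-up example):
#
#     import astroid
#     func = astroid.extract_node("def f(a, b, *args, c=1, **kwargs): ...")
#     func.argnames()   # -> ['a', 'b', 'args', 'c', 'kwargs']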
def infer_call_result(self, caller, context=None):
"""Infer what the function returns when called.
:param caller: Unused
:type caller: object
"""
# pylint: disable=no-member; github.com/pycqa/astroid/issues/291
# args is in fact redefined later on by postinit. Can't be changed
# to None due to a strong interaction between Lambda and FunctionDef.
return self.body.infer(context)
def scope_lookup(self, node, name, offset=0):
"""Lookup where the given names is assigned.
:param node: The node to look for assignments up to.
Any assignments after the given node are ignored.
:type node: NodeNG
:param name: The name to find assignments for.
:type name: str
:param offset: The line offset to filter statements up to.
:type offset: int
:returns: This scope node and the list of assignments associated to the
given name according to the scope where it has been found (locals,
globals or builtin).
:rtype: tuple(str, list(NodeNG))
"""
if node in self.args.defaults or node in self.args.kw_defaults:
frame = self.parent.frame(future=True)
# line offset to avoid that def func(f=func) resolve the default
# value to the defined function
offset = -1
else:
# check this is not used in function decorators
frame = self
return frame._scope_lookup(node, name, offset)
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
For a :class:`Lambda` this is always ``True``.
:rtype: bool
"""
return True
def get_children(self):
yield self.args
yield self.body
def frame(self: T, *, future: Literal[None, True] = None) -> T:
"""The node's frame node.
A frame node is a :class:`Module`, :class:`FunctionDef`,
:class:`ClassDef` or :class:`Lambda`.
:returns: The node itself.
"""
return self
def getattr(
self, name: str, context: Optional[InferenceContext] = None
) -> List[NodeNG]:
if not name:
raise AttributeInferenceError(target=self, attribute=name, context=context)
found_attrs = []
if name in self.instance_attrs:
found_attrs = self.instance_attrs[name]
if name in self.special_attributes:
found_attrs.append(self.special_attributes.lookup(name))
if found_attrs:
return found_attrs
raise AttributeInferenceError(target=self, attribute=name)
class FunctionDef(mixins.MultiLineBlockMixin, node_classes.Statement, Lambda):
"""Class representing an :class:`ast.FunctionDef`.
>>> import astroid
>>> node = astroid.extract_node('''
... def my_func(arg):
... return arg + 1
... ''')
>>> node
<FunctionDef.my_func l.2 at 0x7f23b2e71e10>
"""
_astroid_fields = ("decorators", "args", "returns", "doc_node", "body")
_multi_line_block_fields = ("body",)
returns = None
decorators: Optional[node_classes.Decorators] = None
"""The decorators that are applied to this method or function."""
is_function = True
"""Whether this node indicates a function.
For a :class:`FunctionDef` this is always ``True``.
:type: bool
"""
type_annotation = None
"""If present, this will contain the type annotation passed by a type comment
:type: NodeNG or None
"""
type_comment_args = None
"""
If present, this will contain the type annotation for arguments
passed by a type comment
"""
type_comment_returns = None
"""If present, this will contain the return type annotation, passed by a type comment"""
# attributes below are set by the builder module or by raw factories
_other_fields = ("name", "doc", "position")
_other_other_fields = (
"locals",
"_type",
"type_comment_returns",
"type_comment_args",
)
_type = None
@decorators_mod.deprecate_arguments(doc="Use the postinit arg 'doc_node' instead")
def __init__(
self,
name=None,
doc: Optional[str] = None,
lineno=None,
col_offset=None,
parent=None,
*,
end_lineno=None,
end_col_offset=None,
):
"""
:param name: The name of the function.
:type name: str or None
:param doc: The function docstring.
:param lineno: The line that this node appears on in the source code.
:type lineno: int or None
:param col_offset: The column that this node appears on in the
source code.
:type col_offset: int or None
:param parent: The parent node in the syntax tree.
:type parent: NodeNG or None
:param end_lineno: The last line this node appears on in the source code.
:type end_lineno: Optional[int]
:param end_col_offset: The end column this node appears on in the
source code. Note: This is after the last symbol.
:type end_col_offset: Optional[int]
"""
self.name = name
"""The name of the function.
:type name: str or None
"""
self._doc = doc
"""The function docstring."""
self.doc_node: Optional[Const] = None
"""The doc node associated with this node."""
self.instance_attrs = {}
super().__init__(
lineno=lineno,
col_offset=col_offset,
end_lineno=end_lineno,
end_col_offset=end_col_offset,
parent=parent,
)
if parent:
frame = parent.frame(future=True)
frame.set_local(name, self)
# pylint: disable=arguments-differ; different from Lambda
def postinit(
self,
args: Arguments,
body,
decorators: Optional[node_classes.Decorators] = None,
returns=None,
type_comment_returns=None,
type_comment_args=None,
*,
position: Optional[Position] = None,
doc_node: Optional[Const] = None,
):
"""Do some setup after initialisation.
:param args: The arguments that the function takes.
:param body: The contents of the function body.
:type body: list(NodeNG)
:param decorators: The decorators that are applied to this
method or function.
:type decorators: Decorators or None
:param type_comment_returns:
The return type annotation passed via a type comment.
:param type_comment_args:
The args type annotation passed via a type comment.
:param position:
Position of function keyword(s) and name.
:param doc_node:
The doc node associated with this node.
"""
self.args = args
self.body = body
self.decorators = decorators
self.returns = returns
self.type_comment_returns = type_comment_returns
self.type_comment_args = type_comment_args
self.position = position
self.doc_node = doc_node
if doc_node:
self._doc = doc_node.value
@property
def doc(self) -> Optional[str]:
"""The function docstring."""
warnings.warn(
"The 'FunctionDef.doc' attribute is deprecated, "
"use 'FunctionDef.doc_node' instead.",
DeprecationWarning,
)
return self._doc
@doc.setter
def doc(self, value: Optional[str]) -> None:
warnings.warn(
"Setting the 'FunctionDef.doc' attribute is deprecated, "
"use 'FunctionDef.doc_node' instead.",
DeprecationWarning,
)
self._doc = value
@cached_property
def extra_decorators(self) -> List[node_classes.Call]:
"""The extra decorators that this function can have.
Additional decorators are considered when they are used as
assignments, as in ``method = staticmethod(method)``.
The property will return all the callables that are used for
decoration.
"""
frame = self.parent.frame(future=True)
if not isinstance(frame, ClassDef):
return []
decorators: List[node_classes.Call] = []
for assign in frame._get_assign_nodes():
if isinstance(assign.value, node_classes.Call) and isinstance(
assign.value.func, node_classes.Name
):
for assign_node in assign.targets:
if not isinstance(assign_node, node_classes.AssignName):
# Support only `name = callable(name)`
continue
if assign_node.name != self.name:
# Interested only in the assignment nodes that
# decorate the current method.
continue
try:
meth = frame[self.name]
except KeyError:
continue
else:
# Must be a function and in the same frame as the
# original method.
if (
isinstance(meth, FunctionDef)
and assign_node.frame(future=True) == frame
):
decorators.append(assign.value)
return decorators
@cached_property
def type(
self,
): # pylint: disable=invalid-overridden-method,too-many-return-statements
"""The function type for this node.
Possible values are: method, function, staticmethod, classmethod.
:type: str
"""
for decorator in self.extra_decorators:
if decorator.func.name in BUILTIN_DESCRIPTORS:
return decorator.func.name
frame = self.parent.frame(future=True)
type_name = "function"
if isinstance(frame, ClassDef):
if self.name == "__new__":
return "classmethod"
if self.name == "__init_subclass__":
return "classmethod"
if self.name == "__class_getitem__":
return "classmethod"
type_name = "method"
if not self.decorators:
return type_name
for node in self.decorators.nodes:
if isinstance(node, node_classes.Name):
if node.name in BUILTIN_DESCRIPTORS:
return node.name
if (
isinstance(node, node_classes.Attribute)
and isinstance(node.expr, node_classes.Name)
and node.expr.name == "builtins"
and node.attrname in BUILTIN_DESCRIPTORS
):
return node.attrname
if isinstance(node, node_classes.Call):
# Handle the following case:
# @some_decorator(arg1, arg2)
# def func(...)
#
try:
current = next(node.func.infer())
except (InferenceError, StopIteration):
continue
_type = _infer_decorator_callchain(current)
if _type is not None:
return _type
try:
for inferred in node.infer():
# Check to see if this returns a static or a class method.
_type = _infer_decorator_callchain(inferred)
if _type is not None:
return _type
if not isinstance(inferred, ClassDef):
continue
for ancestor in inferred.ancestors():
if not isinstance(ancestor, ClassDef):
continue
if ancestor.is_subtype_of("builtins.classmethod"):
return "classmethod"
if ancestor.is_subtype_of("builtins.staticmethod"):
return "staticmethod"
except InferenceError:
pass
return type_name
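# A brief sketch of the 'type' property above (the class body is a
# hypothetical example):
#
#     import astroid
#     klass = astroid.extract_node("""
#     class A:
#         @staticmethod
#         def s(): ...
#         def m(self): ...
#     """)
#     klass.locals["s"][0].type   # -> 'staticmethod'
#     klass.locals["m"][0].type   # -> 'method'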
@cached_property
def fromlineno(self) -> Optional[int]:
"""The first line that this node appears on in the source code."""
# lineno is the line number of the first decorator, we want the def
# statement lineno. Similar to 'ClassDef.fromlineno'
lineno = self.lineno
if self.decorators is not None:
lineno += sum(
node.tolineno - node.lineno + 1 for node in self.decorators.nodes
)
return lineno
@cached_property
def blockstart_tolineno(self):
"""The line on which the beginning of this block ends.
:type: int
"""
return self.args.tolineno
def implicit_parameters(self) -> Literal[0, 1]:
return 1 if self.is_bound() else 0
def block_range(self, lineno):
"""Get a range from the given line number to where this node ends.
:param lineno: Unused.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
:rtype: tuple(int, int)
"""
return self.fromlineno, self.tolineno
def igetattr(self, name, context=None):
"""Inferred getattr, which returns an iterator of inferred statements."""
try:
return bases._infer_stmts(self.getattr(name, context), context, frame=self)
except AttributeInferenceError as error:
raise InferenceError(
str(error), target=self, attribute=name, context=context
) from error
def is_method(self):
"""Check if this function node represents a method.
:returns: True if this is a method, False otherwise.
:rtype: bool
"""
# check we are defined in a ClassDef, because this is usually expected
# (e.g. pylint...) when is_method() returns True
return self.type != "function" and isinstance(
self.parent.frame(future=True), ClassDef
)
@decorators_mod.cached
def decoratornames(self, context=None):
"""Get the qualified names of each of the decorators on this function.
:param context:
An inference context that can be passed to inference functions
:returns: The names of the decorators.
:rtype: set(str)
"""
result = set()
decoratornodes = []
if self.decorators is not None:
decoratornodes += self.decorators.nodes
decoratornodes += self.extra_decorators
for decnode in decoratornodes:
try:
for infnode in decnode.infer(context=context):
result.add(infnode.qname())
except InferenceError:
continue
return result
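# A short sketch of decoratornames() (the decorated function is an
# illustrative assumption):
#
#     import astroid
#     node = astroid.extract_node("""
#     import abc
#     @abc.abstractmethod
#     def meth(self): ...  #@
#     """)
#     node.decoratornames()   # -> {'abc.abstractmethod'}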
def is_bound(self):
"""Check if the function is bound to an instance or class.
:returns: True if the function is bound to an instance or class,
False otherwise.
:rtype: bool
"""
return self.type in {"method", "classmethod"}
def is_abstract(self, pass_is_abstract=True, any_raise_is_abstract=False):
"""Check if the method is abstract.
A method is considered abstract if any of the following is true:
* The only statement is 'raise NotImplementedError'
* The only statement is 'raise <SomeException>' and any_raise_is_abstract is True
* The only statement is 'pass' and pass_is_abstract is True
* The method is annotated with abc.abstractproperty/abc.abstractmethod
:returns: True if the method is abstract, False otherwise.
:rtype: bool
"""
if self.decorators:
for node in self.decorators.nodes:
try:
inferred = next(node.infer())
except (InferenceError, StopIteration):
continue
if inferred and inferred.qname() in {
"abc.abstractproperty",
"abc.abstractmethod",
}:
return True
for child_node in self.body:
if isinstance(child_node, node_classes.Raise):
if any_raise_is_abstract:
return True
if child_node.raises_not_implemented():
return True
return pass_is_abstract and isinstance(child_node, node_classes.Pass)
# empty function is the same as function with a single "pass" statement
if pass_is_abstract:
return True
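# A quick sketch of is_abstract() (the method body is a made-up example):
#
#     import astroid
#     meth = astroid.extract_node("def meth(self): raise NotImplementedError")
#     meth.is_abstract()                        # -> True
#     meth.is_abstract(pass_is_abstract=False)  # still True: it raises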
def is_generator(self):
"""Check if this is a generator function.
:returns: True if this is a generator function, False otherwise.
:rtype: bool
"""
return bool(next(self._get_yield_nodes_skip_lambdas(), False))
def infer_yield_result(self, context=None):
"""Infer what the function yields when called
:returns: What the function yields
:rtype: iterable(NodeNG or Uninferable) or None
"""
# pylint: disable=not-an-iterable
# https://github.com/PyCQA/astroid/issues/1015
for yield_ in self.nodes_of_class(node_classes.Yield):
if yield_.value is None:
const = node_classes.Const(None)
const.parent = yield_
const.lineno = yield_.lineno
yield const
elif yield_.scope() == self:
yield from yield_.value.infer(context=context)
def infer_call_result(self, caller=None, context=None):
"""Infer what the function returns when called.
:returns: What the function returns.
:rtype: iterable(NodeNG or Uninferable) or None
"""
if self.is_generator():
if isinstance(self, AsyncFunctionDef):
generator_cls = bases.AsyncGenerator
else:
generator_cls = bases.Generator
result = generator_cls(self, generator_initial_context=context)
yield result
return
# This is really a gigantic hack to work around metaclass generators
# that return transient class-generating functions. Pylint's AST structure
# cannot handle a base class object that is only used for calling __new__,
# but does not contribute to the inheritance structure itself. We inject
# a fake class into the hierarchy here for several well-known metaclass
# generators, and filter it out later.
if (
self.name == "with_metaclass"
and len(self.args.args) == 1
and self.args.vararg is not None
):
metaclass = next(caller.args[0].infer(context), None)
if isinstance(metaclass, ClassDef):
try:
class_bases = [next(arg.infer(context)) for arg in caller.args[1:]]
except StopIteration as e:
raise InferenceError(node=caller.args[1:], context=context) from e
new_class = ClassDef(name="temporary_class")
new_class.hide = True
new_class.parent = self
new_class.postinit(
bases=[base for base in class_bases if base != util.Uninferable],
body=[],
decorators=[],
metaclass=metaclass,
)
yield new_class
return
returns = self._get_return_nodes_skip_functions()
first_return = next(returns, None)
if not first_return:
if self.body:
if self.is_abstract(pass_is_abstract=True, any_raise_is_abstract=True):
yield util.Uninferable
else:
yield node_classes.Const(None)
return
raise InferenceError("The function does not have any return statements")
for returnnode in itertools.chain((first_return,), returns):
if returnnode.value is None:
yield node_classes.Const(None)
else:
try:
yield from returnnode.value.infer(context)
except InferenceError:
yield util.Uninferable
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
For a :class:`FunctionDef` this is always ``True``.
:rtype: bool
"""
return True
def get_children(self):
if self.decorators is not None:
yield self.decorators
yield self.args
if self.returns is not None:
yield self.returns
yield from self.body
def scope_lookup(self, node, name, offset=0):
"""Lookup where the given name is assigned."""
if name == "__class__":
# __class__ is an implicit closure reference created by the compiler
# if any methods in a class body refer to either __class__ or super.
# In our case, we want to be able to look it up in the current scope
# when `__class__` is being used.
frame = self.parent.frame(future=True)
if isinstance(frame, ClassDef):
return self, [frame]
return super().scope_lookup(node, name, offset)
def frame(self: T, *, future: Literal[None, True] = None) -> T:
"""The node's frame node.
A frame node is a :class:`Module`, :class:`FunctionDef`,
:class:`ClassDef` or :class:`Lambda`.
:returns: The node itself.
"""
return self
class AsyncFunctionDef(FunctionDef):
"""Class representing an :class:`ast.FunctionDef` node.
A :class:`AsyncFunctionDef` is an asynchronous function
created with the `async` keyword.
>>> import astroid
>>> node = astroid.extract_node('''
async def func(things):
async for thing in things:
print(thing)
''')
>>> node
<AsyncFunctionDef.func l.2 at 0x7f23b2e416d8>
>>> node.body[0]
<AsyncFor l.3 at 0x7f23b2e417b8>
"""
def _rec_get_names(args, names: Optional[List[str]] = None) -> List[str]:
"""return a list of all argument names"""
if names is None:
names = []
for arg in args:
if isinstance(arg, node_classes.Tuple):
_rec_get_names(arg.elts, names)
else:
names.append(arg.name)
return names
def _is_metaclass(klass, seen=None):
"""Return if the given class can be
used as a metaclass.
"""
if klass.name == "type":
return True
if seen is None:
seen = set()
for base in klass.bases:
try:
for baseobj in base.infer():
baseobj_name = baseobj.qname()
if baseobj_name in seen:
continue
seen.add(baseobj_name)
if isinstance(baseobj, bases.Instance):
# not abstract
return False
if baseobj is util.Uninferable:
continue
if baseobj is klass:
continue
if not isinstance(baseobj, ClassDef):
continue
if baseobj._type == "metaclass":
return True
if _is_metaclass(baseobj, seen):
return True
except InferenceError:
continue
return False
def _class_type(klass, ancestors=None):
"""return a ClassDef node type to differ metaclass and exception
from 'regular' classes
"""
# XXX we have to store ancestors in case we have an ancestor loop
if klass._type is not None:
return klass._type
if _is_metaclass(klass):
klass._type = "metaclass"
elif klass.name.endswith("Exception"):
klass._type = "exception"
else:
if ancestors is None:
ancestors = set()
klass_name = klass.qname()
if klass_name in ancestors:
# XXX we are in loop ancestors, and have found no type
klass._type = "class"
return "class"
ancestors.add(klass_name)
for base in klass.ancestors(recurs=False):
name = _class_type(base, ancestors)
if name != "class":
if name == "metaclass" and not _is_metaclass(klass):
# don't propagate it if the current class
# can't be a metaclass
continue
klass._type = base.type
break
if klass._type is None:
klass._type = "class"
return klass._type
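# A small sketch of the resulting ClassDef.type values (the class names are
# hypothetical):
#
#     import astroid
#     mod = astroid.parse("""
#     class Meta(type): ...
#     class MyError(Exception): ...
#     class Plain: ...
#     """)
#     [mod[name].type for name in ("Meta", "MyError", "Plain")]
#     # -> ['metaclass', 'exception', 'class']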
def get_wrapping_class(node):
"""Get the class that wraps the given node.
We consider that a class wraps a node if the class
is a parent of the given node.
:returns: The class that wraps the given node
:rtype: ClassDef or None
"""
klass = node.frame(future=True)
while klass is not None and not isinstance(klass, ClassDef):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame(future=True)
return klass
# pylint: disable=too-many-instance-attributes
class ClassDef(mixins.FilterStmtsMixin, LocalsDictNodeNG, node_classes.Statement):
"""Class representing an :class:`ast.ClassDef` node.
>>> import astroid
>>> node = astroid.extract_node('''
class Thing:
def my_meth(self, arg):
return arg + self.offset
''')
>>> node
<ClassDef.Thing l.2 at 0x7f23b2e9e748>
"""
# some of the attributes below are set by the builder module or
# by raw factories
# a dictionary of class instance attributes
_astroid_fields = ("decorators", "bases", "keywords", "doc_node", "body") # name
decorators = None
"""The decorators that are applied to this class.
:type: Decorators or None
"""
special_attributes = ClassModel()
"""The names of special attributes that this class has.
:type: objectmodel.ClassModel
"""
_type = None
_metaclass_hack = False
hide = False
type = property(
_class_type,
doc=(
"The class type for this node.\n\n"
"Possible values are: class, metaclass, exception.\n\n"
":type: str"
),
)
_other_fields = ("name", "doc", "is_dataclass", "position")
_other_other_fields = ("locals", "_newstyle")
_newstyle = None
@decorators_mod.deprecate_arguments(doc="Use the postinit arg 'doc_node' instead")
def __init__(
self,
name=None,
doc: Optional[str] = None,
lineno=None,
col_offset=None,
parent=None,
*,
end_lineno=None,
end_col_offset=None,
):
"""
:param name: The name of the class.
:type name: str or None
:param doc: The class docstring.
:param lineno: The line that this node appears on in the source code.
:type lineno: int or None
:param col_offset: The column that this node appears on in the
source code.
:type col_offset: int or None
:param parent: The parent node in the syntax tree.
:type parent: NodeNG or None
:param end_lineno: The last line this node appears on in the source code.
:type end_lineno: Optional[int]
:param end_col_offset: The end column this node appears on in the
source code. Note: This is after the last symbol.
:type end_col_offset: Optional[int]
"""
self.instance_attrs = {}
self.locals = {}
"""A map of the name of a local variable to the node defining it.
:type: dict(str, NodeNG)
"""
self.keywords = []
"""The keywords given to the class definition.
This is usually for :pep:`3115` style metaclass declaration.
:type: list(Keyword) or None
"""
self.bases = []
"""What the class inherits from.
:type: list(NodeNG)
"""
self.body = []
"""The contents of the class body.
:type: list(NodeNG)
"""
self.name = name
"""The name of the class.
:type name: str or None
"""
self._doc = doc
"""The class docstring."""
self.doc_node: Optional[Const] = None
"""The doc node associated with this node."""
self.is_dataclass: bool = False
"""Whether this class is a dataclass."""
super().__init__(
lineno=lineno,
col_offset=col_offset,
end_lineno=end_lineno,
end_col_offset=end_col_offset,
parent=parent,
)
if parent is not None:
parent.frame(future=True).set_local(name, self)
for local_name, node in self.implicit_locals():
self.add_local_node(node, local_name)
@property
def doc(self) -> Optional[str]:
"""The class docstring."""
warnings.warn(
"The 'ClassDef.doc' attribute is deprecated, "
"use 'ClassDef.doc_node' instead.",
DeprecationWarning,
)
return self._doc
@doc.setter
def doc(self, value: Optional[str]) -> None:
warnings.warn(
"Setting the 'ClassDef.doc' attribute is deprecated, "
"use 'ClassDef.doc_node.value' instead.",
DeprecationWarning,
)
self._doc = value
def implicit_parameters(self):
return 1
def implicit_locals(self):
"""Get implicitly defined class definition locals.
:returns: the name and Const pair for each local
:rtype: tuple(tuple(str, node_classes.Const), ...)
"""
locals_ = (("__module__", self.special_attributes.attr___module__),)
# __qualname__ is defined in PEP3155
locals_ += (("__qualname__", self.special_attributes.attr___qualname__),)
return locals_
# pylint: disable=redefined-outer-name
def postinit(
self,
bases,
body,
decorators,
newstyle=None,
metaclass=None,
keywords=None,
*,
position: Optional[Position] = None,
doc_node: Optional[Const] = None,
):
"""Do some setup after initialisation.
:param bases: What the class inherits from.
:type bases: list(NodeNG)
:param body: The contents of the class body.
:type body: list(NodeNG)
:param decorators: The decorators that are applied to this class.
:type decorators: Decorators or None
:param newstyle: Whether this is a new style class or not.
:type newstyle: bool or None
:param metaclass: The metaclass of this class.
:type metaclass: NodeNG or None
:param keywords: The keywords given to the class definition.
:type keywords: list(Keyword) or None
:param position: Position of class keyword and name.
:param doc_node: The doc node associated with this node.
"""
if keywords is not None:
self.keywords = keywords
self.bases = bases
self.body = body
self.decorators = decorators
if newstyle is not None:
self._newstyle = newstyle
if metaclass is not None:
self._metaclass = metaclass
self.position = position
self.doc_node = doc_node
if doc_node:
self._doc = doc_node.value
def _newstyle_impl(self, context=None):
if context is None:
context = InferenceContext()
if self._newstyle is not None:
return self._newstyle
for base in self.ancestors(recurs=False, context=context):
if base._newstyle_impl(context):
self._newstyle = True
break
klass = self.declared_metaclass()
# could be any callable, we'd need to infer the result of klass(name,
# bases, dict). punt if it's not a class node.
if klass is not None and isinstance(klass, ClassDef):
self._newstyle = klass._newstyle_impl(context)
if self._newstyle is None:
self._newstyle = False
return self._newstyle
_newstyle = None
newstyle = property(
_newstyle_impl,
doc=("Whether this is a new style class or not\n\n" ":type: bool or None"),
)
@cached_property
def fromlineno(self) -> Optional[int]:
"""The first line that this node appears on in the source code."""
if not PY38_PLUS or PY38 and IS_PYPY:
# For Python < 3.8 the lineno is the line number of the first decorator.
# We want the class statement lineno. Similar to 'FunctionDef.fromlineno'
lineno = self.lineno
if self.decorators is not None:
lineno += sum(
node.tolineno - node.lineno + 1 for node in self.decorators.nodes
)
return lineno
return super().fromlineno
@cached_property
def blockstart_tolineno(self):
"""The line on which the beginning of this block ends.
:type: int
"""
if self.bases:
return self.bases[-1].tolineno
return self.fromlineno
def block_range(self, lineno):
"""Get a range from the given line number to where this node ends.
:param lineno: Unused.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
:rtype: tuple(int, int)
"""
return self.fromlineno, self.tolineno
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
if self.newstyle:
return "builtins.type"
return "builtins.classobj"
def display_type(self):
"""A human readable type of this node.
:returns: The type of this node.
:rtype: str
"""
return "Class"
def callable(self):
"""Whether this node defines something that is callable.
:returns: True if this defines something that is callable,
False otherwise.
For a :class:`ClassDef` this is always ``True``.
:rtype: bool
"""
return True
def is_subtype_of(self, type_name, context=None):
"""Whether this class is a subtype of the given type.
:param type_name: The name of the type to check against.
:type type_name: str
:returns: True if this class is a subtype of the given type,
False otherwise.
:rtype: bool
"""
if self.qname() == type_name:
return True
return any(anc.qname() == type_name for anc in self.ancestors(context=context))
def _infer_type_call(self, caller, context):
try:
name_node = next(caller.args[0].infer(context))
except StopIteration as e:
raise InferenceError(node=caller.args[0], context=context) from e
if isinstance(name_node, node_classes.Const) and isinstance(
name_node.value, str
):
name = name_node.value
else:
return util.Uninferable
result = ClassDef(name)
# Get the bases of the class.
try:
class_bases = next(caller.args[1].infer(context))
except StopIteration as e:
raise InferenceError(node=caller.args[1], context=context) from e
if isinstance(class_bases, (node_classes.Tuple, node_classes.List)):
bases = []
for base in class_bases.itered():
inferred = next(base.infer(context=context), None)
if inferred:
bases.append(
node_classes.EvaluatedObject(original=base, value=inferred)
)
result.bases = bases
else:
# There is currently no AST node that can represent an 'unknown'
# node (Uninferable is not an AST node), therefore we simply return Uninferable here
# although we know at least the name of the class.
return util.Uninferable
# Get the members of the class
try:
members = next(caller.args[2].infer(context))
except (InferenceError, StopIteration):
members = None
if members and isinstance(members, node_classes.Dict):
for attr, value in members.items:
if isinstance(attr, node_classes.Const) and isinstance(attr.value, str):
result.locals[attr.value] = [value]
result.parent = caller.parent
return result
def infer_call_result(self, caller, context=None):
"""infer what a class is returning when called"""
if self.is_subtype_of("builtins.type", context) and len(caller.args) == 3:
result = self._infer_type_call(caller, context)
yield result
return
dunder_call = None
try:
metaclass = self.metaclass(context=context)
if metaclass is not None:
dunder_call = next(metaclass.igetattr("__call__", context))
except (AttributeInferenceError, StopIteration):
pass
if dunder_call and dunder_call.qname() != "builtins.type.__call__":
# Use the metaclass's own __call__ only when it differs from
# type.__call__ (type being the default metaclass).
context = bind_context_to_node(context, self)
context.callcontext.callee = dunder_call
yield from dunder_call.infer_call_result(caller, context)
else:
yield self.instantiate_class()
def scope_lookup(self, node, name, offset=0):
"""Lookup where the given name is assigned.
:param node: The node to look for assignments up to.
Any assignments after the given node are ignored.
:type node: NodeNG
:param name: The name to find assignments for.
:type name: str
:param offset: The line offset to filter statements up to.
:type offset: int
:returns: This scope node and the list of assignments associated to the
given name according to the scope where it has been found (locals,
globals or builtin).
:rtype: tuple(str, list(NodeNG))
"""
# If the name looks like a builtin name, just try to look
# into the upper scope of this class. We might have a
# decorator that is poorly named after a builtin object
# inside this class.
lookup_upper_frame = (
isinstance(node.parent, node_classes.Decorators)
and name in AstroidManager().builtins_module
)
if (
any(node == base or base.parent_of(node) for base in self.bases)
or lookup_upper_frame
):
# Handle the case where we have either a name
# in the bases of a class, which exists before
# the actual definition or the case where we have
# a Getattr node, with that name.
#
# name = ...
# class A(name):
# def name(self): ...
#
# import name
# class A(name.Name):
# def name(self): ...
frame = self.parent.frame(future=True)
# line offset to avoid that class A(A) resolve the ancestor to
# the defined class
offset = -1
else:
frame = self
return frame._scope_lookup(node, name, offset)
@property
def basenames(self):
"""The names of the parent classes
Names are given in the order they appear in the class definition.
:type: list(str)
"""
return [bnode.as_string() for bnode in self.bases]
def ancestors(self, recurs=True, context=None):
"""Iterate over the base classes in prefixed depth first order.
:param recurs: Whether to recurse or return direct ancestors only.
:type recurs: bool
:returns: The base classes
:rtype: iterable(NodeNG)
"""
# FIXME: should be possible to choose the resolution order
# FIXME: inference make infinite loops possible here
yielded = {self}
if context is None:
context = InferenceContext()
if not self.bases and self.qname() != "builtins.object":
yield builtin_lookup("object")[1][0]
return
for stmt in self.bases:
with context.restore_path():
try:
for baseobj in stmt.infer(context):
if not isinstance(baseobj, ClassDef):
if isinstance(baseobj, bases.Instance):
baseobj = baseobj._proxied
else:
continue
if not baseobj.hide:
if baseobj in yielded:
continue
yielded.add(baseobj)
yield baseobj
if not recurs:
continue
for grandpa in baseobj.ancestors(recurs=True, context=context):
if grandpa is self:
# This class is the ancestor of itself.
break
if grandpa in yielded:
continue
yielded.add(grandpa)
yield grandpa
except InferenceError:
continue
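# A rough sketch of ancestors() (the class hierarchy is a made-up example):
#
#     import astroid
#     klass = astroid.extract_node("""
#     class A: ...
#     class B(A): ...
#     class C(B): ...  #@
#     """)
#     [anc.name for anc in klass.ancestors()]   # -> ['B', 'A', 'object']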
def local_attr_ancestors(self, name, context=None):
"""Iterate over the parents that define the given name.
:param name: The name to find definitions for.
:type name: str
:returns: The parents that define the given name.
:rtype: iterable(NodeNG)
"""
# Look up in the mro if we can. This will result in the
# attribute being looked up just as Python does it.
try:
ancestors = self.mro(context)[1:]
except MroError:
# Fallback to use ancestors, we can't determine
# a sane MRO.
ancestors = self.ancestors(context=context)
for astroid in ancestors:
if name in astroid:
yield astroid
def instance_attr_ancestors(self, name, context=None):
"""Iterate over the parents that define the given name as an attribute.
:param name: The name to find definitions for.
:type name: str
:returns: The parents that define the given name as
an instance attribute.
:rtype: iterable(NodeNG)
"""
for astroid in self.ancestors(context=context):
if name in astroid.instance_attrs:
yield astroid
def has_base(self, node):
"""Whether this class directly inherits from the given node.
:param node: The node to check for.
:type node: NodeNG
:returns: True if this class directly inherits from the given node.
:rtype: bool
"""
return node in self.bases
def local_attr(self, name, context=None):
"""Get the list of assign nodes associated to the given name.
Assignments are looked for in both this class and in parents.
:returns: The list of assignments to the given name.
:rtype: list(NodeNG)
:raises AttributeInferenceError: If no attribute with this name
can be found in this class or parent classes.
"""
result = []
if name in self.locals:
result = self.locals[name]
else:
class_node = next(self.local_attr_ancestors(name, context), None)
if class_node:
result = class_node.locals[name]
result = [n for n in result if not isinstance(n, node_classes.DelAttr)]
if result:
return result
raise AttributeInferenceError(target=self, attribute=name, context=context)
def instance_attr(self, name, context=None):
"""Get the list of nodes associated to the given attribute name.
Assignments are looked for in both this class and in parents.
:returns: The list of assignments to the given name.
:rtype: list(NodeNG)
:raises AttributeInferenceError: If no attribute with this name
can be found in this class or parent classes.
"""
# Return a copy, so we don't modify self.instance_attrs,
# which could lead to an infinite loop.
values = list(self.instance_attrs.get(name, []))
# get all values from parents
for class_node in self.instance_attr_ancestors(name, context):
values += class_node.instance_attrs[name]
values = [n for n in values if not isinstance(n, node_classes.DelAttr)]
if values:
return values
raise AttributeInferenceError(target=self, attribute=name, context=context)
def instantiate_class(self):
"""Get an :class:`Instance` of the :class:`ClassDef` node.
:returns: An :class:`Instance` of the :class:`ClassDef` node,
or self if this is not possible.
:rtype: Instance or ClassDef
"""
try:
if any(cls.name in EXCEPTION_BASE_CLASSES for cls in self.mro()):
# Subclasses of exceptions can be exception instances
return objects.ExceptionInstance(self)
except MroError:
pass
return bases.Instance(self)
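# A brief sketch of instantiate_class() (the class name is illustrative):
#
#     import astroid
#     klass = astroid.extract_node("class MyError(Exception): ...")
#     inst = klass.instantiate_class()
#     type(inst).__name__   # -> 'ExceptionInstance' (a plain class would
#                           #    yield a bases.Instance instead)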
def getattr(self, name, context=None, class_context=True):
"""Get an attribute from this class, using Python's attribute semantic.
This method doesn't look in the :attr:`instance_attrs` dictionary
since it is done by an :class:`Instance` proxy at inference time.
It may return an :class:`Uninferable` object if
the attribute has not been
found, but a ``__getattr__`` or ``__getattribute__`` method is defined.
If ``class_context`` is given, then it is considered that the
attribute is accessed from a class context,
e.g. ClassDef.attribute, otherwise it might have been accessed
from an instance as well. If ``class_context`` is used in that
case, then a lookup in the implicit metaclass and the explicit
metaclass will be done.
:param name: The attribute to look for.
:type name: str
:param class_context: Whether the attribute can be accessed statically.
:type class_context: bool
:returns: The attribute.
:rtype: list(NodeNG)
:raises AttributeInferenceError: If the attribute cannot be inferred.
"""
if not name:
raise AttributeInferenceError(target=self, attribute=name, context=context)
values = self.locals.get(name, [])
if name in self.special_attributes and class_context and not values:
result = [self.special_attributes.lookup(name)]
if name == "__bases__":
# Need special treatment, since they are mutable
# and we need to return all the values.
result += values
return result
# don't modify the list in self.locals!
values = list(values)
for classnode in self.ancestors(recurs=True, context=context):
values += classnode.locals.get(name, [])
if class_context:
values += self._metaclass_lookup_attribute(name, context)
if not values:
raise AttributeInferenceError(target=self, attribute=name, context=context)
# Look for AnnAssigns, which are not attributes in the purest sense.
for value in values:
if isinstance(value, node_classes.AssignName):
stmt = value.statement(future=True)
if isinstance(stmt, node_classes.AnnAssign) and stmt.value is None:
raise AttributeInferenceError(
target=self, attribute=name, context=context
)
return values
def _metaclass_lookup_attribute(self, name, context):
"""Search the given name in the implicit and the explicit metaclass."""
attrs = set()
implicit_meta = self.implicit_metaclass()
context = copy_context(context)
metaclass = self.metaclass(context=context)
for cls in (implicit_meta, metaclass):
if cls and cls != self and isinstance(cls, ClassDef):
cls_attributes = self._get_attribute_from_metaclass(cls, name, context)
attrs.update(set(cls_attributes))
return attrs
def _get_attribute_from_metaclass(self, cls, name, context):
try:
attrs = cls.getattr(name, context=context, class_context=True)
except AttributeInferenceError:
return
for attr in bases._infer_stmts(attrs, context, frame=cls):
if not isinstance(attr, FunctionDef):
yield attr
continue
if isinstance(attr, objects.Property):
yield attr
continue
if attr.type == "classmethod":
# If the method is a classmethod, then it will
# be bound to the metaclass, not to the class
# from where the attribute is retrieved.
# get_wrapping_class could return None, so just
# default to the current class.
frame = get_wrapping_class(attr) or self
yield bases.BoundMethod(attr, frame)
elif attr.type == "staticmethod":
yield attr
else:
yield bases.BoundMethod(attr, self)
def igetattr(self, name, context=None, class_context=True):
"""Infer the possible values of the given variable.
:param name: The name of the variable to infer.
:type name: str
:returns: The inferred possible values.
:rtype: iterable(NodeNG or Uninferable)
"""
# set lookup name since this is necessary to infer on import nodes for
# instance
context = copy_context(context)
context.lookupname = name
metaclass = self.metaclass(context=context)
try:
attributes = self.getattr(name, context, class_context=class_context)
# If we have more than one attribute, make sure that those starting from
# the second one are from the same scope. This is to account for modifications
# to the attribute happening *after* the attribute's definition (e.g. AugAssigns on lists)
if len(attributes) > 1:
first_attr, attributes = attributes[0], attributes[1:]
first_scope = first_attr.scope()
attributes = [first_attr] + [
attr
for attr in attributes
if attr.parent and attr.parent.scope() == first_scope
]
for inferred in bases._infer_stmts(attributes, context, frame=self):
# yield Uninferable object instead of descriptors when necessary
if not isinstance(inferred, node_classes.Const) and isinstance(
inferred, bases.Instance
):
try:
inferred._proxied.getattr("__get__", context)
except AttributeInferenceError:
yield inferred
else:
yield util.Uninferable
elif isinstance(inferred, objects.Property):
function = inferred.function
if not class_context:
# Through an instance so we can solve the property
yield from function.infer_call_result(
caller=self, context=context
)
# If we're in a class context, we need to determine if the property
# was defined in the metaclass (a derived class must be a subclass of
# the metaclass of all its bases), in which case we can resolve the
# property. If not, i.e. the property is defined in some base class
# instead, then we return the property object
elif metaclass and function.parent.scope() is metaclass:
# Resolve a property as long as it is not accessed through
# the class itself.
yield from function.infer_call_result(
caller=self, context=context
)
else:
yield inferred
else:
yield function_to_method(inferred, self)
except AttributeInferenceError as error:
if not name.startswith("__") and self.has_dynamic_getattr(context):
# class handle some dynamic attributes, return a Uninferable object
yield util.Uninferable
else:
raise InferenceError(
str(error), target=self, attribute=name, context=context
) from error
def has_dynamic_getattr(self, context=None):
"""Check if the class has a custom __getattr__ or __getattribute__.
If any such method is found and it is not from
builtins, nor from an extension module, then the function
will return True.
:returns: True if the class has a custom
__getattr__ or __getattribute__, False otherwise.
:rtype: bool
"""
def _valid_getattr(node):
root = node.root()
return root.name != "builtins" and getattr(root, "pure_python", None)
try:
return _valid_getattr(self.getattr("__getattr__", context)[0])
except AttributeInferenceError:
# if self.newstyle: XXX cause an infinite recursion error
try:
getattribute = self.getattr("__getattribute__", context)[0]
return _valid_getattr(getattribute)
except AttributeInferenceError:
pass
return False
def getitem(self, index, context=None):
"""Return the inference of a subscript.
This is basically looking up the method in the metaclass and calling it.
:returns: The inferred value of a subscript to this class.
:rtype: NodeNG
:raises AstroidTypeError: If this class does not define a
``__getitem__`` method.
"""
try:
methods = lookup(self, "__getitem__")
except AttributeInferenceError as exc:
if isinstance(self, ClassDef):
# subscripting a class definition may be
# achieved thanks to __class_getitem__ method
# which is a classmethod defined in the class
# that supports subscript and not in the metaclass
try:
methods = self.getattr("__class_getitem__")
# Here it is assumed that the __class_getitem__ node is
# a FunctionDef. One possible improvement would be to deal
# with more generic inference.
except AttributeInferenceError:
raise AstroidTypeError(node=self, context=context) from exc
else:
raise AstroidTypeError(node=self, context=context) from exc
method = methods[0]
# Create a new callcontext for providing index as an argument.
new_context = bind_context_to_node(context, self)
new_context.callcontext = CallContext(args=[index], callee=method)
try:
return next(method.infer_call_result(self, new_context), util.Uninferable)
except AttributeError:
# Starting with python3.9, builtin types list, dict etc...
# are subscriptable thanks to __class_getitem___ classmethod.
# However in such case the method is bound to an EmptyNode and
# EmptyNode doesn't have infer_call_result method yielding to
# AttributeError
if (
isinstance(method, node_classes.EmptyNode)
and self.name in {"list", "dict", "set", "tuple", "frozenset"}
and PY39_PLUS
):
return self
raise
except InferenceError:
return util.Uninferable
def methods(self):
"""Iterate over all of the method defined in this class and its parents.
:returns: The methods defined on the class.
:rtype: iterable(FunctionDef)
"""
done = {}
for astroid in itertools.chain(iter((self,)), self.ancestors()):
for meth in astroid.mymethods():
if meth.name in done:
continue
done[meth.name] = None
yield meth
def mymethods(self):
"""Iterate over all of the method defined in this class only.
:returns: The methods defined on the class.
:rtype: iterable(FunctionDef)
"""
for member in self.values():
if isinstance(member, FunctionDef):
yield member
def implicit_metaclass(self):
"""Get the implicit metaclass of the current class.
For newstyle classes, this will return an instance of builtins.type.
For oldstyle classes, it will simply return None, since there's
no implicit metaclass there.
:returns: The metaclass.
:rtype: builtins.type or None
"""
if self.newstyle:
return builtin_lookup("type")[1][0]
return None
_metaclass = None
def declared_metaclass(self, context=None):
"""Return the explicit declared metaclass for the current class.
        An explicitly declared metaclass is defined
either by passing the ``metaclass`` keyword argument
in the class definition line (Python 3) or (Python 2) by
having a ``__metaclass__`` class attribute, or if there are
no explicit bases but there is a global ``__metaclass__`` variable.
:returns: The metaclass of this class,
or None if one could not be found.
:rtype: NodeNG or None
"""
for base in self.bases:
try:
for baseobj in base.infer(context=context):
if isinstance(baseobj, ClassDef) and baseobj.hide:
self._metaclass = baseobj._metaclass
self._metaclass_hack = True
break
except InferenceError:
pass
if self._metaclass:
# Expects this from Py3k TreeRebuilder
try:
return next(
node
for node in self._metaclass.infer(context=context)
if node is not util.Uninferable
)
except (InferenceError, StopIteration):
return None
return None
def _find_metaclass(self, seen=None, context=None):
if seen is None:
seen = set()
seen.add(self)
klass = self.declared_metaclass(context=context)
if klass is None:
for parent in self.ancestors(context=context):
if parent not in seen:
klass = parent._find_metaclass(seen)
if klass is not None:
break
return klass
def metaclass(self, context=None):
"""Get the metaclass of this class.
If this class does not define explicitly a metaclass,
then the first defined metaclass in ancestors will be used
instead.
:returns: The metaclass of this class.
:rtype: NodeNG or None
"""
return self._find_metaclass(context=context)
def has_metaclass_hack(self):
return self._metaclass_hack
def _islots(self):
"""Return an iterator with the inferred slots."""
if "__slots__" not in self.locals:
return None
for slots in self.igetattr("__slots__"):
# check if __slots__ is a valid type
for meth in ITER_METHODS:
try:
slots.getattr(meth)
break
except AttributeInferenceError:
continue
else:
continue
if isinstance(slots, node_classes.Const):
# a string. Ignore the following checks,
# but yield the node, only if it has a value
if slots.value:
yield slots
continue
if not hasattr(slots, "itered"):
# we can't obtain the values, maybe a .deque?
continue
if isinstance(slots, node_classes.Dict):
values = [item[0] for item in slots.items]
else:
values = slots.itered()
if values is util.Uninferable:
continue
if not values:
# Stop the iteration, because the class
# has an empty list of slots.
return values
for elt in values:
try:
for inferred in elt.infer():
if inferred is util.Uninferable:
continue
if not isinstance(
inferred, node_classes.Const
) or not isinstance(inferred.value, str):
continue
if not inferred.value:
continue
yield inferred
except InferenceError:
continue
return None
def _slots(self):
if not self.newstyle:
raise NotImplementedError(
"The concept of slots is undefined for old-style classes."
)
slots = self._islots()
try:
first = next(slots)
except StopIteration as exc:
# The class doesn't have a __slots__ definition or empty slots.
if exc.args and exc.args[0] not in ("", None):
return exc.args[0]
return None
return [first] + list(slots)
# Cached, because inferring them all the time is expensive
@decorators_mod.cached
def slots(self):
"""Get all the slots for this node.
:returns: The names of slots for this class.
        If the class doesn't define any slots through the ``__slots__``
        variable, then this function will return None.
        It will also return None if the slots could not be inferred.
:rtype: list(str) or None
"""
def grouped_slots(
mro: List["ClassDef"],
) -> typing.Iterator[Optional[node_classes.NodeNG]]:
# Not interested in object, since it can't have slots.
for cls in mro[:-1]:
try:
cls_slots = cls._slots()
except NotImplementedError:
continue
if cls_slots is not None:
yield from cls_slots
else:
yield None
if not self.newstyle:
raise NotImplementedError(
"The concept of slots is undefined for old-style classes."
)
try:
mro = self.mro()
except MroError as e:
raise NotImplementedError(
"Cannot get slots while parsing mro fails."
) from e
slots = list(grouped_slots(mro))
if not all(slot is not None for slot in slots):
return None
return sorted(set(slots), key=lambda item: item.value)
def _inferred_bases(self, context=None):
        # Similar to .ancestors, but the difference is that when one base is inferred,
# only the first object is wanted. That's because
# we aren't interested in superclasses, as in the following
# example:
#
# class SomeSuperClass(object): pass
# class SomeClass(SomeSuperClass): pass
# class Test(SomeClass): pass
#
# Inferring SomeClass from the Test's bases will give
# us both SomeClass and SomeSuperClass, but we are interested
# only in SomeClass.
if context is None:
context = InferenceContext()
if not self.bases and self.qname() != "builtins.object":
yield builtin_lookup("object")[1][0]
return
for stmt in self.bases:
try:
# Find the first non-None inferred base value
baseobj = next(
b
for b in stmt.infer(context=context.clone())
if not (isinstance(b, Const) and b.value is None)
)
except (InferenceError, StopIteration):
continue
if isinstance(baseobj, bases.Instance):
baseobj = baseobj._proxied
if not isinstance(baseobj, ClassDef):
continue
if not baseobj.hide:
yield baseobj
else:
yield from baseobj.bases
def _compute_mro(self, context=None):
inferred_bases = list(self._inferred_bases(context=context))
bases_mro = []
for base in inferred_bases:
if base is self:
continue
try:
mro = base._compute_mro(context=context)
bases_mro.append(mro)
except NotImplementedError:
# Some classes have in their ancestors both newstyle and
# old style classes. For these we can't retrieve the .mro,
# although in Python it's possible, since the class we are
                # currently working on is in fact new style.
# So, we fallback to ancestors here.
ancestors = list(base.ancestors(context=context))
bases_mro.append(ancestors)
unmerged_mro = [[self]] + bases_mro + [inferred_bases]
unmerged_mro = list(clean_duplicates_mro(unmerged_mro, self, context))
clean_typing_generic_mro(unmerged_mro)
return _c3_merge(unmerged_mro, self, context)
def mro(self, context=None) -> List["ClassDef"]:
"""Get the method resolution order, using C3 linearization.
:returns: The list of ancestors, sorted by the mro.
:rtype: list(NodeNG)
:raises DuplicateBasesError: Duplicate bases in the same class base
:raises InconsistentMroError: A class' MRO is inconsistent
"""
return self._compute_mro(context=context)
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
For a :class:`ClassDef` this is always ``True``.
:rtype: bool
"""
return True
def get_children(self):
if self.decorators is not None:
yield self.decorators
yield from self.bases
if self.keywords is not None:
yield from self.keywords
yield from self.body
@decorators_mod.cached
def _get_assign_nodes(self):
children_assign_nodes = (
child_node._get_assign_nodes() for child_node in self.body
)
return list(itertools.chain.from_iterable(children_assign_nodes))
def frame(self: T, *, future: Literal[None, True] = None) -> T:
"""The node's frame node.
A frame node is a :class:`Module`, :class:`FunctionDef`,
:class:`ClassDef` or :class:`Lambda`.
:returns: The node itself.
"""
return self
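# Illustrative usage sketch (not part of astroid itself): how the ClassDef API
# above is typically exercised from client code. `astroid.extract_node` is an
# assumed public helper; `mro`, `slots` and `getattr` are the methods defined above.
#
#     import astroid
#     node = astroid.extract_node('''
#     class Base:
#         __slots__ = ("x",)
#     class Child(Base):
#         __slots__ = ("y",)
#     ''')
#     assert [cls.name for cls in node.mro()] == ["Child", "Base", "object"]
#     assert sorted(s.value for s in node.slots()) == ["x", "y"]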
| 33.919629 | 102 | 0.585277 |
6f1c74b6803d4ecf7f1e25eacad0215661193023
| 3,119 |
py
|
Python
|
certbot-dns-sakuracloud/certbot_dns_sakuracloud/dns_sakuracloud.py
|
tsrivishnu/certbot
|
81f02e5578819220e0b4e15a9ceca9b77fff436e
|
[
"Apache-2.0"
] | 1 |
2020-04-18T03:12:14.000Z
|
2020-04-18T03:12:14.000Z
|
certbot-dns-sakuracloud/certbot_dns_sakuracloud/dns_sakuracloud.py
|
tsrivishnu/certbot
|
81f02e5578819220e0b4e15a9ceca9b77fff436e
|
[
"Apache-2.0"
] | null | null | null |
certbot-dns-sakuracloud/certbot_dns_sakuracloud/dns_sakuracloud.py
|
tsrivishnu/certbot
|
81f02e5578819220e0b4e15a9ceca9b77fff436e
|
[
"Apache-2.0"
] | 3 |
2019-03-21T23:21:38.000Z
|
2020-06-23T20:56:56.000Z
|
"""DNS Authenticator for Sakura Cloud DNS."""
import logging
import zope.interface
from lexicon.providers import sakuracloud
from certbot import interfaces
from certbot.plugins import dns_common
from certbot.plugins import dns_common_lexicon
logger = logging.getLogger(__name__)
APIKEY_URL = "https://secure.sakura.ad.jp/cloud/#!/apikey/top/"
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for Sakura Cloud DNS
This Authenticator uses the Sakura Cloud API to fulfill a dns-01 challenge.
"""
description = 'Obtain certificates using a DNS TXT record ' + \
'(if you are using Sakura Cloud for DNS).'
ttl = 60
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.credentials = None
@classmethod
def add_parser_arguments(cls, add): # pylint: disable=arguments-differ
super(Authenticator, cls).add_parser_arguments(
add, default_propagation_seconds=90)
add('credentials', help='Sakura Cloud credentials file.')
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \
'the Sakura Cloud API.'
def _setup_credentials(self):
self.credentials = self._configure_credentials(
'credentials',
'Sakura Cloud credentials file',
{
'api-token': \
'API token for Sakura Cloud API obtained from {0}'.format(APIKEY_URL),
'api-secret': \
'API secret for Sakura Cloud API obtained from {0}'.format(APIKEY_URL),
}
)
def _perform(self, domain, validation_name, validation):
self._get_sakuracloud_client().add_txt_record(
domain, validation_name, validation)
def _cleanup(self, domain, validation_name, validation):
self._get_sakuracloud_client().del_txt_record(
domain, validation_name, validation)
def _get_sakuracloud_client(self):
return _SakuraCloudLexiconClient(
self.credentials.conf('api-token'),
self.credentials.conf('api-secret'),
self.ttl
)
class _SakuraCloudLexiconClient(dns_common_lexicon.LexiconClient):
"""
Encapsulates all communication with the Sakura Cloud via Lexicon.
"""
def __init__(self, api_token, api_secret, ttl):
super(_SakuraCloudLexiconClient, self).__init__()
self.provider = sakuracloud.Provider({
'auth_token': api_token,
'auth_secret': api_secret,
'ttl': ttl,
})
def _handle_http_error(self, e, domain_name):
if domain_name in str(e) and (str(e).startswith('404 Client Error: Not Found for url:')):
return # Expected errors when zone name guess is wrong
return super(_SakuraCloudLexiconClient, self)._handle_http_error(e, domain_name)
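# Illustrative credentials file (an assumption, not taken from this module):
# _configure_credentials above maps the 'api-token' and 'api-secret' keys onto
# INI options using certbot's usual dns_<plugin>_<option> naming, so a
# credentials file would look roughly like:
#
#     # ~/.secrets/sakuracloud.ini (keep permissions at 600)
#     dns_sakuracloud_api_token = 00000000-0000-0000-0000-000000000000
#     dns_sakuracloud_api_secret = <secret obtained from the APIKEY_URL page>
#
# Check the exact option names against the plugin documentation before use.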
| 35.443182 | 100 | 0.66688 |
92850dbe84600464c60a0a1067ed32bdb4a5b129
| 8,817 |
py
|
Python
|
sockeye/translate.py
|
Izecson/sockeye-1.16.6
|
f84044d4a64b2bcf744ccd4f94b16f8133d1f383
|
[
"Apache-2.0"
] | null | null | null |
sockeye/translate.py
|
Izecson/sockeye-1.16.6
|
f84044d4a64b2bcf744ccd4f94b16f8133d1f383
|
[
"Apache-2.0"
] | null | null | null |
sockeye/translate.py
|
Izecson/sockeye-1.16.6
|
f84044d4a64b2bcf744ccd4f94b16f8133d1f383
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Translation CLI.
"""
import argparse
import sys
import time
from math import ceil
from contextlib import ExitStack
from typing import Optional, Iterable
import mxnet as mx
import sockeye
import sockeye.arguments as arguments
import sockeye.constants as C
import sockeye.data_io
import sockeye.inference
from sockeye.lexicon import TopKLexicon
import sockeye.output_handler
from sockeye.log import setup_main_logger
from sockeye.utils import acquire_gpus, get_num_gpus, log_basic_info
from sockeye.utils import check_condition, grouper
logger = setup_main_logger(__name__, file_logging=False)
def main():
params = argparse.ArgumentParser(description='Translate CLI')
arguments.add_translate_cli_args(params)
args = params.parse_args()
if args.output is not None:
global logger
logger = setup_main_logger(__name__,
console=not args.quiet,
file_logging=True,
path="%s.%s" % (args.output, C.LOG_NAME))
if args.checkpoints is not None:
check_condition(len(args.checkpoints) == len(args.models), "must provide checkpoints for each model")
if args.target is not None:
args.beam_size = 1
log_basic_info(args)
output_handler = sockeye.output_handler.get_output_handler(args.output_type,
args.output,
args.sure_align_threshold,
args.output_attention_type)
with ExitStack() as exit_stack:
context = _setup_context(args, exit_stack)
models, vocab_source, vocab_target = sockeye.inference.load_models(
context,
args.max_input_len,
args.beam_size,
args.batch_size,
args.models,
args.checkpoints,
args.softmax_temperature,
args.max_output_length_num_stds,
args.output_attention_type,
decoder_return_logit_inputs=args.restrict_lexicon is not None,
cache_output_layer_w_b=args.restrict_lexicon is not None)
restrict_lexicon = None # type: TopKLexicon
if args.restrict_lexicon:
restrict_lexicon = TopKLexicon(vocab_source, vocab_target)
restrict_lexicon.load(args.restrict_lexicon)
translator = sockeye.inference.Translator(context,
args.ensemble_mode,
args.bucket_width,
sockeye.inference.LengthPenalty(args.length_penalty_alpha,
args.length_penalty_beta),
models,
vocab_source,
vocab_target,
restrict_lexicon,
args.output_attention_type,
args.output_attention_head_id)
read_and_translate(translator, output_handler, args.chunk_size, args.input, args.target)
def read_and_translate(translator: sockeye.inference.Translator, output_handler: sockeye.output_handler.OutputHandler,
chunk_size: Optional[int], source: Optional[str] = None, target: Optional[str] = None) -> None:
"""
Reads from either a file or stdin and translates each line, calling the output_handler with the result.
:param output_handler: Handler that will write output to a stream.
:param translator: Translator that will translate each line of input.
:param chunk_size: The size of the portion to read at a time from the input.
    :param source: Path to a file which will be translated line-by-line if given; if None, read from stdin.
    :param target: Optional path to a file of target sentences aligned with the source; if given, scoring is run instead of translation.
"""
source_data = sys.stdin if source is None else sockeye.data_io.smart_open(source)
target_data = None if target is None else sockeye.data_io.smart_open(target)
batch_size = translator.batch_size
if chunk_size is None:
if translator.batch_size == 1:
# No batching, therefore there is not need to read segments in chunks.
chunk_size = C.CHUNK_SIZE_NO_BATCHING
else:
# Get a constant number of batches per call to Translator.translate.
chunk_size = C.CHUNK_SIZE_PER_BATCH_SEGMENT * translator.batch_size
else:
if chunk_size < translator.batch_size:
logger.warning("You specified a chunk size (%d) smaller than the batch size (%d). This will lead to "
"a degregation of translation speed. Consider choosing a larger chunk size." % (chunk_size,
batch_size))
logger.info("Translating...")
total_time, total_lines = 0.0, 0
if target_data is None:
for chunk in grouper(source_data, chunk_size):
chunk_time = translate(output_handler, chunk, None, translator, total_lines)
total_lines += len(chunk)
total_time += chunk_time
else:
for chunk in zip(grouper(source_data, chunk_size), grouper(target_data, chunk_size)):
chunk_time = translate(output_handler, *chunk, translator, total_lines)
            total_lines += len(chunk[0])  # chunk is a (source_chunk, target_chunk) pair; count the source lines
total_time += chunk_time
if total_lines != 0:
logger.info("Processed %d lines in %d batches. Total time: %.4f, sec/sent: %.4f, sent/sec: %.4f",
total_lines, ceil(total_lines / batch_size), total_time,
total_time / total_lines, total_lines / total_time)
else:
logger.info("Processed 0 lines.")
def translate(output_handler: sockeye.output_handler.OutputHandler, source_data: Iterable[str], target_data: Iterable[str],
translator: sockeye.inference.Translator, chunk_id: int = 0) -> float:
"""
Translates each line from source_data, calling output handler after translating a batch.
:param output_handler: A handler that will be called once with the output of each translation.
    :param source_data: An iterable of source sentences that will be translated.
    :param target_data: An optional iterable of target sentences aligned with source_data; if given, scoring is run instead of translation.
:param translator: The translator that will be used for each line of input.
:param chunk_id: Global id of the chunk.
:return: Total time taken.
"""
tic = time.time()
if target_data is None:
trans_inputs = [translator.make_input(i, line) for i, line in enumerate(source_data, chunk_id + 1)]
trans_outputs = translator.translate(trans_inputs)
else:
trans_inputs = [translator.make_scorer_input(i, *line) for i, line in enumerate(zip(source_data, target_data), chunk_id + 1)]
trans_outputs = translator.run_scorer(trans_inputs)
total_time = time.time() - tic
batch_time = total_time / len(trans_inputs)
for trans_input, trans_output in zip(trans_inputs, trans_outputs):
output_handler.handle(trans_input, trans_output, batch_time)
return total_time
def _setup_context(args, exit_stack):
if args.use_cpu:
context = mx.cpu()
else:
num_gpus = get_num_gpus()
check_condition(num_gpus >= 1,
"No GPUs found, consider running on the CPU with --use-cpu "
"(note: check depends on nvidia-smi and this could also mean that the nvidia-smi "
"binary isn't on the path).")
check_condition(len(args.device_ids) == 1, "cannot run on multiple devices for now")
gpu_id = args.device_ids[0]
if args.disable_device_locking:
if gpu_id < 0:
# without locking and a negative device id we just take the first device
gpu_id = 0
else:
gpu_ids = exit_stack.enter_context(acquire_gpus([gpu_id], lock_dir=args.lock_dir))
gpu_id = gpu_ids[0]
context = mx.gpu(gpu_id)
return context
if __name__ == '__main__':
main()
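# Small self-contained sketch (not part of sockeye): illustrates the chunked
# read loop in read_and_translate above. _grouper_sketch is a stand-in
# assumption for sockeye.utils.grouper, which yields fixed-size lists from an
# iterable so each call to Translator.translate sees a bounded amount of work.
import itertools
def _grouper_sketch(iterable, size):
    it = iter(iterable)
    while True:
        chunk = list(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def _chunking_demo():
    lines = ["sentence %d" % i for i in range(10)]
    total = 0
    for chunk in _grouper_sketch(lines, 4):
        total += len(chunk)  # mirrors the total_lines accounting above
    assert total == 10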
| 44.085 | 133 | 0.623908 |
9d9ec0f5398fcf58bb758fad3f7d5ea76f3e3575
| 3,097 |
py
|
Python
|
extras/test/arduino_format_test.py
|
MalcolmSlaney/audio-to-tactile
|
8c1fa37509aa53307f24dc7d54e99f730a8bcc1f
|
[
"Apache-2.0"
] | 64 |
2019-05-03T17:33:07.000Z
|
2022-03-30T17:19:05.000Z
|
extras/test/arduino_format_test.py
|
MalcolmSlaney/audio-to-tactile
|
8c1fa37509aa53307f24dc7d54e99f730a8bcc1f
|
[
"Apache-2.0"
] | null | null | null |
extras/test/arduino_format_test.py
|
MalcolmSlaney/audio-to-tactile
|
8c1fa37509aa53307f24dc7d54e99f730a8bcc1f
|
[
"Apache-2.0"
] | 10 |
2019-05-17T15:03:34.000Z
|
2022-01-29T08:52:03.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Some limited checks that repo follows the Arduino library format.
This project has a specific directory structure in order to conform to the
Arduino library format. All embedded library code is under a srcs/ folder,
examples are in examples/, and all other non-library code (including tests) is
in extras/. For details, see
https://arduino.github.io/arduino-cli/latest/library-specification/
This format is required for publication in the Arduino Library Manager:
https://github.com/arduino/Arduino/wiki/Library-Manager-FAQ
"""
import os
import os.path
import unittest
REPO_ROOT = './'
def walk_dir_tree(directory):
for dir_name, _, file_names in os.walk(directory):
for file_name in file_names:
yield os.path.join(dir_name, file_name)
class ArduinoFormatTest(unittest.TestCase):
def _check_file_extensions(self, directory, allowed_extensions):
"""Checks that all files under `directory` have an allowed extension."""
self.assertTrue(
os.path.isdir(directory), msg=f'"{directory}" is not a directory')
for filename in walk_dir_tree(directory):
self.assertIn(
os.path.splitext(filename)[1],
allowed_extensions,
msg=f'"{filename}" has bad extension')
def test_root(self):
"""Test that repo root has no code files."""
for filename in os.listdir(REPO_ROOT):
self.assertNotIn(
os.path.splitext(filename)[1].lower(),
('.cpp', '.c', '.h', '.cc', '.s', '.ino', '.sh'),
msg=f'Code file "{filename}" not allowed in repo root.')
def test_examples_dir(self):
"""Test that examples/ dir contains only C/C++ code and INO sketches."""
examples_dir = os.path.join(REPO_ROOT, 'examples')
self._check_file_extensions(examples_dir, ('.ino', '.cpp', '.c', '.h'))
def test_src_dir(self):
"""Test that src/ dir contains only C/C++ library code."""
src_dir = os.path.join(REPO_ROOT, 'src')
self._check_file_extensions(src_dir, ('.cpp', '.c', '.h'))
for filename in walk_dir_tree(src_dir):
      # Look for filenames whose stem ends in "_test" (e.g. foo_test.c, foo_test.cpp).
suffix = os.path.splitext(filename)[0].rsplit('_', 1)[-1]
self.assertNotEqual(suffix.lower(), 'test',
msg=f'Test "{filename}" must go under extras/test.')
def test_library_properties_file(self):
"""Test that `library.properties` file exists."""
self.assertTrue(
os.path.isfile(os.path.join(REPO_ROOT, 'library.properties')))
if __name__ == '__main__':
unittest.main()
| 36.869048 | 79 | 0.694866 |
5cda78ebb9326feb67b4ce18fdfc61a624995d10
| 1,701 |
py
|
Python
|
awsrun/utils/curry.py
|
veb61/eec-289-ucd
|
fabd9c01c5e9dfaf869fe22e537fe08aafd4e622
|
[
"MIT"
] | null | null | null |
awsrun/utils/curry.py
|
veb61/eec-289-ucd
|
fabd9c01c5e9dfaf869fe22e537fe08aafd4e622
|
[
"MIT"
] | null | null | null |
awsrun/utils/curry.py
|
veb61/eec-289-ucd
|
fabd9c01c5e9dfaf869fe22e537fe08aafd4e622
|
[
"MIT"
] | 1 |
2021-10-07T23:10:33.000Z
|
2021-10-07T23:10:33.000Z
|
from __future__ import annotations
from typing import Callable, Generic, TypeVar, Union
ReturnType = TypeVar("ReturnType")
def curry(num_args: int) -> Callable[[Callable[..., ReturnType]], Partial[ReturnType]]:
def decorator(fn: Callable[..., ReturnType]):
return Partial(num_args, fn)
return decorator
class Partial(Generic[ReturnType]):
def __init__(
self, num_args: int, fn: Callable[..., ReturnType], *args, **kwargs
) -> None:
self.num_args = num_args
self.fn = fn
self.args = args
self.kwargs = kwargs
def __call__(self, *more_args, **more_kwargs) -> Union[Partial[ReturnType], ReturnType]:
all_args = self.args + more_args # tuple addition
all_kwargs = dict(**self.kwargs, **more_kwargs) # non-mutative dictionary union
num_args = len(all_args) + len(all_kwargs)
if num_args >= self.num_args:
return self.fn(*all_args, **all_kwargs)
else:
return Partial(self.num_args, self.fn, *all_args, **all_kwargs)
def __repr__(self):
return f"Partial({self.fn}, args={self.args}, kwargs={self.kwargs})"
def curry_functional(num_args: int):
def decorator(fn: Callable[..., ReturnType]):
def init(*args, **kwargs):
def call(*more_args, **more_kwargs):
all_args = args + more_args
all_kwargs = dict(**kwargs, **more_kwargs)
if len(all_args) + len(all_kwargs) >= num_args:
return fn(*all_args, **all_kwargs)
else:
return init(*all_args, **all_kwargs)
return call
return init()
return decorator
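# Minimal usage sketch (not part of the original module): exercises the curry
# decorator and Partial class defined above; every name comes from this file.
@curry(3)
def _add3(a, b, c):
    return a + b + c
assert isinstance(_add3(1), Partial)  # still waiting for more arguments
assert _add3(1)(2)(3) == 6            # arguments may arrive one at a time
assert _add3(1, 2)(c=3) == 6          # keyword arguments count toward num_args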
| 32.711538 | 92 | 0.604938 |
4bb493dfbdce8aadd53c9b5199998cd56e4ab909
| 97 |
py
|
Python
|
pacote-download/pythonProject/exercicios_python_guanabara/teste.py
|
oliveirajonathas/python_estudos
|
28921672d7e5d0866030c45b077a28998905f752
|
[
"MIT"
] | null | null | null |
pacote-download/pythonProject/exercicios_python_guanabara/teste.py
|
oliveirajonathas/python_estudos
|
28921672d7e5d0866030c45b077a28998905f752
|
[
"MIT"
] | null | null | null |
pacote-download/pythonProject/exercicios_python_guanabara/teste.py
|
oliveirajonathas/python_estudos
|
28921672d7e5d0866030c45b077a28998905f752
|
[
"MIT"
] | null | null | null |
tupla = (1, 5, 7, 3)
print(sorted(tupla))
print(tupla.index(5))
val = list(range(1,5))
print(val)
| 19.4 | 22 | 0.649485 |
dbdadcbadfd6a1d07d6846d7dfe2d93d6dfaef49
| 1,416 |
py
|
Python
|
tests/conftest.py
|
tgrx/whalekiller
|
3ba6a297ae47ca853c438fa0dd950e7eff954f6f
|
[
"MIT"
] | 1 |
2021-11-09T18:05:11.000Z
|
2021-11-09T18:05:11.000Z
|
tests/conftest.py
|
tgrx/whalekiller
|
3ba6a297ae47ca853c438fa0dd950e7eff954f6f
|
[
"MIT"
] | 6 |
2021-02-18T21:47:25.000Z
|
2021-02-23T04:45:28.000Z
|
tests/conftest.py
|
tgrx/whalekiller
|
3ba6a297ae47ca853c438fa0dd950e7eff954f6f
|
[
"MIT"
] | null | null | null |
import asyncio
import pytest
from framework.config import settings
from framework.dirs import DIR_TESTS_ASSETS
from main.actions import reset_cloud
from main.actions import setup_cloud
from main.schemas import CloudConfigSchema
@pytest.yield_fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
@pytest.yield_fixture(scope="session", autouse=True)
def service_url():
host = settings.HOST
port = settings.PORT
url = f"http://{host}:{port}"
yield url
@pytest.yield_fixture(scope="function", autouse=True)
async def empty_cloud(event_loop):
yield
await reset_cloud()
@pytest.yield_fixture(scope="function")
async def config_0(event_loop, empty_cloud):
yield await _config_fixture("input-0.json")
@pytest.yield_fixture(scope="function")
async def config_1(event_loop, empty_cloud):
yield await _config_fixture("input-1.json")
@pytest.yield_fixture(scope="function")
async def config_2(event_loop, empty_cloud):
yield await _config_fixture("input-2.json")
@pytest.yield_fixture(scope="function")
async def config_3(event_loop, empty_cloud):
yield await _config_fixture("input-3.json")
async def _config_fixture(asset: str) -> CloudConfigSchema:
path_to_file = (DIR_TESTS_ASSETS / asset).resolve().as_posix()
config = CloudConfigSchema.parse_file(path_to_file)
await setup_cloud(config)
return config
| 22.47619 | 66 | 0.757768 |
b0c0b00296c0e533d7093cb76f16ce98cb1be8c5
| 3,289 |
py
|
Python
|
test_junkie/reporter/xml_reporter.py
|
Rdvp1514/test_junkie
|
9246a33abc9ac8d6584781dcbe95e1093507aa8f
|
[
"MIT"
] | 72 |
2018-10-25T18:32:42.000Z
|
2022-02-02T03:03:09.000Z
|
test_junkie/reporter/xml_reporter.py
|
Rdvp1514/test_junkie
|
9246a33abc9ac8d6584781dcbe95e1093507aa8f
|
[
"MIT"
] | 41 |
2018-12-13T22:30:35.000Z
|
2021-11-04T09:08:49.000Z
|
test_junkie/reporter/xml_reporter.py
|
Rdvp1514/test_junkie
|
9246a33abc9ac8d6584781dcbe95e1093507aa8f
|
[
"MIT"
] | 10 |
2019-04-05T10:51:11.000Z
|
2021-12-06T15:18:56.000Z
|
import traceback
from test_junkie.constants import TestCategory
from test_junkie.debugger import LogJunkie
class XmlReporter:
@staticmethod
def create_xml_report(write_file, suites):
def __update_tag_stats(tag, status):
tag.set("tests", str(int(suite.get("tests")) + 1))
if status == TestCategory.SUCCESS:
tag.set("passed", str(int(tag.get("passed")) + 1))
else:
tag.set("failures", str(int(tag.get("failures")) + 1))
return tag
if write_file is not None:
try:
import os
from xml.etree.ElementTree import ElementTree, Element, SubElement
import xml
if not os.path.exists(write_file):
request = Element("root")
ElementTree(request).write(write_file)
xml_file = xml.etree.ElementTree.parse(write_file)
root = xml_file.getroot()
for suite_object in suites:
test_suite = suite_object.get_class_name()
tests = suite_object.get_test_objects()
for test_object in tests:
test_name = test_object.get_function_name()
test_metrics = test_object.metrics.get_metrics()
for class_param, class_param_data in test_metrics.items():
for param, param_data in class_param_data.items():
test_status = param_data["status"]
if test_status != TestCategory.SUCCESS:
test_status = "failure"
suite_found = False
for suite in root.iter("testsuite"):
suite_found = suite.attrib["name"] == test_suite
if suite_found:
__update_tag_stats(suite, test_status)
test = Element("testcase", name=str(test_name), status=str(test_status))
if test_status == "failure":
failure = Element("failure", type="failure")
test.append(failure)
suite.append(test)
ElementTree(root).write(write_file)
break
if not suite_found:
suite = SubElement(root, "testsuite", name=test_suite,
tests="0", passed="0", failures="0")
__update_tag_stats(suite, test_status)
test = SubElement(suite, "testcase", name=str(test_name), status=str(test_status))
if test_status == "failure":
SubElement(test, "failure", type="failure")
ElementTree(root).write(write_file)
except:
LogJunkie.error(traceback.format_exc())
| 45.054795 | 118 | 0.461843 |
4d38059908fcefcdeb6c1a2f9a2ca8c56e993c6b
| 5,431 |
py
|
Python
|
src/utils/logger.py
|
Neptune1201/ASIM
|
9f3fefa40ef6a84f531f3fd48f34ecce8080650a
|
[
"Apache-2.0"
] | 3 |
2021-10-04T03:29:39.000Z
|
2022-02-04T17:45:28.000Z
|
src/utils/logger.py
|
Neptune1201/ASIM
|
9f3fefa40ef6a84f531f3fd48f34ecce8080650a
|
[
"Apache-2.0"
] | 1 |
2021-10-05T12:45:38.000Z
|
2021-10-05T12:45:38.000Z
|
src/utils/logger.py
|
Neptune1201/ASIM
|
9f3fefa40ef6a84f531f3fd48f34ecce8080650a
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright (C) 2019 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
class Logger:
def __init__(self, args):
log = logging.getLogger(args.summary_dir)
if not log.handlers:
log.setLevel(logging.DEBUG)
fh = logging.FileHandler(os.path.join(args.summary_dir, args.log_file))
fh.setLevel(logging.INFO)
ch = ProgressHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)
self.log = log
# setup TensorBoard
if args.tensorboard:
from tensorboardX import SummaryWriter
self.writer = SummaryWriter(os.path.join(args.summary_dir, 'viz'))
self.log.info(f'TensorBoard activated.')
else:
self.writer = None
self.log_per_updates = args.log_per_updates
self.summary_per_updates = args.summary_per_updates
self.grad_clipping = args.grad_clipping
self.clips = 0
self.train_meters = {}
self.epoch = None
self.best_eval = 0.
self.best_eval_str = ''
def set_epoch(self, epoch):
self(f'Epoch: {epoch}')
self.epoch = epoch
@staticmethod
def _format_number(x):
return f'{x:.4f}' if float(x) > 1e-3 else f'{x:.4e}'
def update(self, stats):
updates = stats.pop('updates')
summary = stats.pop('summary')
if updates % self.log_per_updates == 0:
self.clips += int(stats['gnorm'] > self.grad_clipping)
stats_str = ' '.join(f'{key}: ' + self._format_number(val) for key, val in stats.items())
for key, val in stats.items():
if key not in self.train_meters:
self.train_meters[key] = AverageMeter()
self.train_meters[key].update(val)
msg = f'epoch {self.epoch} updates {updates} {stats_str}'
if self.log_per_updates != 1:
msg = '> ' + msg
self.log.info(msg)
if self.writer and updates % self.summary_per_updates == 0:
for key, val in stats.items():
self.writer.add_scalar(f'train/{key}', val, updates)
for key, val in summary.items():
self.writer.add_histogram(key, val, updates)
def newline(self):
self.log.debug('')
def log_eval(self, valid_stats):
self.newline()
updates = valid_stats.pop('updates')
eval_score = valid_stats.pop('score')
# report the exponential averaged training stats, while reporting the full dev set stats
if self.train_meters:
train_stats_str = ' '.join(f'{key}: ' + self._format_number(val) for key, val in self.train_meters.items())
train_stats_str += ' ' + f'clip: {self.clips}'
self.log.info(f'train {train_stats_str}')
valid_stats_str = ' '.join(f'{key}: ' + self._format_number(val) for key, val in valid_stats.items())
if eval_score > self.best_eval:
self.best_eval_str = valid_stats_str
self.best_eval = eval_score
valid_stats_str += ' [NEW BEST]'
else:
valid_stats_str += f' [BEST: {self._format_number(self.best_eval)}]'
self.log.info(f'valid {valid_stats_str}')
if self.writer:
for key in valid_stats.keys():
group = {'valid': valid_stats[key]}
if self.train_meters and key in self.train_meters:
group['train'] = float(self.train_meters[key])
self.writer.add_scalars(f'valid/{key}', group, updates)
self.train_meters = {}
self.clips = 0
def __call__(self, msg):
self.log.info(msg)
class ProgressHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
log_entry = self.format(record)
if record.message.startswith('> '):
sys.stdout.write('{}\r'.format(log_entry.rstrip()))
sys.stdout.flush()
else:
sys.stdout.write('{}\n'.format(log_entry))
class AverageMeter(object):
"""Keep exponential weighted averages."""
def __init__(self, beta=0.99):
self.beta = beta
self.moment = 0.
self.value = 0.
self.t = 0.
def update(self, val):
self.t += 1
self.moment = self.beta * self.moment + (1 - self.beta) * val
# bias correction
self.value = self.moment / (1 - self.beta ** self.t)
def __format__(self, spec):
return format(self.value, spec)
def __float__(self):
return self.value
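# Tiny self-contained check (not part of the original module): with a constant
# input the bias-corrected average above recovers the input exactly, because
# moment_t = (1 - beta**t) * x and value = moment_t / (1 - beta**t) = x.
_meter = AverageMeter(beta=0.9)
for _ in range(5):
    _meter.update(2.0)
assert abs(float(_meter) - 2.0) < 1e-9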
| 37.19863 | 119 | 0.598048 |
f82df9c2be0bba5a28598f3574c16491485dc2a0
| 2,003 |
py
|
Python
|
src/masonite/tests/TestCommand.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 1,816 |
2018-02-14T01:59:51.000Z
|
2022-03-31T17:09:20.000Z
|
src/masonite/tests/TestCommand.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 340 |
2018-02-11T00:27:26.000Z
|
2022-03-21T12:00:24.000Z
|
src/masonite/tests/TestCommand.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 144 |
2018-03-18T00:08:16.000Z
|
2022-02-26T01:51:58.000Z
|
from cleo import CommandTester
class TestCommand:
"""This class allows us to test craft commands and asserts command outputs."""
def __init__(self, application):
self.application = application
def run(self, command, arguments_str=""):
command = self.application.make("commands").command_application.find(command)
self.command_tester = CommandTester(command)
self.command_tester.execute(arguments_str)
return self
def assertExactOutput(self, ref_output):
"""Assert command output to be exactly the same as the given reference output."""
output = self._get_output()
assert ref_output == output, f"Command output was: {output}, not {ref_output}"
return self
def assertOutputContains(self, ref_output):
output = self._get_output()
assert (
ref_output in output
), f"Command output was: {output} and does not contain {ref_output}."
return self
def assertOutputMissing(self, ref_output):
"""Assert command output does not contain the given reference output."""
output = self._get_output()
assert (
ref_output not in output
), f"Command output was: {output}, not {ref_output}"
return self
def assertHasErrors(self):
assert self._get_errors()
return self
def assertExactErrors(self, ref_errors):
errors = self._get_errors()
assert (
errors == ref_errors
), f"Command output has errors: {errors}, not {ref_errors}."
return self
def assertSuccess(self):
"""Assert that command returned a 0 exit code meaning that it ran successfully."""
code = self.command_tester.status_code
assert 0 == code, "Command exited code is not 0: {code}."
return self
def _get_errors(self):
return self.command_tester.io.fetch_error()
def _get_output(self):
return self.command_tester.io.fetch_output()
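# Illustrative usage (an assumption, not from this file): `application` would be
# a bootstrapped Masonite container and "greet" a hypothetical registered
# command; the fluent assertions are the methods defined above.
#
#     TestCommand(application).run("greet", "Sam") \
#         .assertSuccess() \
#         .assertOutputContains("Hello")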
| 33.949153 | 90 | 0.651523 |
3e5eb03afe903a53f3019873fe316f3f2f1da469
| 1,073 |
py
|
Python
|
setup.py
|
foolscap/pocket
|
77a9c82574feb05cce5a93641936793ffb28cc93
|
[
"BSD-3-Clause"
] | 1 |
2015-05-26T09:27:31.000Z
|
2015-05-26T09:27:31.000Z
|
setup.py
|
foolscap/pocket
|
77a9c82574feb05cce5a93641936793ffb28cc93
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
foolscap/pocket
|
77a9c82574feb05cce5a93641936793ffb28cc93
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup
setup(
name = "pocket", # pip install pocket
description = "api wrapper for getpocket.com",
#long_description=open('README.md', 'rt').read(),
# version
# third part for minor release
# second when api changes
# first when it becomes stable someday
version = "0.3.5",
author = 'Tapan Pandita',
author_email = "[email protected]",
url = 'http://github.com/tapanpandita/pocket/',
license = 'BSD',
    # as a practice no need to hard code version unless you know the program won't
# work unless the specific versions are used
install_requires = ["requests", ],
py_modules = ["pocket"],
zip_safe = True,
)
# TODO: Do all this and delete these lines
# register: Create an accnt on pypi, store your credentials in ~/.pypirc:
#
# [pypirc]
# servers =
# pypi
#
# [server-login]
# username:<username>
# password:<pass>
#
# $ python setup.py register # one time only, will create pypi page for pocket
# $ python setup.py sdist --formats=gztar,zip upload # create a new release
#
| 25.547619 | 78 | 0.667288 |
04b75304988c1e9e1d00577601c092d5d208480a
| 5,260 |
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_usages_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8 |
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_usages_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_usages_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations(object):
"""UsagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.UsagesListResult"]
"""List network usages for a subscription.
:param location: The location where resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsagesListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.UsagesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.UsagesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._ ]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UsagesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'} # type: ignore
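# Illustrative usage sketch (an assumption, not part of this generated file):
# this operations class is normally reached through NetworkManagementClient;
# the credential and subscription id below are placeholders.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for usage in client.usages.list(location="westus"):
#         print(usage.name.value, usage.current_value, usage.limit)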
| 44.576271 | 134 | 0.649049 |
29b7bd8da188f29cd2235f51cec163d04917d8d9
| 634 |
py
|
Python
|
examples/show_sample_data.py
|
dstansby/arboretum
|
697598d600312ae527a1fb3f08021307feeeb571
|
[
"MIT"
] | 3 |
2021-12-29T16:48:00.000Z
|
2022-03-31T09:19:55.000Z
|
examples/show_sample_data.py
|
dstansby/arboretum
|
697598d600312ae527a1fb3f08021307feeeb571
|
[
"MIT"
] | 25 |
2021-11-30T11:29:06.000Z
|
2022-03-31T13:07:13.000Z
|
examples/show_sample_data.py
|
dstansby/arboretum
|
697598d600312ae527a1fb3f08021307feeeb571
|
[
"MIT"
] | 4 |
2021-12-21T00:51:26.000Z
|
2022-03-09T15:55:45.000Z
|
"""
Load and show sample data
=========================
This example:
- loads some sample data
- adds the data to a napari viewer
- loads the arboretum plugin
- opens the napari viewer
"""
import napari
from napari_arboretum import load_sample_data
track, segmentation = load_sample_data()
viewer = napari.Viewer()
viewer.add_layer(segmentation)
viewer.add_layer(track)
viewer.window.add_plugin_dock_widget(
plugin_name="napari-arboretum", widget_name="Arboretum"
)
if __name__ == '__main__':
# The napari event loop needs to be run under here to allow the window
# to be spawned from a Python script
napari.run()
| 23.481481 | 74 | 0.728707 |
940d5f01c21e94ce013693ff5c93a1e1f15a2b2b
| 3,174 |
py
|
Python
|
pitcher/people/forms.py
|
Mantongash/pitcher
|
6b1d7d8c19f39ad0f25c505d01ce15190fbe5e4b
|
[
"Unlicense"
] | null | null | null |
pitcher/people/forms.py
|
Mantongash/pitcher
|
6b1d7d8c19f39ad0f25c505d01ce15190fbe5e4b
|
[
"Unlicense"
] | 7 |
2020-06-05T20:27:21.000Z
|
2022-01-13T02:03:41.000Z
|
pitcher/people/forms.py
|
Mantongash/pitcher
|
6b1d7d8c19f39ad0f25c505d01ce15190fbe5e4b
|
[
"Unlicense"
] | null | null | null |
from flask_wtf import FlaskForm
from flask_login import current_user
from wtforms import StringField, PasswordField, SubmitField, BooleanField, FileField
from wtforms.validators import DataRequired, Email, EqualTo, ValidationError, Length
from flask_wtf.file import FileAllowed
from pitcher.models import User
class RegistrationForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
email = StringField("Email", validators=[DataRequired(), Email()])
password = PasswordField("Password", validators=[DataRequired()])
confirm_password = PasswordField("Confirm Password", validators=[
DataRequired(), EqualTo("password")])
submit = SubmitField("Sign Up")
def validate_username(self, username):
# if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError(
"That username is already taken. Please pick another one")
def validate_email(self, email):
# if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError(
"That email is already taken. Please pick another one")
class LoginForm(FlaskForm):
email = StringField("Email", validators=[DataRequired(), Email()])
password = PasswordField("Password", validators=[DataRequired()])
remember = BooleanField("Remember Me")
submit = SubmitField("Log In")
class UpdateAccountForm(FlaskForm):
username = StringField("Username", validators=[
DataRequired(), Length(min=2, max=20)])
email = StringField("Email", validators=[DataRequired(), Email()])
picture = FileField("Update Profile Picture", validators=[
FileAllowed(["jpg", "png"])])
submit = SubmitField("Update")
def validate_username(self, username):
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError(
"That username is already taken. Please pick another one")
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError(
"That email is already taken. Please pick another one")
class RequestResetForm(FlaskForm):
email = StringField("Email", validators=[DataRequired(), Email()])
submit = SubmitField("Request Password Reset")
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError(
"There is no account with email you must register first")
class ResetPasswordForm(FlaskForm):
password = PasswordField("Password", validators=[DataRequired()])
confirm_password = PasswordField(
"Password", validators=[DataRequired(), EqualTo("password")])
submit = SubmitField("Reset Password")
| 40.692308 | 84 | 0.66005 |
6c4601ddaddcbdee53882e0541f0dd367517aa9e
| 58,648 |
py
|
Python
|
Lib/test/test_xmlrpc.py
|
ErikBjare/cpython
|
b68431fadb3150134ac6ccbf501cdfeaf4c75678
|
[
"0BSD"
] | 2 |
2022-03-27T14:52:48.000Z
|
2022-03-27T17:35:22.000Z
|
Lib/test/test_xmlrpc.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 8 |
2022-01-07T11:31:11.000Z
|
2022-03-04T00:07:16.000Z
|
Lib/test/test_xmlrpc.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 1 |
2022-03-27T18:34:54.000Z
|
2022-03-27T18:34:54.000Z
|
import base64
import datetime
import decimal
import sys
import time
import unittest
from unittest import mock
import xmlrpc.client as xmlrpclib
import xmlrpc.server
import http.client
import http, http.server
import socket
import threading
import re
import io
import contextlib
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
try:
import gzip
except ImportError:
gzip = None
support.requires_working_socket(module=True)
alist = [{'astring': '[email protected]',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary(b"my dog has fleas"),
'b64bytes': b"my dog has fleas",
'b64bytearray': bytearray(b"my dog has fleas"),
'boolean': False,
'unicode': '\u4000\u6000\u8000',
'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 2, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 2, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
dump = xmlrpclib.dumps((alist,))
load = xmlrpclib.loads(dump)
self.assertEqual(alist, load[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_builtin_types set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('20050210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('00010210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEqual(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxsize > 2**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MININT-1, dummy_write)
def test_dump_double(self):
xmlrpclib.dumps((float(2 ** 34),))
xmlrpclib.dumps((float(xmlrpclib.MAXINT),
float(xmlrpclib.MININT)))
xmlrpclib.dumps((float(xmlrpclib.MAXINT + 42),
float(xmlrpclib.MININT - 42)))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_double(xmlrpclib.MAXINT, dummy_write)
m.dump_double(xmlrpclib.MININT, dummy_write)
m.dump_double(xmlrpclib.MAXINT + 42, dummy_write)
m.dump_double(xmlrpclib.MININT - 42, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEqual(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_dump_encoding(self):
value = {'key\u20ac\xa4':
'value\u20ac\xa4'}
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15')
strg = "<?xml version='1.0' encoding='iso-8859-15'?>" + strg
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodresponse=True)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
methodname = 'method\u20ac\xa4'
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodname=methodname)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
self.assertEqual(xmlrpclib.loads(strg)[1], methodname)
def test_dump_bytes(self):
sample = b"my dog has fleas"
self.assertEqual(sample, xmlrpclib.Binary(sample))
for type_ in bytes, bytearray, xmlrpclib.Binary:
value = type_(sample)
s = xmlrpclib.dumps((value,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), bytes)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), xmlrpclib.Binary)
self.assertIsNone(m)
def test_loads_unsupported(self):
ResponseError = xmlrpclib.ResponseError
data = '<params><param><value><spam/></value></param></params>'
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><array>'
'<value><spam/></value>'
'</array></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><struct>'
'<member><name>a</name><value><spam/></value></member>'
'<member><name>b</name><value><spam/></value></member>'
'</struct></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
def check_loads(self, s, value, **kwargs):
dump = '<params><param><value>%s</value></param></params>' % s
result, m = xmlrpclib.loads(dump, **kwargs)
(newvalue,) = result
self.assertEqual(newvalue, value)
self.assertIs(type(newvalue), type(value))
self.assertIsNone(m)
def test_load_standard_types(self):
check = self.check_loads
check('string', 'string')
check('<string>string</string>', 'string')
check('<string>𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string</string>', '𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string')
check('<int>2056183947</int>', 2056183947)
check('<int>-2056183947</int>', -2056183947)
check('<i4>2056183947</i4>', 2056183947)
check('<double>46093.78125</double>', 46093.78125)
check('<boolean>0</boolean>', False)
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
xmlrpclib.Binary(b'\x00byte string\xff'))
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
b'\x00byte string\xff', use_builtin_types=True)
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
xmlrpclib.DateTime('20050210T11:41:23'))
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
datetime.datetime(2005, 2, 10, 11, 41, 23),
use_builtin_types=True)
check('<array><data>'
'<value><int>1</int></value><value><int>2</int></value>'
'</data></array>', [1, 2])
check('<struct>'
'<member><name>b</name><value><int>2</int></value></member>'
'<member><name>a</name><value><int>1</int></value></member>'
'</struct>', {'a': 1, 'b': 2})
def test_load_extension_types(self):
check = self.check_loads
check('<nil/>', None)
check('<ex:nil/>', None)
check('<i1>205</i1>', 205)
check('<i2>20561</i2>', 20561)
check('<i8>9876543210</i8>', 9876543210)
check('<biginteger>98765432100123456789</biginteger>',
98765432100123456789)
check('<float>93.78125</float>', 93.78125)
check('<bigdecimal>9876543210.0123456789</bigdecimal>',
decimal.Decimal('9876543210.0123456789'))
def test_get_host_info(self):
# see bug #3613, this raised a TypeError
transp = xmlrpc.client.Transport()
self.assertEqual(transp.get_host_info("[email protected]"),
('host.tld',
[('Authorization', 'Basic dXNlcg==')], {}))
def test_ssl_presence(self):
try:
import ssl
except ImportError:
has_ssl = False
else:
has_ssl = True
try:
xmlrpc.client.ServerProxy('https://localhost:9999').bad_function()
except NotImplementedError:
self.assertFalse(has_ssl, "xmlrpc client's error with SSL support")
except OSError:
self.assertTrue(has_ssl)
def test_keepalive_disconnect(self):
class RequestHandler(http.server.BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
handled = False
def do_POST(self):
length = int(self.headers.get("Content-Length"))
self.rfile.read(length)
if self.handled:
self.close_connection = True
return
response = xmlrpclib.dumps((5,), methodresponse=True)
response = response.encode()
self.send_response(http.HTTPStatus.OK)
self.send_header("Content-Length", len(response))
self.end_headers()
self.wfile.write(response)
self.handled = True
self.close_connection = False
def log_message(self, format, *args):
# don't clobber sys.stderr
pass
def run_server():
server.socket.settimeout(float(1)) # Don't hang if client fails
server.handle_request() # First request and attempt at second
server.handle_request() # Retried second request
server = http.server.HTTPServer((socket_helper.HOST, 0), RequestHandler)
self.addCleanup(server.server_close)
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join)
url = "http://{}:{}/".format(*server.server_address)
with xmlrpclib.ServerProxy(url) as p:
self.assertEqual(p.method(), 5)
self.assertEqual(p.method(), 5)
class SimpleXMLRPCDispatcherTestCase(unittest.TestCase):
class DispatchExc(Exception):
"""Raised inside the dispatched functions when checking for
chained exceptions"""
def test_call_registered_func(self):
"""Calls explicitly registered function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
def dispatched_func(*params):
raise self.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(dispatched_func)
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_instance_func(self):
"""Calls a registered instance attribute as a function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
class DispatchedClass:
def dispatched_func(self, *params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(DispatchedClass())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_dispatch_func(self):
"""Calls the registered instance's `_dispatch` function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_method = 'method'
exp_params = 1, 2, 3
class TestInstance:
def _dispatch(self, method, params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(
method, params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(TestInstance())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch(exp_method, exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_method, exp_params))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_registered_func_is_none(self):
"""Calls explicitly registered function which is None"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(None, name='method')
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_instance_has_no_func(self):
"""Attempts to call nonexistent function on a registered instance"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(object())
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_cannot_locate_func(self):
"""Calls a function that the dispatcher cannot locate"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEqual(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
def test_dotted_attribute(self):
        # this will raise AttributeError because the code does not allow
        # access to private methods
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
with mock.patch('time.localtime') as localtime_mock:
time_struct = time.struct_time(
[2013, 7, 15, 0, 24, 49, 0, 196, 0])
localtime_mock.return_value = time_struct
localtime = time.localtime()
t = xmlrpclib.DateTime()
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", localtime))
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %#x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t2, tref)
def test_comparison(self):
now = datetime.datetime.now()
dtime = xmlrpclib.DateTime(now.timetuple())
# datetime vs. DateTime
self.assertTrue(dtime == now)
self.assertTrue(now == dtime)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dtime)
self.assertTrue(dtime < then)
# str vs. DateTime
dstr = now.strftime("%Y%m%dT%H:%M:%S")
self.assertTrue(dtime == dstr)
self.assertTrue(dstr == dtime)
dtime_then = xmlrpclib.DateTime(then.timetuple())
self.assertTrue(dtime_then >= dstr)
self.assertTrue(dstr < dtime_then)
# some other types
dbytes = dstr.encode('ascii')
dtuple = now.timetuple()
self.assertFalse(dtime == 1970)
self.assertTrue(dtime != dbytes)
self.assertFalse(dtime == bytearray(dbytes))
self.assertTrue(dtime != dtuple)
with self.assertRaises(TypeError):
dtime < float(1970)
with self.assertRaises(TypeError):
dtime > dbytes
with self.assertRaises(TypeError):
dtime <= bytearray(dbytes)
with self.assertRaises(TypeError):
dtime >= dtuple
self.assertTrue(dtime == ALWAYS_EQ)
self.assertFalse(dtime != ALWAYS_EQ)
self.assertTrue(dtime < LARGEST)
self.assertFalse(dtime > LARGEST)
self.assertTrue(dtime <= LARGEST)
self.assertFalse(dtime >= LARGEST)
self.assertFalse(dtime < SMALLEST)
self.assertTrue(dtime > SMALLEST)
self.assertFalse(dtime <= SMALLEST)
self.assertTrue(dtime >= SMALLEST)
class BinaryTestCase(unittest.TestCase):
# XXX What should str(Binary(b"\xff")) return? I'm choosing "\xff"
# for now (i.e. interpreting the binary data as Latin-1-encoded
# text). But this feels very unsatisfactory. Perhaps we should
# only define repr(), and return r"Binary(b'\xff')" instead?
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = b'\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), str(d, "latin-1"))
def test_decode(self):
d = b'\x01\x02\x03abc123\xff\xfe'
de = base64.encodebytes(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), str(d, "latin-1"))
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), str(d, "latin-1"))
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shut down. The user must clear
# the event after it has been set the first time to catch the second set.
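# A minimal sketch of the handshake expected from the test side (the real
# pattern lives in BaseServerTestCase.setUp/tearDown below; the names here
# mirror that code rather than introduce anything new):
#
#   evt = threading.Event()
#   threading.Thread(target=http_server, args=(evt, 1)).start()
#   evt.wait(); evt.clear()     # first set: server ready; clear for the next
#   xmlrpclib.ServerProxy(URL).pow(2, 3)    # the single allowed request
#   evt.wait()                  # second set: server thread has shut down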
def http_server(evt, numrequests, requestHandler=None, encoding=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
class Fixture:
@staticmethod
def getData():
return '42'
class MyXMLRPCServer(xmlrpc.server.SimpleXMLRPCServer):
def get_request(self):
            # Ensure the accepted socket is always blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(("localhost", 0), requestHandler,
encoding=encoding,
logRequests=False, bind_and_activate=False)
try:
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x: x, 'têšt')
@serv.register_function
def my_function():
'''This is my function'''
return True
@serv.register_function(name='add')
def _(x, y):
return x + y
testInstance = TestInstanceClass()
serv.register_instance(testInstance, allow_dotted_names=True)
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except TimeoutError:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(xmlrpc.server.MultiPathXMLRPCServer):
def get_request(self):
            # Ensure the accepted socket is always blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
class BrokenDispatcher:
def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
raise RuntimeError("broken dispatcher")
serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
paths = [
"/foo", "/foo/bar",
"/foo?k=v", "/foo#frag", "/foo?k=v#frag",
"", "/", "/RPC2", "?k=v", "#frag",
]
for path in paths:
d = serv.add_dispatcher(path, xmlrpc.server.SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
d.register_function(lambda p=path: p, 'test')
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
serv.add_dispatcher("/is/broken", BrokenDispatcher())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except TimeoutError:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore OSErrors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
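# Typical guard built on the helper above (the test methods below repeat this
# pattern verbatim; this is only a condensed reminder, not new behaviour):
#
#   try:
#       xmlrpclib.ServerProxy(URL).pow(6, 8)
#   except (xmlrpclib.ProtocolError, OSError) as e:
#       if not is_unavailable_exception(e):
#           raise   # a real protocol error, not a transient 'unavailable' one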
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
try:
xmlrpclib.ServerProxy(URL).my_function()
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
raise
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, self.request_count, self.requestHandler)
thread = threading.Thread(target=self.threadFunc, args=serv_args)
thread.start()
self.addCleanup(thread.join)
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# disable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii(self):
start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_client_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii_methodname(self):
try:
p = xmlrpclib.ServerProxy(URL, encoding='ascii')
self.assertEqual(p.têšt(42), 42)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_404(self):
# send POST with http.client, it should return 404 header and
# 'Not Found' message.
with contextlib.closing(http.client.HTTPConnection(ADDR, PORT)) as conn:
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
expected_methods = set(['pow', 'div', 'my_function', 'add', 'têšt',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall',
'Fixture'])
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains;
# [{'faultCode': 1, 'faultString': '<class \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<class \'Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
def test_allow_dotted_names_true(self):
# XXX also need allow_dotted_names_false test.
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
data = server.Fixture.getData()
self.assertEqual(data, '42')
def test_unicode_host(self):
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
self.assertEqual(server.add("a", "\xe9"), "a\xe9")
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
with contextlib.closing(socket.create_connection((ADDR, PORT))) as conn:
conn.send('POST /RPC2 HTTP/1.0\r\n'
'Content-Length: 100\r\n\r\n'
'bye HTTP/1.1\r\n'
f'Host: {ADDR}:{PORT}\r\n'
'Accept-Encoding: identity\r\n'
'Content-Length: 0\r\n\r\n'.encode('ascii'))
def test_context_manager(self):
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, 3)
self.assertNotEqual(server('transport')._connection,
(None, None))
self.assertEqual(server('transport')._connection,
(None, None))
def test_context_manager_method_error(self):
try:
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, "a")
except xmlrpclib.Fault:
pass
self.assertEqual(server('transport')._connection,
(None, None))
class SimpleServerEncodingTestCase(BaseServerTestCase):
@staticmethod
def threadFunc(evt, numrequests, requestHandler=None, encoding=None):
http_server(evt, numrequests, requestHandler, 'iso-8859-15')
def test_server_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
def test_path3(self):
p = xmlrpclib.ServerProxy(URL+"/is/broken")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_invalid_path(self):
p = xmlrpclib.ServerProxy(URL+"/invalid")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path_query_fragment(self):
p = xmlrpclib.ServerProxy(URL+"/foo?k=v#frag")
self.assertEqual(p.test(), "/foo?k=v#frag")
def test_path_fragment(self):
p = xmlrpclib.ServerProxy(URL+"/foo#frag")
self.assertEqual(p.test(), "/foo#frag")
def test_path_query(self):
p = xmlrpclib.ServerProxy(URL+"/foo?k=v")
self.assertEqual(p.test(), "/foo?k=v")
def test_empty_path(self):
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.test(), "/RPC2")
def test_root_path(self):
p = xmlrpclib.ServerProxy(URL + "/")
self.assertEqual(p.test(), "/")
def test_empty_path_query(self):
p = xmlrpclib.ServerProxy(URL + "?k=v")
self.assertEqual(p.test(), "?k=v")
def test_empty_path_fragment(self):
p = xmlrpclib.ServerProxy(URL + "#frag")
self.assertEqual(p.test(), "#frag")
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
#check that we did at least two (the third may be pending append
#due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
        #they should have been handled by two request handlers, each having
        #logged at least two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipServerTestCase(BaseServerTestCase):
    #a request handler that supports keep-alive and records the content
    #length of the last request in a class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
p("close")()
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegex(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
p("close")()
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipUtilTestCase(unittest.TestCase):
def test_gzip_decode_limit(self):
max_gzip_decode = 20 * 1024 * 1024
data = b'\0' * max_gzip_decode
encoded = xmlrpclib.gzip_encode(data)
decoded = xmlrpclib.gzip_decode(encoded)
self.assertEqual(len(decoded), max_gzip_decode)
data = b'\0' * (max_gzip_decode + 1)
encoded = xmlrpclib.gzip_encode(data)
with self.assertRaisesRegex(ValueError,
"max gzipped payload length exceeded"):
xmlrpclib.gzip_decode(encoded)
xmlrpclib.gzip_decode(encoded, max_decode=-1)
class HeadersServerTestCase(BaseServerTestCase):
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
test_headers = None
def do_POST(self):
self.__class__.test_headers = self.headers
return super().do_POST()
requestHandler = RequestHandler
standard_headers = [
'Host', 'Accept-Encoding', 'Content-Type', 'User-Agent',
'Content-Length']
def setUp(self):
self.RequestHandler.test_headers = None
return super().setUp()
def assertContainsAdditionalHeaders(self, headers, additional):
expected_keys = sorted(self.standard_headers + list(additional.keys()))
self.assertListEqual(sorted(headers.keys()), expected_keys)
for key, value in additional.items():
self.assertEqual(headers.get(key), value)
def test_header(self):
p = xmlrpclib.ServerProxy(URL, headers=[('X-Test', 'foo')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_many(self):
p = xmlrpclib.ServerProxy(
URL, headers=[('X-Test', 'foo'), ('X-Test-Second', 'bar')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(
headers, {'X-Test': 'foo', 'X-Test-Second': 'bar'})
def test_header_empty(self):
p = xmlrpclib.ServerProxy(URL, headers=[])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {})
def test_header_tuple(self):
p = xmlrpclib.ServerProxy(URL, headers=(('X-Test', 'foo'),))
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_items(self):
p = xmlrpclib.ServerProxy(URL, headers={'X-Test': 'foo'}.items())
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
# Actual value of the URL doesn't matter if it is a string in
# the correct format.
self.url = 'http://fake.localhost'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(http.client.HTTPMessage):
def get(self, key, failobj=None):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return super().get(key, failobj)
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
thread = threading.Thread(target=http_server, args=serv_args)
thread.start()
self.addCleanup(thread.join)
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
default_class = http.client.HTTPMessage
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = default_class
def test_basic(self):
# check that flag is false by default
flagval = xmlrpc.server.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("X-exception"), expected_err)
self.assertTrue(e.headers.get("X-traceback") is not None)
else:
self.fail('ProtocolError not raised')
@contextlib.contextmanager
def captured_stdout(encoding='utf-8'):
"""A variation on support.captured_stdout() which gives a text stream
having a `buffer` attribute.
"""
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(), encoding=encoding)
try:
yield sys.stdout
finally:
sys.stdout = orig_stdout
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = xmlrpc.server.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with os_helper.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
            # capture the stdout output
with captured_stdout(encoding=self.cgi.encoding) as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with os_helper.EnvironmentVarGuard() as env, \
captured_stdout(encoding=self.cgi.encoding) as data_out, \
support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
# will respond exception, if so, our goal is achieved ;)
handle = data_out.read()
            # start at the 44th char so as to skip the http header; we
            # only need the xml
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
# Also test the content-length returned by handle_request
        # Using the same test method in order to avoid all the data-passing
# boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEqual(
int(re.search(r'Content-Length: (\d+)', handle).group(1)),
len(content))
class UseBuiltinTypesTestCase(unittest.TestCase):
def test_use_builtin_types(self):
# SimpleXMLRPCDispatcher.__init__ accepts use_builtin_types, which
# makes all dispatch of binary data as bytes instances, and all
# dispatch of datetime argument as datetime.datetime instances.
self.log = []
expected_bytes = b"my dog has fleas"
expected_date = datetime.datetime(2008, 5, 26, 18, 25, 12)
marshaled = xmlrpclib.dumps((expected_bytes, expected_date), 'foobar')
def foobar(*args):
self.log.extend(args)
handler = xmlrpc.server.SimpleXMLRPCDispatcher(
allow_none=True, encoding=None, use_builtin_types=True)
handler.register_function(foobar)
handler._marshaled_dispatch(marshaled)
self.assertEqual(len(self.log), 2)
mybytes, mydate = self.log
self.assertEqual(self.log, [expected_bytes, expected_date])
self.assertIs(type(mydate), datetime.datetime)
self.assertIs(type(mybytes), bytes)
def test_cgihandler_has_use_builtin_types_flag(self):
handler = xmlrpc.server.CGIXMLRPCRequestHandler(use_builtin_types=True)
self.assertTrue(handler.use_builtin_types)
def test_xmlrpcserver_has_use_builtin_types_flag(self):
server = xmlrpc.server.SimpleXMLRPCServer(("localhost", 0),
use_builtin_types=True)
server.server_close()
self.assertTrue(server.use_builtin_types)
def setUpModule():
thread_info = threading_helper.threading_setup()
unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
| 38.686016 | 87 | 0.618265 |
f6b5b720842385a7b51cb3a835edbdd142a0c9c2
| 545 |
py
|
Python
|
server/manage.py
|
ewen/s3-nginx-blog-post
|
91ff11eacb832a78affd4859be6d566716bb29d9
|
[
"MIT"
] | 4 |
2019-05-23T10:20:55.000Z
|
2021-10-02T08:17:55.000Z
|
server/manage.py
|
ewen/s3-nginx-blog-post
|
91ff11eacb832a78affd4859be6d566716bb29d9
|
[
"MIT"
] | 1 |
2018-03-09T23:26:14.000Z
|
2018-03-12T07:49:41.000Z
|
server/manage.py
|
ewen/s3-nginx-blog-post
|
91ff11eacb832a78affd4859be6d566716bb29d9
|
[
"MIT"
] | 1 |
2019-10-24T19:48:51.000Z
|
2019-10-24T19:48:51.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "s3_nginx_test.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
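# Typical invocations of this entry point (illustrative only; run from the
# server/ directory with the project's virtualenv active):
#
#   python manage.py migrate
#   python manage.py runserver 0.0.0.0:8000
#   python manage.py collectstatic --noinput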
| 34.0625 | 77 | 0.689908 |
bd7ebb6385380fdbb8018291417d0843da668628
| 278 |
py
|
Python
|
Python/Courses/Python-Tutorials.Telusko/00.Fundamentals/08.01-Functions.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/00.Fundamentals/08.01-Functions.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/00.Fundamentals/08.01-Functions.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
def greet():
print("Hello")
print("How do you do?")
def greet2(name):
print("hello", name)
print("How do you do?")
def add_numbers(n1, n2):
result = n1 + n2
return result
greet()
greet()
greet()
# greet2("Jack")
rs = add_numbers(6.7, 5.4)
print(rs)
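# Illustrative follow-up (hypothetical calls, not in the original script):
#
#   greet2("Jack")       # -> prints "hello Jack" then "How do you do?"
#   add_numbers(1, 2)    # -> returns 3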
| 13.238095 | 27 | 0.589928 |
0394a9836014dd7af06e635a94d975818820bdd7
| 24,479 |
py
|
Python
|
ferenda/facet.py
|
redhog/ferenda
|
6935e26fdc63adc68b8e852292456b8d9155b1f7
|
[
"BSD-2-Clause"
] | 18 |
2015-03-12T17:42:44.000Z
|
2021-12-27T10:32:22.000Z
|
ferenda/facet.py
|
redhog/ferenda
|
6935e26fdc63adc68b8e852292456b8d9155b1f7
|
[
"BSD-2-Clause"
] | 13 |
2016-01-27T10:19:07.000Z
|
2021-12-13T20:24:36.000Z
|
ferenda/facet.py
|
redhog/ferenda
|
6935e26fdc63adc68b8e852292456b8d9155b1f7
|
[
"BSD-2-Clause"
] | 6 |
2016-11-28T15:41:29.000Z
|
2022-01-08T11:16:48.000Z
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
unicode_literals, print_function)
from builtins import *
from datetime import date, datetime
from rdflib import URIRef, Namespace
from rdflib.namespace import RDF, RDFS, DC, SKOS, FOAF, DCTERMS
SCHEMA = Namespace("http://schema.org/")
BIBO = Namespace("http://purl.org/ontology/bibo/")
from ferenda import fulltextindex # to get the IndexedType classes
from ferenda import util
class Facet(object):
"""Create a facet from the given rdftype and some optional parameters.
:param rdftype: The type of facet being created
:type rdftype: rdflib.term.URIRef
:param label: A template for the label property of TocPageset objects
created from this facet
:type label: str
:param pagetitle: A template for the title property of TocPage objects
created from this facet
:type pagetitle: str
:param indexingtype: Object specifying how to store the data selected
by this facet in the fulltext index
:type indexingtype: ferenda.fulltext.IndexedType
:param selector: A function that takes *(row, binding, resource_graph)*
and returns a string acting as a category of some kind
:type selector: callable
:param key: A function that takes *(row, binding, resource_graph)* and
returns a string usable for sorting
:type key: callable
:param toplevel_only: Whether this facet should be applied to documents
only, or any named (ie. given an URI) fragment of
a document.
:type toplevel_only: bool
:param use_for_toc: Whether this facet should be used for TOC generation
:type use_for_toc: bool
:param use_for_feed: Whether this facet should be used for newsfeed
generation
:type use_for_feed: bool
:param selector_descending: Whether the values returned by ``selector``
should be presented in lexical descending
order
:type selector_descending: bool
:param key_descending: Whether documents, when sorted through the ``key``
function, should be presented in reverse order.
:type key_descending: bool
:param multiple_values: Whether more than one instance of the ``rdftype``
value should be processed (such as multiple
keywords each specified by one ``dcterms:subject``
triple).
:type multiple_values: bool
:param dimension_type: The general type of this facet -- can be ``"type"``
(values are ``rdf:type``), ``"ref"`` (values are
URIs), ``"year"`` (values are xsd:datetime or
similar), or ``"value"`` (values are string
literals).
:type dimension_type: str
:param dimension_label: An alternate label for this facet to be used if
the ``selector`` logic is more transformative
than selectional (ie. if it transforms dates to
True or False values depending on whether they're
April 1st, you might set this to "aprilfirst")
:type dimension_label: str
:param identificator: A function that takes *(row, binding,
resource_graph)* and returns an identifier-like
string usable as an id string or URL segment.
:type identificator: callable
If optional parameters aren't provided, then appropriate values are
    selected if rdftype is one of some common rdf properties:
=================== ======================================================
facet description
=================== ======================================================
rdf:type Grouped by :py:meth:`~rdflib.graph.Graph.qname` of the
``rdf:type`` of the document, eg. ``foaf:Document``.
Not used for toc
------------------- ------------------------------------------------------
dcterms:title Grouped by first "sortable" letter, eg for a document
titled "The Little Prince" returns "l". Is used as a
facet for the API, but it's debatable if it's useful
------------------- ------------------------------------------------------
dcterms:identifier Also grouped by first sortable letter. When indexing,
the resulting fulltext index field has a high boost
value, which increases the chances of this document
ranking high when one searches for its identifier.
------------------- ------------------------------------------------------
dcterms:abstract Not used for toc
------------------- ------------------------------------------------------
    dc:creator          Should be a free-text (string literal) value
------------------- ------------------------------------------------------
dcterms:publisher Should be a URIRef
------------------- ------------------------------------------------------
dcterms:references
------------------- ------------------------------------------------------
dcterms:issued Used for grouping documents published/issued in the
same year
------------------- ------------------------------------------------------
dc:subject A document can have multiple dc:subjects and all are
indexed/processed
------------------- ------------------------------------------------------
dcterms:subject Works like dc:subject, but the value should be a
URIRef
------------------- ------------------------------------------------------
schema:free A boolean value
=================== ======================================================
This module contains a number of classmethods that can be used as
arguments to ``selector`` and ``key``, eg
>>> from rdflib import Namespace
>>> MYVOCAB = Namespace("http://example.org/vocab/")
>>> f = Facet(MYVOCAB.enactmentDate, selector=Facet.year)
>>> f.selector({'myvocab_enactmentDate': '2014-07-06'},
... 'myvocab_enactmentDate')
'2014'
"""
_resourcecache = {}
@classmethod
def defaultselector(cls, row, binding, resource_graph=None):
"""This returns ``row[binding]`` without any transformation.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> Facet.defaultselector(row, "dcterms_title")
'A Tale of Two Cities'
"""
return row[binding]
@classmethod
def defaultidentificator(cls, row, binding, resource_graph=None):
"""This returns ``row[binding]`` run through a simple slug-like transformation.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> Facet.defaultidentificator(row, "dcterms_title")
'a-tale-of-two-cities'
"""
return row[binding].lower().replace(" ", "-")
@classmethod
def year(cls, row, binding='dcterms_issued', resource_graph=None):
"""This returns the the year part of ``row[binding]``.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> Facet.year(row, "dcterms_issued")
'1859'
"""
d = row[binding]
if not isinstance(d, (datetime, date)):
datestring = d
# assume a date(time) like '2014-06-05T12:00:00', '2014-06-05'
# or even '2014-06'
formatstring = {19: "%Y-%m-%dT%H:%M:%S",
10: "%Y-%m-%d",
7: "%Y-%m"}[len(datestring)]
d = datetime.strptime(datestring, formatstring)
return str(d.year)
@classmethod
def booleanvalue(cls, row, binding='schema_free', resource_graph=None):
"""
Returns True iff row[binding] == "true", False otherwise.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> Facet.booleanvalue(row, "schema_free")
True
"""
# only 'true' is True, everything else is False (unless boolean)
return row[binding] if isinstance(row[binding], bool) else row[binding] == 'true'
@classmethod
def titlesortkey(cls, row, binding='dcterms_title', resource_graph=None):
"""Returns a version of row[binding] suitable for sorting. The
function :py:func:`~ferenda.util.title_sortkey` is used for
string transformation.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> Facet.titlesortkey(row, "dcterms_title")
'ataleoftwocities'
"""
return util.title_sortkey(row[binding])
@classmethod
def firstletter(cls, row, binding='dcterms_title', resource_graph=None):
"""Returns the first letter of row[binding], transformed into a
sortable string.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> Facet.firstletter(row, "dcterms_title")
'a'
"""
titlesortkey = cls.titlesortkey(row, binding)
if titlesortkey:
return titlesortkey[0]
else:
# Handle the degenerate case where title consists
# entirely of non-letters (eg. "---").
return "-"
@classmethod
def resourcelabel(cls, row, binding='dcterms_publisher', resource_graph=None):
"""Lookup a suitable text label for row[binding] in resource_graph.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> import rdflib
>>> resources = rdflib.Graph().parse(format="turtle", data=\"""
... @prefix foaf: <http://xmlns.com/foaf/0.1/> .
...
... <http://example.org/chapman_hall> a foaf:Organization;
... foaf:name "Chapman & Hall" .
...
... \""")
>>> Facet.resourcelabel(row, "dcterms_publisher", resources)
'Chapman & Hall'
"""
# FIXME: if the graph changes in between calls, the cache
        # won't be invalidated and give incorrect results
k = (row[binding], resource_graph.identifier)
if k in cls._resourcecache:
return cls._resourcecache[k]
uri = URIRef(row[binding])
for pred in (RDFS.label, SKOS.prefLabel, SKOS.altLabel, DCTERMS.title,
DCTERMS.alternative, FOAF.name, BIBO.identifier):
if resource_graph.value(uri, pred):
cls._resourcecache[k] = str(resource_graph.value(uri, pred))
return cls._resourcecache[k]
else:
cls._resourcecache[k] = row[binding]
return cls._resourcecache[k]
@classmethod
def sortresource(cls, row, binding='dcterms_publisher', resource_graph=None):
"""Returns a sortable version of the resource label for
``row[binding]``.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> import rdflib
>>> resources = rdflib.Graph().parse(format="turtle", data=\"""
... @prefix foaf: <http://xmlns.com/foaf/0.1/> .
...
... <http://example.org/chapman_hall> a foaf:Organization;
... foaf:name "Chapman & Hall" .
...
... \""")
>>> Facet.sortresource(row, "dcterms_publisher", resources)
'chapmanhall'
"""
row[binding] = cls.resourcelabel(row, binding, resource_graph)
return cls.titlesortkey(row, binding)
@classmethod
def term(cls, row, binding='dcterms_publisher', resource_graph=None):
"""Returns the leaf part of the URI found in ``row[binding]``.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> Facet.term(row, "dcterms_publisher")
'chapman_hall'
"""
ret = util.uri_leaf(row[binding])
if not ret:
# FIXME: get a logger and complain. but also get something
            # that can act as a URI fragment
ret = row[binding].replace(" ", "_")
return ret
@classmethod
def qname(cls, row, binding='rdf_type', resource_graph=None):
"""Returns the qname of the rdf URIref contained in row[binding], as
determined by the namespace prefixes registered in
resource_graph.
>>> row = {"rdf_type": "http://purl.org/ontology/bibo/Book",
... "dcterms_title": "A Tale of Two Cities",
... "dcterms_issued": "1859-04-30",
... "dcterms_publisher": "http://example.org/chapman_hall",
... "schema_free": "true"}
>>> import rdflib
>>> resources = rdflib.Graph()
>>> resources.bind("bibo", "http://purl.org/ontology/bibo/")
>>> Facet.qname(row, "rdf_type", resources)
'bibo:Book'
"""
u = URIRef(row[binding])
return resource_graph.qname(u)
@classmethod
def resourcelabel_or_qname(cls, row, binding='rdf_type', resource_graph=None):
res = cls.resourcelabel(row, binding, resource_graph)
if res == row[binding]: # couldn't find a real label, try qname instead
res = cls.qname(row, binding, resource_graph)
return res
# define a number of default values, used if the user does not
# explicitly specify indexingtype/selector/key
defaults = None
# formatting directives for label/pagetitle:
# %(criteria)s = The human-readable criteria for sorting/dividing/faceting, eg "date of publication", "document title" or "publisher"
# %(selected)s = The selected value, eg "2014", "A", "O'Reilly and Associates Publishing, inc."
# %(selected_uri)s = For resource-type values, the underlying URI, eg "http://example.org/ext/publisher/oreilly"
def __init__(self,
rdftype=DCTERMS.title,
# any rdflib.URIRef -- should be called 'rdfpredicate'??
label=None, # toclabel
pagetitle=None,
indexingtype=None, # if not given, determined by rdftype
selector=None, # - "" -
key=None, # - "" -
identificator=None, # - "" - (normally same as selector)
toplevel_only=None, # - "" -
use_for_toc=None, # - "" -
use_for_feed=None, # - "" -
selector_descending=None,
key_descending=None,
multiple_values=None,
dimension_type=None, # could be determined by indexingtype
dimension_label=None
):
def _finddefault(provided, rdftype, argumenttype, default):
if provided is None:
if rdftype in self.defaults and argumenttype in self.defaults[rdftype]:
return self.defaults[rdftype][argumenttype]
else:
# since self.defaults doesn't contain meaningless
# defaults (like selector for rdf:type) it's not a
# good UI to warn about this. Might need to add
# more data to self.defaults in order to re-enable
# this.
# log = logging.getLogger(__name__)
# log.warning("Cannot map rdftype %s with argumenttype %s, defaulting to %r" %
# (rdftype, argumenttype, default))
return default
else:
return provided
self.rdftype = rdftype
self.label = _finddefault(label, rdftype, 'label', "Sorted by %(term)s")
self.pagetitle = _finddefault(
pagetitle,
rdftype,
'pagetitle',
"Documents where %(term)s = %(selected)s")
self.indexingtype = _finddefault(
indexingtype,
rdftype,
'indexingtype',
fulltextindex.Text())
self.selector = _finddefault(selector, rdftype, 'selector', self.defaultselector)
self.key = _finddefault(key, rdftype, 'key', self.defaultselector)
self.identificator = _finddefault(
identificator,
rdftype,
'identificator',
self.defaultidentificator)
self.toplevel_only = _finddefault(toplevel_only, rdftype, 'toplevel_only', False)
self.use_for_toc = _finddefault(use_for_toc, rdftype, 'use_for_toc', False)
self.use_for_feed = _finddefault(use_for_feed, rdftype, 'use_for_feed', False)
self.selector_descending = _finddefault(
selector_descending,
rdftype,
'selector_descending',
False)
self.key_descending = _finddefault(key_descending, rdftype, 'key_descending', False)
self.multiple_values = _finddefault(
multiple_values,
rdftype,
'multiple_values',
False)
self.dimension_type = _finddefault(dimension_type, rdftype, 'dimension_type', None)
# dimension_label should only be provided if an unusual
# selector for a rdftype is used (eg is_april_fools() for
# dcterms:issued), therefore no rdftype-dependent default.
self.dimension_label = dimension_label
def __repr__(self):
dictrepr = "".join(
(" %s=%r" %
(k, v) for k, v in sorted(
self.__dict__.items()) if not callable(v)))
return ("<%s%s>" % (self.__class__.__name__, dictrepr))
def __eq__(self, other):
# compare only those properties that affects the SET of
# selected data using this facet
return (self.rdftype == other.rdftype and
self.dimension_type == other.dimension_type and
self.dimension_label == other.dimension_label and
self.selector == other.selector)
Facet.defaults = {RDF.type: {
'indexingtype': fulltextindex.URI(),
'toplevel_only': False,
'use_for_toc': False,
'use_for_feed': True,
'selector': Facet.resourcelabel_or_qname,
'identificator': Facet.term,
'dimension_type': "term",
'pagetitle': 'All %(selected)s documents'},
DCTERMS.title: {
'indexingtype': fulltextindex.Text(boost=4),
'toplevel_only': False,
'use_for_toc': True,
'selector': Facet.firstletter,
'key': Facet.titlesortkey,
'identificator': Facet.firstletter,
'dimension_type': None, # or "value",
'pagetitle': 'Documents starting with "%(selected)s"'
},
DCTERMS.identifier: {
'indexingtype': fulltextindex.Label(boost=16),
'toplevel_only': False,
'use_for_toc': False, # typically no info that isn't already in title
'selector': Facet.firstletter,
'key': Facet.titlesortkey,
'identificator': Facet.firstletter,
},
DCTERMS.abstract: {
'indexingtype': fulltextindex.Text(boost=2),
'toplevel_only': True,
'use_for_toc': False
},
DC.creator: {
'indexingtype': fulltextindex.Label(),
'toplevel_only': True,
'use_for_toc': True,
'selector': Facet.defaultselector,
'key': Facet.titlesortkey,
'dimension_type': "value"
},
DCTERMS.publisher: {
'indexingtype': fulltextindex.Resource(),
'toplevel_only': True,
'use_for_toc': True,
'use_for_feed': True,
'selector': Facet.resourcelabel,
'key': Facet.resourcelabel,
'identificator': Facet.term,
'dimension_type': 'ref',
'pagetitle': 'Documents published by %(selected)s'
},
DCTERMS.references: { # NB: this is a single URI reference w/o label
'indexingtype': fulltextindex.URI(),
'use_for_toc': False,
},
DCTERMS.issued: {
'label': "Sorted by publication year",
'pagetitle': "Documents published in %(selected)s",
'indexingtype': fulltextindex.Datetime(),
'toplevel_only': True,
'use_for_toc': True,
'selector': Facet.year,
'key': Facet.defaultselector,
'identificator': Facet.year,
'selector_descending': False,
'key_descending': False,
'dimension_type': "year"
},
DC.subject: {
# eg. one or more string literals (not URIRefs),
'indexingtype': fulltextindex.Keyword(),
'multiple_values': True,
'toplevel_only': True,
'use_for_toc': True,
'selector': Facet.defaultselector,
'key': Facet.defaultselector,
'multiple_values': True,
'dimension_type': 'value',
},
DCTERMS.subject: {
# eg. one or more URIRefs + labels
'indexingtype': fulltextindex.Resource(),
'multiple_values': True,
'toplevel_only': True,
'use_for_toc': True,
'selector': Facet.resourcelabel,
'key': Facet.resourcelabel,
'identificator': Facet.term,
'multiple_values': True,
'dimension_type': 'ref',
},
SCHEMA.free: { # "A flag to signal that the publication is accessible for free."
'indexingtype': fulltextindex.Boolean(),
'toplevel_only': True,
'use_for_toc': True,
'use_for_feed': True,
'selector': Facet.booleanvalue,
'key': Facet.defaultselector,
'dimension_type': 'value'
}
}
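# A minimal usage sketch (assumption: the predicate and row below are illustrative, not taken
# from this module; it simply combines the Facet constructor with the ``year`` selector).
def _example_year_facet():
    f = Facet(rdftype=DCTERMS.issued, selector=Facet.year, use_for_toc=True)
    row = {"dcterms_issued": "1859-04-30"}
    return f.selector(row, "dcterms_issued")  # -> '1859'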
| 45.247689 | 137 | 0.536296 |
383e21e7b51acc53c483ed3ff1c5303935ea2bf7
| 9,632 |
py
|
Python
|
nni/retiarii/nn/pytorch/component.py
|
Davidxswang/nni
|
270a36264515f1576071f07e741fccd5333434ff
|
[
"MIT"
] | 1 |
2020-12-14T08:27:04.000Z
|
2020-12-14T08:27:04.000Z
|
nni/retiarii/nn/pytorch/component.py
|
Davidxswang/nni
|
270a36264515f1576071f07e741fccd5333434ff
|
[
"MIT"
] | 12 |
2021-08-21T08:43:09.000Z
|
2022-02-22T07:51:53.000Z
|
nni/retiarii/nn/pytorch/component.py
|
Davidxswang/nni
|
270a36264515f1576071f07e741fccd5333434ff
|
[
"MIT"
] | 1 |
2021-07-01T07:04:28.000Z
|
2021-07-01T07:04:28.000Z
|
import copy
from collections import OrderedDict
from typing import Callable, List, Union, Tuple, Optional
import torch
import torch.nn as nn
from .api import LayerChoice, InputChoice
from .nn import ModuleList
from .nasbench101 import NasBench101Cell, NasBench101Mutator
from .utils import generate_new_label, get_fixed_value
from ...utils import NoContextError
__all__ = ['Repeat', 'Cell', 'NasBench101Cell', 'NasBench101Mutator', 'NasBench201Cell']
class Repeat(nn.Module):
"""
Repeat a block by a variable number of times.
Parameters
----------
blocks : function, list of function, module or list of module
The block to be repeated. If not a list, it will be replicated into a list.
        If a list, it should be of length ``max_depth``; the modules will be instantiated in order and a prefix will be taken.
If a function, it will be called to instantiate a module. Otherwise the module will be deep-copied.
depth : int or tuple of int
If one number, the block will be repeated by a fixed number of times. If a tuple, it should be (min, max),
meaning that the block will be repeated at least `min` times and at most `max` times.
"""
def __new__(cls, blocks: Union[Callable[[], nn.Module], List[Callable[[], nn.Module]], nn.Module, List[nn.Module]],
depth: Union[int, Tuple[int, int]], label: Optional[str] = None):
try:
repeat = get_fixed_value(label)
return nn.Sequential(*cls._replicate_and_instantiate(blocks, repeat))
except NoContextError:
return super().__new__(cls)
def __init__(self,
blocks: Union[Callable[[], nn.Module], List[Callable[[], nn.Module]], nn.Module, List[nn.Module]],
depth: Union[int, Tuple[int, int]], label: Optional[str] = None):
super().__init__()
self._label = generate_new_label(label)
self.min_depth = depth if isinstance(depth, int) else depth[0]
self.max_depth = depth if isinstance(depth, int) else depth[1]
assert self.max_depth >= self.min_depth > 0
self.blocks = nn.ModuleList(self._replicate_and_instantiate(blocks, self.max_depth))
@property
def label(self):
return self._label
def forward(self, x):
for block in self.blocks:
x = block(x)
return x
@staticmethod
def _replicate_and_instantiate(blocks, repeat):
if not isinstance(blocks, list):
if isinstance(blocks, nn.Module):
blocks = [blocks] + [copy.deepcopy(blocks) for _ in range(repeat - 1)]
else:
blocks = [blocks for _ in range(repeat)]
assert len(blocks) > 0
assert repeat <= len(blocks), f'Not enough blocks to be used. {repeat} expected, only found {len(blocks)}.'
blocks = blocks[:repeat]
if not isinstance(blocks[0], nn.Module):
blocks = [b() for b in blocks]
return blocks
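# A minimal usage sketch (assumption: the block factory and depth range below are illustrative,
# not part of the original module). Outside a fixed-architecture context, ``max_depth`` copies
# of the block are instantiated and the search strategy later decides how many are used.
def _example_repeat():
    rep = Repeat(lambda: nn.Linear(8, 8), depth=(1, 3))
    return rep.min_depth, rep.max_depth, len(rep.blocks)  # -> (1, 3, 3)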
class Cell(nn.Module):
"""
Cell structure [zophnas]_ [zophnasnet]_ that is popularly used in NAS literature.
    A cell consists of multiple "nodes". Each node is a sum of multiple operators. Each operator is chosen from
    ``op_candidates`` and takes one input from the previous nodes and the predecessors. A predecessor is an input of the cell.
    The output of the cell is the concatenation of some of the nodes in the cell (currently all of them).
Parameters
----------
op_candidates : function or list of module
A list of modules to choose from, or a function that returns a list of modules.
num_nodes : int
Number of nodes in the cell.
    num_ops_per_node : int
Number of operators in each node. The output of each node is the sum of all operators in the node. Default: 1.
num_predecessors : int
Number of inputs of the cell. The input to forward should be a list of tensors. Default: 1.
merge_op : str
        Currently only ``all`` is supported, which differs slightly from what is described in the reference. Default: all.
label : str
Identifier of the cell. Cell sharing the same label will semantically share the same choice.
References
----------
.. [zophnas] Barret Zoph, Quoc V. Le, "Neural Architecture Search with Reinforcement Learning". https://arxiv.org/abs/1611.01578
.. [zophnasnet] Barret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc V. Le,
"Learning Transferable Architectures for Scalable Image Recognition". https://arxiv.org/abs/1707.07012
"""
# TODO:
# Support loose end concat (shape inference on the following cells)
# How to dynamically create convolution with stride as the first node
def __init__(self,
op_candidates: Union[Callable, List[nn.Module]],
num_nodes: int,
num_ops_per_node: int = 1,
num_predecessors: int = 1,
merge_op: str = 'all',
label: str = None):
super().__init__()
self._label = generate_new_label(label)
self.ops = ModuleList()
self.inputs = ModuleList()
self.num_nodes = num_nodes
self.num_ops_per_node = num_ops_per_node
self.num_predecessors = num_predecessors
for i in range(num_nodes):
self.ops.append(ModuleList())
self.inputs.append(ModuleList())
for k in range(num_ops_per_node):
if isinstance(op_candidates, list):
assert len(op_candidates) > 0 and isinstance(op_candidates[0], nn.Module)
ops = copy.deepcopy(op_candidates)
else:
ops = op_candidates()
self.ops[-1].append(LayerChoice(ops, label=f'{self.label}__op_{i}_{k}'))
self.inputs[-1].append(InputChoice(i + num_predecessors, 1, label=f'{self.label}/input_{i}_{k}'))
assert merge_op in ['all'] # TODO: loose_end
self.merge_op = merge_op
@property
def label(self):
return self._label
def forward(self, x: List[torch.Tensor]):
states = x
for ops, inps in zip(self.ops, self.inputs):
current_state = []
for op, inp in zip(ops, inps):
current_state.append(op(inp(states)))
current_state = torch.sum(torch.stack(current_state), 0)
states.append(current_state)
return torch.cat(states[self.num_predecessors:], 1)
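# A minimal construction sketch (assumption: the candidate operators and node count below are
# illustrative). Each of the two nodes gets one LayerChoice over the candidates and one
# InputChoice over its possible inputs.
def _example_cell():
    cell = Cell(lambda: [nn.Identity(), nn.ReLU()], num_nodes=2)
    return cell.label, len(cell.ops), len(cell.inputs)  # -> (<generated label>, 2, 2)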
class NasBench201Cell(nn.Module):
"""
Cell structure that is proposed in NAS-Bench-201 [nasbench201]_ .
    This cell is a densely connected DAG with ``num_tensors`` nodes, where each node is a tensor.
For every i < j, there is an edge from i-th node to j-th node.
Each edge in this DAG is associated with an operation transforming the hidden state from the source node
to the target node. All possible operations are selected from a predefined operation set, defined in ``op_candidates``.
Each of the ``op_candidates`` should be a callable that accepts input dimension and output dimension,
and returns a ``Module``.
    Input of this cell should be of shape :math:`[N, C_{in}, *]`, while output should be of shape :math:`[N, C_{out}, *]`.
The space size of this cell would be :math:`|op|^{N(N-1)/2}`, where :math:`|op|` is the number of operation candidates,
and :math:`N` is defined by ``num_tensors``.
Parameters
----------
op_candidates : list of callable
        Operation candidates. Each should be a function that accepts the input and output feature sizes and returns an nn.Module.
in_features : int
Input dimension of cell.
out_features : int
Output dimension of cell.
num_tensors : int
Number of tensors in the cell (input included). Default: 4
label : str
Identifier of the cell. Cell sharing the same label will semantically share the same choice.
References
----------
.. [nasbench201] Dong, X. and Yang, Y., 2020. Nas-bench-201: Extending the scope of reproducible neural architecture search.
arXiv preprint arXiv:2001.00326.
"""
@staticmethod
def _make_dict(x):
if isinstance(x, list):
return OrderedDict([(str(i), t) for i, t in enumerate(x)])
return OrderedDict(x)
def __init__(self, op_candidates: List[Callable[[int, int], nn.Module]],
in_features: int, out_features: int, num_tensors: int = 4,
label: Optional[str] = None):
super().__init__()
self._label = generate_new_label(label)
self.layers = nn.ModuleList()
self.in_features = in_features
self.out_features = out_features
self.num_tensors = num_tensors
op_candidates = self._make_dict(op_candidates)
for tid in range(1, num_tensors):
node_ops = nn.ModuleList()
for j in range(tid):
inp = in_features if j == 0 else out_features
op_choices = OrderedDict([(key, cls(inp, out_features))
for key, cls in op_candidates.items()])
node_ops.append(LayerChoice(op_choices, label=f'{self._label}__{j}_{tid}'))
self.layers.append(node_ops)
def forward(self, inputs):
tensors = [inputs]
for layer in self.layers:
current_tensor = []
for i, op in enumerate(layer):
current_tensor.append(op(tensors[i]))
current_tensor = torch.sum(torch.stack(current_tensor), 0)
tensors.append(current_tensor)
return tensors[-1]
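# A minimal construction sketch (assumption: a toy two-operator candidate set, not part of the
# original module). Each candidate is a callable taking (in_features, out_features) and
# returning an nn.Module; the cell builds one LayerChoice per edge of the DAG.
def _example_nasbench201_cell():
    ops = [lambda fin, fout: nn.Linear(fin, fout),
           lambda fin, fout: nn.Sequential(nn.Linear(fin, fout), nn.ReLU())]
    cell = NasBench201Cell(ops, in_features=16, out_features=16, num_tensors=4)
    return len(cell.layers)  # -> 3 node levels, with 1, 2 and 3 incoming edges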
| 42.808889 | 132 | 0.637978 |
cb91181e424805d84ec5e09fd8264efb5889899c
| 3,459 |
py
|
Python
|
frappe/custom/doctype/custom_field/custom_field.py
|
rohitwaghchaure/frappe
|
9414bec421496eab66ea96ff8199d388bfca019c
|
[
"MIT"
] | 2 |
2021-08-28T06:08:17.000Z
|
2021-09-06T10:41:43.000Z
|
frappe/custom/doctype/custom_field/custom_field.py
|
rohitwaghchaure/frappe
|
9414bec421496eab66ea96ff8199d388bfca019c
|
[
"MIT"
] | null | null | null |
frappe/custom/doctype/custom_field/custom_field.py
|
rohitwaghchaure/frappe
|
9414bec421496eab66ea96ff8199d388bfca019c
|
[
"MIT"
] | 1 |
2018-03-21T16:13:12.000Z
|
2018-03-21T16:13:12.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.utils import cstr
from frappe import _
from frappe.model.document import Document
class CustomField(Document):
def autoname(self):
self.set_fieldname()
self.name = self.dt + "-" + self.fieldname
def set_fieldname(self):
if not self.fieldname:
if not self.label:
frappe.throw(_("Label is mandatory"))
# remove special characters from fieldname
			self.fieldname = filter(lambda x: x.isdigit() or x.isalpha() or x == '_',
cstr(self.label).lower().replace(' ','_'))
# fieldnames should be lowercase
self.fieldname = self.fieldname.lower()
def validate(self):
meta = frappe.get_meta(self.dt)
fieldnames = [df.fieldname for df in meta.get("fields")]
if self.insert_after and self.insert_after in fieldnames:
self.idx = fieldnames.index(self.insert_after) + 1
if not self.idx:
self.idx = len(fieldnames) + 1
self._old_fieldtype = self.db_get('fieldtype')
if not self.fieldname:
frappe.throw(_("Fieldname not set for Custom Field"))
def on_update(self):
frappe.clear_cache(doctype=self.dt)
if not self.flags.ignore_validate:
# validate field
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
validate_fields_for_doctype(self.dt)
# update the schema
if not frappe.db.get_value('DocType', self.dt, 'issingle'):
if (self.fieldname not in frappe.db.get_table_columns(self.dt)
or getattr(self, "_old_fieldtype", None) != self.fieldtype):
from frappe.model.db_schema import updatedb
updatedb(self.dt)
def on_trash(self):
# delete property setter entries
frappe.db.sql("""\
DELETE FROM `tabProperty Setter`
WHERE doc_type = %s
AND field_name = %s""",
(self.dt, self.fieldname))
frappe.clear_cache(doctype=self.dt)
def validate_insert_after(self, meta):
if not meta.get_field(self.insert_after):
frappe.throw(_("Insert After field '{0}' mentioned in Custom Field '{1}', with label '{2}', does not exist")
.format(self.insert_after, self.name, self.label), frappe.DoesNotExistError)
if self.fieldname == self.insert_after:
frappe.throw(_("Insert After cannot be set as {0}").format(meta.get_label(self.insert_after)))
@frappe.whitelist()
def get_fields_label(doctype=None):
return [{"value": df.fieldname or "", "label": _(df.label or "")} for df in frappe.get_meta(doctype).get("fields")]
def create_custom_field_if_values_exist(doctype, df):
df = frappe._dict(df)
if df.fieldname in frappe.db.get_table_columns(doctype) and \
frappe.db.sql("""select count(*) from `tab{doctype}`
where ifnull({fieldname},'')!=''""".format(doctype=doctype, fieldname=df.fieldname))[0][0]:
create_custom_field(doctype, df)
def create_custom_field(doctype, df):
df = frappe._dict(df)
if not frappe.db.get_value("Custom Field", {"dt": doctype, "fieldname": df.fieldname}):
frappe.get_doc({
"doctype":"Custom Field",
"dt": doctype,
"permlevel": df.permlevel or 0,
"label": df.label,
"fieldname": df.fieldname or df.label.lower().replace(' ', '_'),
"fieldtype": df.fieldtype,
"options": df.options,
"insert_after": df.insert_after,
"print_hide": df.print_hide,
"hidden": df.hidden or 0
}).insert()
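def _example_add_delivery_notes_field():
	# A minimal usage sketch (assumption: run inside an active Frappe site context, e.g. via
	# `bench execute`; the doctype and field definition below are illustrative only).
	create_custom_field("Sales Invoice", {
		"label": "Delivery Notes",
		"fieldtype": "Small Text",
		"insert_after": "customer"
	})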
@frappe.whitelist()
def add_custom_field(doctype, df):
df = json.loads(df)
return create_custom_field(doctype, df)
| 32.632075 | 116 | 0.716103 |
587e5c56288ed24a71d62f73ebe89a7d326cde96
| 1,520 |
py
|
Python
|
hmkit/autoapi/properties/value/position.py
|
highmobility/hmkit-python
|
2ac06ed021b57014f5290eaece19a9399d52df48
|
[
"MIT"
] | 1 |
2021-08-01T20:35:57.000Z
|
2021-08-01T20:35:57.000Z
|
hmkit/autoapi/properties/value/position.py
|
highmobility/hmkit-python
|
2ac06ed021b57014f5290eaece19a9399d52df48
|
[
"MIT"
] | null | null | null |
hmkit/autoapi/properties/value/position.py
|
highmobility/hmkit-python
|
2ac06ed021b57014f5290eaece19a9399d52df48
|
[
"MIT"
] | null | null | null |
"""
The MIT License
Copyright (c) 2014- High-Mobility GmbH (https://high-mobility.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from enum import Enum, unique
import logging
log = logging.getLogger('hmkit.autoapi')
@unique
class Position(Enum):
"""
Enum Class for Position of Door
"""
CLOSED = 0x00
OPEN = 0x01
INTERMEDIATE = 0x02
'''
def __init__(self, value):
#print("PY: Init value Location")
self.value = value
return
def getByte(self):
return self.value
'''
| 31.020408 | 77 | 0.745395 |
d3849f0d902e5734e919993f62231ca496ec8415
| 2,565 |
py
|
Python
|
setup.py
|
minrk/tornado
|
a4279d24a9cd7dacfa3f9aa410a5272ed26ba8bb
|
[
"CC-BY-3.0",
"Apache-2.0"
] | null | null | null |
setup.py
|
minrk/tornado
|
a4279d24a9cd7dacfa3f9aa410a5272ed26ba8bb
|
[
"CC-BY-3.0",
"Apache-2.0"
] | null | null | null |
setup.py
|
minrk/tornado
|
a4279d24a9cd7dacfa3f9aa410a5272ed26ba8bb
|
[
"CC-BY-3.0",
"Apache-2.0"
] | 1 |
2017-07-26T10:34:41.000Z
|
2017-07-26T10:34:41.000Z
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.core
import sys
# Importing setuptools adds some features like "setup.py develop", but
# it's optional so swallow the error if it's not there.
try:
import setuptools
except ImportError:
pass
kwargs = {}
version = "3.1.dev2"
with open('README.rst') as f:
long_description = f.read()
distutils.core.setup(
name="tornado",
version=version,
packages = ["tornado", "tornado.test", "tornado.platform"],
package_data = {
"tornado": ["ca-certificates.crt"],
# data files need to be listed both here (which determines what gets
# installed) and in MANIFEST.in (which determines what gets included
# in the sdist tarball)
"tornado.test": [
"README",
"csv_translations/fr_FR.csv",
"gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo",
"gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po",
"options_test.cfg",
"static/robots.txt",
"templates/utf8.html",
"test.crt",
"test.key",
],
},
author="Facebook",
author_email="[email protected]",
url="http://www.tornadoweb.org/",
license="http://www.apache.org/licenses/LICENSE-2.0",
description="Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed.",
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
long_description=long_description,
**kwargs
)
| 35.136986 | 125 | 0.655361 |
9b434b3d9c64e62bbc1d689f1247e9321ecfd2b7
| 6,330 |
py
|
Python
|
flask_jwt_extended/tokens.py
|
Freshwood/flask-jwt-extended
|
5485fce359cfe0f9713364b577d2aa4b521c5ea8
|
[
"MIT"
] | null | null | null |
flask_jwt_extended/tokens.py
|
Freshwood/flask-jwt-extended
|
5485fce359cfe0f9713364b577d2aa4b521c5ea8
|
[
"MIT"
] | null | null | null |
flask_jwt_extended/tokens.py
|
Freshwood/flask-jwt-extended
|
5485fce359cfe0f9713364b577d2aa4b521c5ea8
|
[
"MIT"
] | null | null | null |
import datetime
import uuid
from calendar import timegm
import jwt
from werkzeug.security import safe_str_cmp
from flask_jwt_extended.exceptions import JWTDecodeError, CSRFError
def _create_csrf_token():
return str(uuid.uuid4())
def _encode_jwt(additional_token_data, expires_delta, secret, algorithm,
json_encoder=None):
uid = _create_csrf_token()
now = datetime.datetime.utcnow()
token_data = {
'iat': now,
'nbf': now,
'jti': uid,
}
# If expires_delta is False, the JWT should never expire
# and the 'exp' claim is not set.
if expires_delta:
token_data['exp'] = now + expires_delta
token_data.update(additional_token_data)
encoded_token = jwt.encode(token_data, secret, algorithm,
json_encoder=json_encoder).decode('utf-8')
return encoded_token
def encode_access_token(identity, secret, algorithm, expires_delta, fresh,
user_claims, csrf, identity_claim_key, user_claims_key,
json_encoder=None):
"""
Creates a new encoded (utf-8) access token.
:param identity: Identifier for who this token is for (ex, username). This
data must be json serializable
:param secret: Secret key to encode the JWT with
:param algorithm: Which algorithm to encode this JWT with
:param expires_delta: How far in the future this token should expire
(set to False to disable expiration)
:type expires_delta: datetime.timedelta or False
:param fresh: If this should be a 'fresh' token or not. If a
datetime.timedelta is given this will indicate how long this
token will remain fresh.
:param user_claims: Custom claims to include in this token. This data must
be json serializable
:param csrf: Whether to include a csrf double submit claim in this token
(boolean)
:param identity_claim_key: Which key should be used to store the identity
:param user_claims_key: Which key should be used to store the user claims
:return: Encoded access token
"""
if isinstance(fresh, datetime.timedelta):
now = datetime.datetime.utcnow()
fresh = timegm((now + fresh).utctimetuple())
token_data = {
identity_claim_key: identity,
'fresh': fresh,
'type': 'access',
}
# Don't add extra data to the token if user_claims is empty.
if user_claims:
token_data[user_claims_key] = user_claims
if csrf:
token_data['csrf'] = _create_csrf_token()
return _encode_jwt(token_data, expires_delta, secret, algorithm,
json_encoder=json_encoder)
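# A minimal issuing sketch (assumption: standalone use outside the Flask extension; the secret,
# claim keys and claims below are illustrative, and a PyJWT version whose encode() returns
# bytes is assumed, as the helpers above expect).
def _example_issue_access_token():
    return encode_access_token(
        identity='user-42', secret='change-me', algorithm='HS256',
        expires_delta=datetime.timedelta(minutes=15), fresh=True,
        user_claims={'roles': ['admin']}, csrf=False,
        identity_claim_key='identity', user_claims_key='user_claims')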
def encode_refresh_token(identity, secret, algorithm, expires_delta, user_claims,
csrf, identity_claim_key, user_claims_key,
json_encoder=None):
"""
Creates a new encoded (utf-8) refresh token.
:param identity: Some identifier used to identify the owner of this token
:param secret: Secret key to encode the JWT with
    :param algorithm: Which algorithm to use for the token
:param expires_delta: How far in the future this token should expire
(set to False to disable expiration)
:type expires_delta: datetime.timedelta or False
:param user_claims: Custom claims to include in this token. This data must
be json serializable
:param csrf: Whether to include a csrf double submit claim in this token
(boolean)
:param identity_claim_key: Which key should be used to store the identity
:param user_claims_key: Which key should be used to store the user claims
:return: Encoded refresh token
"""
token_data = {
identity_claim_key: identity,
'type': 'refresh',
}
# Don't add extra data to the token if user_claims is empty.
if user_claims:
token_data[user_claims_key] = user_claims
if csrf:
token_data['csrf'] = _create_csrf_token()
return _encode_jwt(token_data, expires_delta, secret, algorithm,
json_encoder=json_encoder)
def decode_jwt(encoded_token, secret, algorithm, identity_claim_key,
user_claims_key, csrf_value=None, audience=None,
leeway=0, allow_expired=False, verify_audience=False):
"""
Decodes an encoded JWT
:param encoded_token: The encoded JWT string to decode
:param secret: Secret key used to encode the JWT
:param algorithm: Algorithm used to encode the JWT
:param identity_claim_key: expected key that contains the identity
:param user_claims_key: expected key that contains the user claims
:param csrf_value: Expected double submit csrf value
:param audience: expected audience in the JWT
:param leeway: optional leeway to add some margin around expiration times
:param allow_expired: Options to ignore exp claim validation in token
:param verify_audience: Options to ignore aud claim validation in token
:return: Dictionary containing contents of the JWT
"""
options = {}
if allow_expired:
options['verify_exp'] = False
options['verify_aud'] = verify_audience
    # This call verifies the exp, iat, nbf, and aud claims
data = jwt.decode(encoded_token, secret, algorithms=[algorithm], audience=audience,
leeway=leeway, options=options)
# Make sure that any custom claims we expect in the token are present
if 'jti' not in data:
data['jti'] = None
if identity_claim_key not in data:
raise JWTDecodeError("Missing claim: {}".format(identity_claim_key))
if 'type' not in data:
data['type'] = 'access'
if data['type'] not in ('refresh', 'access'):
raise JWTDecodeError("Missing or invalid claim: type")
if data['type'] == 'access':
if 'fresh' not in data:
data['fresh'] = False
if user_claims_key not in data:
data[user_claims_key] = {}
if csrf_value:
if 'csrf' not in data:
raise JWTDecodeError("Missing claim: csrf")
if not safe_str_cmp(data['csrf'], csrf_value):
raise CSRFError("CSRF double submit tokens do not match")
return data
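# A minimal round-trip sketch (assumption: the secret and claim keys are illustrative).
# decode_jwt re-validates the claims that encode_access_token added, and fills in the
# defaults documented above for any optional claim that is missing.
def _example_round_trip():
    token = encode_access_token(
        identity='user-42', secret='change-me', algorithm='HS256',
        expires_delta=datetime.timedelta(minutes=15), fresh=True,
        user_claims={}, csrf=False,
        identity_claim_key='identity', user_claims_key='user_claims')
    return decode_jwt(token, 'change-me', 'HS256',
                      identity_claim_key='identity', user_claims_key='user_claims')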
| 39.074074 | 87 | 0.663981 |
d7238bb1945d00b86cec29999ad9796cc5ac2fbb
| 1,288 |
py
|
Python
|
jigs/trio_mapper/source/sync_openmm.py
|
gitter-badger/wepy-1
|
9bc619aeae178ad5d10f658fae2abfd2c7aeb18a
|
[
"MIT"
] | 35 |
2017-08-22T15:39:06.000Z
|
2022-03-20T15:17:52.000Z
|
jigs/trio_mapper/source/sync_openmm.py
|
gitter-badger/wepy-1
|
9bc619aeae178ad5d10f658fae2abfd2c7aeb18a
|
[
"MIT"
] | 33 |
2017-10-02T22:04:45.000Z
|
2022-03-02T22:19:08.000Z
|
jigs/trio_mapper/source/sync_openmm.py
|
stxinsite/wepy
|
352d4c1316b20e839aae8824eedd66f0f2d0b456
|
[
"MIT"
] | 17 |
2018-07-14T15:33:30.000Z
|
2022-01-18T16:30:55.000Z
|
from openmm_systems.test_systems import (
LennardJonesPair,
LysozymeImplicit,
)
import simtk.openmm.app as omma
import simtk.openmm as omm
import simtk.unit as unit
from wepy.runners.openmm import gen_sim_state
import time
def create_sim():
test_sys = LysozymeImplicit()
integrator = omm.LangevinIntegrator(300.0*unit.kelvin,
1/unit.picosecond,
0.002*unit.picoseconds)
init_state = gen_sim_state(test_sys.positions, test_sys.system, integrator)
platform = omm.Platform.getPlatformByName('CPU')
simulation = omma.Simulation(
test_sys.topology,
test_sys.system,
integrator,
platform=platform,
)
simulation.context.setState(init_state)
return simulation
def run_sim(sim, steps):
sim.integrator.step(steps)
return sim
def main():
num_sims = 2
steps = 5000
simulations = []
for idx in range(num_sims):
simulations.append(create_sim())
for i, sim in enumerate(simulations):
start = time.time()
run_sim(sim, steps)
end = time.time()
print(f"Sim {i} took: {end - start}")
start = time.time()
main()
end = time.time()
print(f"Took {end - start} seconds")
| 19.815385 | 79 | 0.631211 |
9d720f0379c37b62132070891659e9d63863d5bf
| 786 |
py
|
Python
|
predict.py
|
AR-Elbers/Heroku-Demo
|
81af0c05bd0fac88df2d7fd58917d42accb29d47
|
[
"MIT"
] | null | null | null |
predict.py
|
AR-Elbers/Heroku-Demo
|
81af0c05bd0fac88df2d7fd58917d42accb29d47
|
[
"MIT"
] | null | null | null |
predict.py
|
AR-Elbers/Heroku-Demo
|
81af0c05bd0fac88df2d7fd58917d42accb29d47
|
[
"MIT"
] | null | null | null |
from flask_restful import Api, Resource, reqparse
import numpy as np
from sklearn.externals import joblib
# Load prebuilt model
IRIS_MODEL = joblib.load('iris.smd')
# Create predict method
class Predict(Resource):
@staticmethod
def post():
parser = reqparse.RequestParser()
parser.add_argument('petal_length')
parser.add_argument('petal_width')
parser.add_argument('sepal_length')
parser.add_argument('sepal_width')
# Use parser to create dictionary of data input
args = parser.parse_args()
# Convert input data to array
X_new = np.fromiter(args.values(), dtype=float)
# Generate prediction for a single value
out = {'Prediction': IRIS_MODEL.predict([X_new])[0]}
return out, 200
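# A minimal wiring sketch (assumption: the real Flask app/Api setup lives in another module of
# this repo; the route below is illustrative).
def _example_app():
    from flask import Flask
    app = Flask(__name__)
    api = Api(app)
    api.add_resource(Predict, '/predict')
    return app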
| 34.173913 | 60 | 0.678117 |
9311916fe59869938397de6da4b455270ae79347
| 1,046 |
py
|
Python
|
src/cli_client.py
|
Jonnyneill/talking-clock
|
e95020ecc2f13058ac09294f1425b1ee840ac25e
|
[
"MIT"
] | null | null | null |
src/cli_client.py
|
Jonnyneill/talking-clock
|
e95020ecc2f13058ac09294f1425b1ee840ac25e
|
[
"MIT"
] | null | null | null |
src/cli_client.py
|
Jonnyneill/talking-clock
|
e95020ecc2f13058ac09294f1425b1ee840ac25e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import re
import sys
from datetime import datetime
from clock import talk
def main():
"""
Convert numeric time to human time and write the result to std out
"""
parser = argparse.ArgumentParser(description="Convert a numeric time to words")
parser.add_argument(
"-t", "--numeric-time", type=str, default=datetime.now().strftime("%H:%M"), help="The time to convert to words"
)
args = parser.parse_args()
if re.search('[a-zA-Z]', args.numeric_time):
log_and_exit("Error: Provided time [{}] contains non numeric characters".format(args.numeric_time), 1)
try:
human_time = talk(args.numeric_time)
log_and_exit(human_time, 0)
except ValueError:
log_and_exit("Error: Numeric time [{}] is not in a valid format".format(args.numeric_time), 1)
def log_and_exit(message, code):
if code == 0:
sys.stdout.write(message + "\n")
else:
sys.stderr.write(message + "\n")
sys.stdout.flush()
sys.exit(code)
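# A minimal entry-point sketch (assumption: the module is meant to be run directly; the guard
# and example invocation are illustrative, e.g. `python cli_client.py -t 09:30`).
if __name__ == "__main__":
    main()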
| 27.526316 | 119 | 0.656788 |
95905bef888b0d487fdacfebaf40de1a8237dbea
| 2,006 |
py
|
Python
|
services/workshop/crapi/mechanic/models.py
|
nonamesec/crAPI
|
5863ffad52bb93248587ad1999a693220f236501
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
services/workshop/crapi/mechanic/models.py
|
nonamesec/crAPI
|
5863ffad52bb93248587ad1999a693220f236501
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
services/workshop/crapi/mechanic/models.py
|
nonamesec/crAPI
|
5863ffad52bb93248587ad1999a693220f236501
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
# Copyright 2020 Traceable, Inc.
#
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Models related to Mechanic
Mechanic and Service Request Models
"""
from django.db import models
from user.models import User, Vehicle
from collections import OrderedDict
from extended_choices import Choices
from django_db_cascade.fields import ForeignKey, OneToOneField
from django_db_cascade.deletions import DB_CASCADE
class Mechanic(models.Model):
"""
Mechanic Model
represents a mechanic for the application
"""
mechanic_code = models.CharField(max_length=100, null=False, unique=True)
user = ForeignKey(User, DB_CASCADE)
class Meta:
db_table = 'mechanic'
def __str__(self):
return f"<Mechanic: {self.mechanic_code}>"
class ServiceRequest(models.Model):
"""
Service Request Model
represents a service request in the application
"""
mechanic = ForeignKey(Mechanic, DB_CASCADE)
vehicle = ForeignKey(Vehicle, DB_CASCADE)
problem_details = models.CharField(max_length=500, blank=True)
created_on = models.DateTimeField()
updated_on = models.DateTimeField(null=True)
ssn="123-41-5234"
STATUS_CHOICES = Choices(
('PEN', "Pending", "Pending"),
('FIN', "Finished", "Finished")
)
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default=STATUS_CHOICES.PEN)
class Meta:
db_table = 'service_request'
def __str__(self):
return f'<ServiceRequest: {self.id}>'
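# A minimal ORM sketch (assumption: called inside a configured Django context with existing
# Mechanic and Vehicle rows; the problem text and timestamp below are illustrative).
def _example_create_service_request(mechanic, vehicle):
    from django.utils import timezone
    return ServiceRequest.objects.create(
        mechanic=mechanic,
        vehicle=vehicle,
        problem_details="Brakes squeal at low speed",
        created_on=timezone.now(),
        status=ServiceRequest.STATUS_CHOICES.PEN,
    )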
| 29.5 | 96 | 0.721336 |
dfd84d3d31936e66bdbac8335c6eccd13beb949f
| 8,646 |
py
|
Python
|
octavia/api/v1/controllers/l7policy.py
|
BeaconFramework/Distributor
|
c9f8737063263ca69365679c8b76331766d63191
|
[
"Apache-2.0"
] | 1 |
2019-01-11T06:20:25.000Z
|
2019-01-11T06:20:25.000Z
|
octavia/api/v1/controllers/l7policy.py
|
BeaconFramework/Distributor
|
c9f8737063263ca69365679c8b76331766d63191
|
[
"Apache-2.0"
] | null | null | null |
octavia/api/v1/controllers/l7policy.py
|
BeaconFramework/Distributor
|
c9f8737063263ca69365679c8b76331766d63191
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import oslo_db.exception as oslo_exc
from oslo_utils import excutils
import pecan
from wsme import types as wtypes
from wsmeext import pecan as wsme_pecan
from octavia.api.v1.controllers import base
from octavia.api.v1.controllers import l7rule
from octavia.api.v1.types import l7policy as l7policy_types
from octavia.common import constants
from octavia.common import data_models
from octavia.common import exceptions
from octavia.common import validate
from octavia.db import prepare as db_prepare
from octavia.i18n import _LI
LOG = logging.getLogger(__name__)
class L7PolicyController(base.BaseController):
def __init__(self, load_balancer_id, listener_id):
super(L7PolicyController, self).__init__()
self.load_balancer_id = load_balancer_id
self.listener_id = listener_id
self.handler = self.handler.l7policy
@wsme_pecan.wsexpose(l7policy_types.L7PolicyResponse, wtypes.text)
def get(self, id):
"""Gets a single l7policy's details."""
context = pecan.request.context.get('octavia_context')
db_l7policy = self._get_db_l7policy(context.session, id)
return self._convert_db_to_type(db_l7policy,
l7policy_types.L7PolicyResponse)
@wsme_pecan.wsexpose([l7policy_types.L7PolicyResponse])
def get_all(self):
"""Lists all l7policies of a listener."""
context = pecan.request.context.get('octavia_context')
db_l7policies = self.repositories.l7policy.get_all(
context.session, listener_id=self.listener_id)
return self._convert_db_to_type(db_l7policies,
[l7policy_types.L7PolicyResponse])
def _test_lb_and_listener_statuses(self, session):
"""Verify load balancer is in a mutable state."""
if not self.repositories.test_and_set_lb_and_listeners_prov_status(
session, self.load_balancer_id,
constants.PENDING_UPDATE, constants.PENDING_UPDATE,
listener_ids=[self.listener_id]):
LOG.info(_LI("L7Policy cannot be created or modified because the "
"Load Balancer is in an immutable state"))
lb_repo = self.repositories.load_balancer
db_lb = lb_repo.get(session, id=self.load_balancer_id)
raise exceptions.ImmutableObject(resource=db_lb._name(),
id=self.load_balancer_id)
@wsme_pecan.wsexpose(l7policy_types.L7PolicyResponse,
body=l7policy_types.L7PolicyPOST, status_code=202)
def post(self, l7policy):
"""Creates a l7policy on a listener."""
context = pecan.request.context.get('octavia_context')
l7policy_dict = validate.sanitize_l7policy_api_args(
l7policy.to_dict(render_unsets=True), create=True)
# Make sure any pool specified by redirect_pool_id exists
if l7policy_dict.get('redirect_pool_id'):
self._get_db_pool(
context.session, l7policy_dict['redirect_pool_id'])
l7policy_dict = db_prepare.create_l7policy(l7policy_dict,
self.load_balancer_id,
self.listener_id)
self._test_lb_and_listener_statuses(context.session)
try:
db_l7policy = self.repositories.l7policy.create(context.session,
**l7policy_dict)
except oslo_exc.DBDuplicateEntry as de:
# Setting LB and Listener back to active because this is just a
# validation failure
self.repositories.load_balancer.update(
context.session, self.load_balancer_id,
provisioning_status=constants.ACTIVE)
self.repositories.listener.update(
context.session, self.listener_id,
provisioning_status=constants.ACTIVE)
if ['id'] == de.columns:
raise exceptions.IDAlreadyExists()
try:
LOG.info(_LI("Sending Creation of L7Policy %s to handler"),
db_l7policy.id)
self.handler.create(db_l7policy)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_l7policy = self._get_db_l7policy(context.session, db_l7policy.id)
return self._convert_db_to_type(db_l7policy,
l7policy_types.L7PolicyResponse)
@wsme_pecan.wsexpose(l7policy_types.L7PolicyResponse,
wtypes.text, body=l7policy_types.L7PolicyPUT,
status_code=202)
def put(self, id, l7policy):
"""Updates a l7policy."""
l7policy_dict = validate.sanitize_l7policy_api_args(
l7policy.to_dict(render_unsets=False))
context = pecan.request.context.get('octavia_context')
# Make sure any specified redirect_pool_id exists
if l7policy_dict.get('redirect_pool_id'):
self._get_db_pool(
context.session, l7policy_dict['redirect_pool_id'])
db_l7policy = self._get_db_l7policy(context.session, id)
self._test_lb_and_listener_statuses(context.session)
try:
LOG.info(_LI("Sending Update of L7Policy %s to handler"), id)
self.handler.update(
db_l7policy, l7policy_types.L7PolicyPUT(**l7policy_dict))
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_l7policy = self._get_db_l7policy(context.session, id)
return self._convert_db_to_type(db_l7policy,
l7policy_types.L7PolicyResponse)
@wsme_pecan.wsexpose(None, wtypes.text, status_code=202)
def delete(self, id):
"""Deletes a l7policy."""
context = pecan.request.context.get('octavia_context')
db_l7policy = self._get_db_l7policy(context.session, id)
self._test_lb_and_listener_statuses(context.session)
try:
LOG.info(_LI("Sending Deletion of L7Policy %s to handler"),
db_l7policy.id)
self.handler.delete(db_l7policy)
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
self.repositories.listener.update(
context.session, self.listener_id,
operating_status=constants.ERROR)
db_l7policy = self.repositories.l7policy.get(context.session, id=id)
return self._convert_db_to_type(db_l7policy,
l7policy_types.L7PolicyResponse)
@pecan.expose()
def _lookup(self, l7policy_id, *remainder):
"""Overridden pecan _lookup method for custom routing.
Verifies that the l7policy passed in the url exists, and if so decides
which controller, if any, should control be passed.
"""
context = pecan.request.context.get('octavia_context')
if l7policy_id and len(remainder) and remainder[0] == 'l7rules':
remainder = remainder[1:]
db_l7policy = self.repositories.l7policy.get(
context.session, id=l7policy_id)
if not db_l7policy:
LOG.info(_LI("L7Policy %s not found."), l7policy_id)
raise exceptions.NotFound(
resource=data_models.L7Policy._name(), id=l7policy_id)
return l7rule.L7RuleController(
load_balancer_id=self.load_balancer_id,
listener_id=self.listener_id,
l7policy_id=db_l7policy.id), remainder
| 46.235294 | 78 | 0.644344 |
8ad10722bb7e89fa9c4da5971c99d44493a47f65
| 730 |
py
|
Python
|
adv/alex.py
|
Zeiin/dl
|
bce5e239dc751baa9266aa5adbe7c8d078d8a9ac
|
[
"Apache-2.0"
] | null | null | null |
adv/alex.py
|
Zeiin/dl
|
bce5e239dc751baa9266aa5adbe7c8d078d8a9ac
|
[
"Apache-2.0"
] | null | null | null |
adv/alex.py
|
Zeiin/dl
|
bce5e239dc751baa9266aa5adbe7c8d078d8a9ac
|
[
"Apache-2.0"
] | null | null | null |
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
return Alex
class Alex(Adv):
comment = 'not consider bk boost of her s2'
a1 = ('s',0.35,'hp100')
a3 = ('sp',0.05)
conf = {}
conf['slots.a'] = Twinfold_Bonds()+The_Plaguebringer()
conf['slots.poison.a'] = conf['slots.a']
conf['slots.d'] = Fatalis()
conf['acl'] = """
`s3, not self.s3_buff
`s1
`s2
`fs, x=5
"""
coab = ['Blade','Wand','Heinwald']
conf['afflict_res.poison'] = 0
def s1_proc(self, e):
self.afflics.poison(e.name,100,0.396)
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
| 23.548387 | 58 | 0.571233 |
cb4b7cbd5bd134f807594c2194e24c8fe6207294
| 686 |
py
|
Python
|
src/zope/session/__init__.py
|
zopefoundation/zope.session
|
22afb6348ba3c375ff7ddd56c105f1a7a1ea4439
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/session/__init__.py
|
zopefoundation/zope.session
|
22afb6348ba3c375ff7ddd56c105f1a7a1ea4439
|
[
"ZPL-2.1"
] | 13 |
2015-06-02T17:10:25.000Z
|
2021-02-20T12:14:56.000Z
|
src/zope/session/__init__.py
|
zopefoundation/zope.session
|
22afb6348ba3c375ff7ddd56c105f1a7a1ea4439
|
[
"ZPL-2.1"
] | 2 |
2015-04-03T09:42:26.000Z
|
2015-05-28T16:37:14.000Z
|
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Core session interfaces and implementation
"""
| 42.875 | 78 | 0.590379 |
4f4ce2d19a1fc7b4023308e606e639c9c5a65a63
| 20,549 |
py
|
Python
|
flux_mito/model_62.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_mito/model_62.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_mito/model_62.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 5000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 200000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
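# --- Hedged usage sketch (editor addition, not part of the generated model) ---
# Shows one way the apoptosis model above could be integrated over time and an
# observable inspected. It assumes the Monomer and Model() declarations earlier in
# this file are present (so `model` is in scope) and that pysb is installed; the
# time span is an arbitrary illustrative choice.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator
    tspan = np.linspace(0, 20000, 201)
    result = ScipyOdeSimulator(model, tspan=tspan).run()
    print(result.observables['ParpC_obs'][-1])  # cleaved PARP at the final time point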
| 95.134259 | 798 | 0.804127 |
e3871e59d5e356ed2bdbcdfc6d4d6e2ba4c939b2
| 91 |
py
|
Python
|
satwik.py
|
lamination123/fossotober
|
b81e6a7932b2cf5f8e8848d075982377bd95cbed
|
[
"MIT"
] | null | null | null |
satwik.py
|
lamination123/fossotober
|
b81e6a7932b2cf5f8e8848d075982377bd95cbed
|
[
"MIT"
] | null | null | null |
satwik.py
|
lamination123/fossotober
|
b81e6a7932b2cf5f8e8848d075982377bd95cbed
|
[
"MIT"
] | null | null | null |
print("satwik")
print("roll no: AM.EN.U4CSE19356")
print("batch:s1-cse-d")
print("bigbash")
| 22.75 | 34 | 0.703297 |
d6a40baae4f1cce4a8ac30995872ef829b1d306f
| 50,494 |
py
|
Python
|
menpo/shape/pointcloud.py
|
eosulliv/menpo
|
4c589a9c3ba103b98e6eb53bb12cbd692ccd4a9e
|
[
"BSD-3-Clause"
] | null | null | null |
menpo/shape/pointcloud.py
|
eosulliv/menpo
|
4c589a9c3ba103b98e6eb53bb12cbd692ccd4a9e
|
[
"BSD-3-Clause"
] | null | null | null |
menpo/shape/pointcloud.py
|
eosulliv/menpo
|
4c589a9c3ba103b98e6eb53bb12cbd692ccd4a9e
|
[
"BSD-3-Clause"
] | null | null | null |
import numbers
import warnings
from warnings import warn
import numpy as np
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
from scipy.sparse import csr_matrix
from scipy.spatial.distance import cdist
from menpo.transform import WithDims
from menpo.visualize import viewwrapper
from .base import Shape
def bounding_box(closest_to_origin, opposite_corner):
r"""
Return a bounding box from two corner points as a directed graph.
    The first point (0) should be nearest the origin.
In the case of an image, this ordering would appear as:
::
0<--3
| ^
| |
v |
1-->2
In the case of a pointcloud, the ordering will appear as:
::
3<--2
| ^
| |
v |
0-->1
Parameters
----------
closest_to_origin : (`float`, `float`)
Two floats representing the coordinates closest to the origin.
Represented by (0) in the graph above. For an image, this will
be the top left. For a pointcloud, this will be the bottom left.
opposite_corner : (`float`, `float`)
Two floats representing the coordinates opposite the corner closest
to the origin.
Represented by (2) in the graph above. For an image, this will
be the bottom right. For a pointcloud, this will be the top right.
Returns
-------
bounding_box : :map:`PointDirectedGraph`
The axis aligned bounding box from the two given corners.
"""
from .graph import PointDirectedGraph
if len(closest_to_origin) != 2 or len(opposite_corner) != 2:
raise ValueError("Only 2D bounding boxes can be created.")
adjacency_matrix = csr_matrix(([1] * 4, ([0, 1, 2, 3], [1, 2, 3, 0])), shape=(4, 4))
box = np.array(
[
closest_to_origin,
[opposite_corner[0], closest_to_origin[1]],
opposite_corner,
[closest_to_origin[0], opposite_corner[1]],
],
        dtype=np.float64,
)
return PointDirectedGraph(box, adjacency_matrix, copy=False)
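# --- Hedged usage sketch (editor addition, not part of the original menpo source) ---
# Illustrates the corner ordering documented above. The helper is defined but never
# called, so importing the module is unaffected; the coordinates are arbitrary.
def _bounding_box_usage_sketch():
    box = bounding_box((0.0, 0.0), (2.0, 3.0))
    # The four corners follow the documented ordering, starting from the corner
    # nearest the origin (see the diagrams in the docstring above).
    return box.points, box.n_points  # ((4, 2) ndarray, 4)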
def bounding_cuboid(near_closest_to_origin, far_opposite_corner):
r"""
Return a bounding cuboid from the near closest and far opposite
corners as a directed graph.
Parameters
----------
near_closest_to_origin : (`float`, `float`, `float`)
Three floats representing the coordinates of the near corner closest to
the origin.
far_opposite_corner : (`float`, `float`, `float`)
Three floats representing the coordinates of the far opposite corner
compared to near_closest_to_origin.
Returns
-------
bounding_box : :map:`PointDirectedGraph`
The axis aligned bounding cuboid from the two given corners.
"""
from .graph import PointDirectedGraph
if len(near_closest_to_origin) != 3 or len(far_opposite_corner) != 3:
raise ValueError("Only 3D bounding cuboids can be created.")
adjacency_matrix = csr_matrix(
(
[1] * 12,
(
[0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7],
[1, 2, 3, 0, 4, 5, 6, 7, 5, 6, 7, 4],
),
),
shape=(8, 8),
)
cuboid = np.array(
[
near_closest_to_origin,
[
far_opposite_corner[0],
near_closest_to_origin[1],
near_closest_to_origin[2],
],
[far_opposite_corner[0], far_opposite_corner[1], near_closest_to_origin[2]],
[
near_closest_to_origin[0],
far_opposite_corner[1],
near_closest_to_origin[2],
],
[
near_closest_to_origin[0],
near_closest_to_origin[1],
far_opposite_corner[2],
],
[far_opposite_corner[0], near_closest_to_origin[1], far_opposite_corner[2]],
far_opposite_corner,
[near_closest_to_origin[0], far_opposite_corner[1], far_opposite_corner[2]],
],
        dtype=np.float64,
)
return PointDirectedGraph(cuboid, adjacency_matrix, copy=False)
class PointCloud(Shape):
r"""
An N-dimensional point cloud. This is internally represented as an `ndarray`
of shape ``(n_points, n_dims)``. This class is important for dealing
with complex functionality such as viewing and representing metadata such
as landmarks.
Currently only 2D and 3D pointclouds are viewable.
Parameters
----------
points : ``(n_points, n_dims)`` `ndarray`
The array representing the points.
copy : `bool`, optional
If ``False``, the points will not be copied on assignment. Note that
this will miss out on additional checks. Further note that we still
demand that the array is C-contiguous - if it isn't, a copy will be
generated anyway.
In general this should only be used if you know what you are doing.
"""
def __init__(self, points, copy=True):
super(PointCloud, self).__init__()
if not copy:
if not points.flags.c_contiguous:
warn(
"The copy flag was NOT honoured. A copy HAS been made. "
"Please ensure the data you pass is C-contiguous."
)
points = np.array(points, copy=True, order="C")
else:
points = np.array(points, copy=True, order="C")
self.points = points
@classmethod
def init_2d_grid(cls, shape, spacing=None):
r"""
Create a pointcloud that exists on a regular 2D grid. The first
dimension is the number of rows in the grid and the second dimension
of the shape is the number of columns. ``spacing`` optionally allows
the definition of the distance between points (uniform over points).
The spacing may be different for rows and columns.
Parameters
----------
shape : `tuple` of 2 `int`
The size of the grid to create, this defines the number of points
across each dimension in the grid. The first element is the number
of rows and the second is the number of columns.
spacing : `int` or `tuple` of 2 `int`, optional
The spacing between points. If a single `int` is provided, this
is applied uniformly across each dimension. If a `tuple` is
provided, the spacing is applied non-uniformly as defined e.g.
``(2, 3)`` gives a spacing of 2 for the rows and 3 for the
columns.
Returns
-------
shape_cls : `type(cls)`
A PointCloud or subclass arranged in a grid.
"""
if len(shape) != 2:
raise ValueError("shape must be 2D.")
grid = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing="ij")
points = np.require(
np.concatenate(grid).reshape([2, -1]).T,
dtype=np.float64,
requirements=["C"],
)
if spacing is not None:
if not (
isinstance(spacing, numbers.Number)
or isinstance(spacing, collections_abc.Sequence)
):
raise ValueError(
"spacing must be either a single number "
"to be applied over each dimension, or a 2D "
"sequence of numbers."
)
if isinstance(spacing, collections_abc.Sequence) and len(spacing) != 2:
raise ValueError("spacing must be 2D.")
points *= np.asarray(spacing, dtype=np.float64)
return cls(points, copy=False)
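    # Hedged example (editor addition): PointCloud.init_2d_grid((2, 3), spacing=(4, 5))
    # would yield the six points [[0, 0], [0, 5], [0, 10], [4, 0], [4, 5], [4, 10]] --
    # rows vary along the first axis with spacing 4, columns along the second with
    # spacing 5.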
@classmethod
def init_from_depth_image(cls, depth_image):
r"""
Return a 3D point cloud from the given depth image. The depth image
is assumed to represent height/depth values and the XY coordinates
        are assumed to be unit spaced and represent image coordinates. This is
particularly useful for visualising depth values that have been
recovered from images.
Parameters
----------
depth_image : :map:`Image` or subclass
A single channel image that contains depth values - as commonly
returned by RGBD cameras, for example.
Returns
-------
depth_cloud : ``type(cls)``
A new 3D PointCloud with unit XY coordinates and the given depth
values as Z coordinates.
"""
from menpo.image import MaskedImage
new_pcloud = cls.init_2d_grid(depth_image.shape)
if isinstance(depth_image, MaskedImage):
new_pcloud = new_pcloud.from_mask(depth_image.mask.as_vector())
return cls(
np.hstack([new_pcloud.points, depth_image.as_vector(keep_channels=True).T]),
copy=False,
)
def with_dims(self, dims):
r"""
Return a copy of this shape with only particular dimensions retained.
Parameters
----------
dims : valid numpy array slice
The slice that will be used on the dimensionality axis of the shape
under transform. For example, to go from a 3D shape to a 2D one,
[0, 1] could be provided or np.array([True, True, False]).
Returns
-------
copy of self, with only the requested dims
"""
return WithDims(dims).apply(self)
@property
def lms(self):
"""Deprecated.
Maintained for compatibility, will be removed in a future version.
Returns a copy of this object, which previously would have held
the 'underlying' :map:`PointCloud` subclass.
:type: self
"""
from menpo.base import MenpoDeprecationWarning
warnings.warn(
"The .lms property is deprecated. LandmarkGroups are "
"now shapes themselves - so you can use them directly "
"anywhere you previously used .lms."
'Simply remove ".lms" from your code and things '
"will work as expected (and this warning will go away)",
MenpoDeprecationWarning,
)
return self.copy()
@property
def n_points(self):
r"""
The number of points in the pointcloud.
:type: `int`
"""
return self.points.shape[0]
@property
def n_dims(self):
r"""
The number of dimensions in the pointcloud.
:type: `int`
"""
return self.points.shape[1]
def h_points(self):
r"""
        Convert pointcloud to a homogeneous array: ``(n_dims + 1, n_points)``
:type: ``type(self)``
"""
return np.concatenate(
(self.points.T, np.ones(self.n_points, dtype=self.points.dtype)[None, :])
)
def centre(self):
r"""
The mean of all the points in this PointCloud (centre of mass).
Returns
-------
centre : ``(n_dims)`` `ndarray`
The mean of this PointCloud's points.
"""
return np.mean(self.points, axis=0)
def centre_of_bounds(self):
r"""
The centre of the absolute bounds of this PointCloud. Contrast with
:meth:`centre`, which is the mean point position.
Returns
-------
centre : ``n_dims`` `ndarray`
The centre of the bounds of this PointCloud.
"""
min_b, max_b = self.bounds()
return (min_b + max_b) / 2.0
def _as_vector(self):
r"""
Returns a flattened representation of the pointcloud.
Note that the flattened representation is of the form
``[x0, y0, x1, y1, ....., xn, yn]`` for 2D.
Returns
-------
flattened : ``(n_points,)`` `ndarray`
The flattened points.
"""
return self.points.ravel()
def tojson(self):
r"""
Convert this :map:`PointCloud` to a dictionary representation suitable
for inclusion in the LJSON landmark format.
Returns
-------
json : `dict`
            Dictionary with ``labels`` and ``landmarks`` keys.
"""
return {"labels": [], "landmarks": {"points": self.points.tolist()}}
def _from_vector_inplace(self, vector):
r"""
Updates the points of this PointCloud in-place with the reshaped points
from the provided vector. Note that the vector should have the form
``[x0, y0, x1, y1, ....., xn, yn]`` for 2D.
Parameters
----------
vector : ``(n_points,)`` `ndarray`
The vector from which to create the points' array.
"""
self.points = vector.reshape([-1, self.n_dims])
def __str__(self):
return "{}: n_points: {}, n_dims: {}".format(
type(self).__name__, self.n_points, self.n_dims
)
def bounds(self, boundary=0):
r"""
The minimum to maximum extent of the PointCloud. An optional boundary
argument can be provided to expand the bounds by a constant margin.
Parameters
----------
boundary : `float`
            An optional padding distance that is added to the bounds. Default
is ``0``, meaning the max/min of tightest possible containing
square/cube/hypercube is returned.
Returns
-------
min_b : ``(n_dims,)`` `ndarray`
The minimum extent of the :map:`PointCloud` and boundary along
each dimension
max_b : ``(n_dims,)`` `ndarray`
The maximum extent of the :map:`PointCloud` and boundary along
each dimension
"""
min_b = np.min(self.points, axis=0) - boundary
max_b = np.max(self.points, axis=0) + boundary
return min_b, max_b
def range(self, boundary=0):
r"""
The range of the extent of the PointCloud.
Parameters
----------
boundary : `float`
            An optional padding distance that is used to extend the bounds
from which the range is computed. Default is ``0``, no extension
is performed.
Returns
-------
range : ``(n_dims,)`` `ndarray`
The range of the :map:`PointCloud` extent in each dimension.
"""
min_b, max_b = self.bounds(boundary)
return max_b - min_b
def bounding_box(self):
r"""
Return a bounding box from two corner points as a directed graph.
        In the case of a 2D pointcloud, the first point (0) should be nearest the
origin. In the case of an image, this ordering would appear as:
::
0<--3
| ^
| |
v |
1-->2
In the case of a pointcloud, the ordering will appear as:
::
3<--2
| ^
| |
v |
0-->1
In the case of a 3D pointcloud, the first point (0) should be the
near closest to the origin and the second point is the far opposite
corner.
Returns
-------
bounding_box : :map:`PointDirectedGraph`
The axis aligned bounding box of the PointCloud.
"""
if self.n_dims != 2 and self.n_dims != 3:
raise ValueError(
"Bounding boxes are only supported for 2D or 3D " "pointclouds."
)
min_p, max_p = self.bounds()
if self.n_dims == 2:
return bounding_box(min_p, max_p)
elif self.n_dims == 3:
return bounding_cuboid(min_p, max_p)
def _view_2d(
self,
figure_id=None,
new_figure=False,
image_view=True,
render_markers=True,
marker_style="o",
marker_size=5,
marker_face_colour="r",
marker_edge_colour="k",
marker_edge_width=1.0,
render_numbering=False,
numbers_horizontal_align="center",
numbers_vertical_align="bottom",
numbers_font_name="sans-serif",
numbers_font_size=10,
numbers_font_style="normal",
numbers_font_weight="normal",
numbers_font_colour="k",
render_axes=True,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
label=None,
**kwargs,
):
r"""
Visualization of the PointCloud in 2D.
        Parameters
        ----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
image_view : `bool`, optional
If ``True`` the PointCloud will be viewed as if it is in the image
coordinate system.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the PointCloud as a percentage of the PointCloud's
width. If `tuple` or `list`, then it defines the axis limits. If
``None``, then the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the PointCloud as a percentage of the PointCloud's
height. If `tuple` or `list`, then it defines the axis limits. If
``None``, then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
label : `str`, optional
The name entry in case of a legend.
Returns
-------
viewer : :map:`PointGraphViewer2d`
The viewer object.
"""
from menpo.visualize.base import PointGraphViewer2d
adjacency_array = np.empty(0)
renderer = PointGraphViewer2d(
figure_id, new_figure, self.points, adjacency_array
)
renderer.render(
image_view=image_view,
render_lines=False,
line_colour="b",
line_style="-",
line_width=1.0,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
figure_size=figure_size,
label=label,
)
return renderer
def _view_landmarks_2d(
self,
group=None,
with_labels=None,
without_labels=None,
figure_id=None,
new_figure=False,
image_view=True,
render_markers=True,
marker_style="s",
marker_size=7,
marker_face_colour="k",
marker_edge_colour="k",
marker_edge_width=1.0,
render_lines_lms=True,
line_colour_lms=None,
line_style_lms="-",
line_width_lms=1,
render_markers_lms=True,
marker_style_lms="o",
marker_size_lms=5,
marker_face_colour_lms=None,
marker_edge_colour_lms=None,
marker_edge_width_lms=1.0,
render_numbering=False,
numbers_horizontal_align="center",
numbers_vertical_align="bottom",
numbers_font_name="sans-serif",
numbers_font_size=10,
numbers_font_style="normal",
numbers_font_weight="normal",
numbers_font_colour="k",
render_legend=False,
legend_title="",
legend_font_name="sans-serif",
legend_font_style="normal",
legend_font_size=10,
legend_font_weight="normal",
legend_marker_scale=None,
legend_location=2,
legend_bbox_to_anchor=(1.05, 1.0),
legend_border_axes_pad=None,
legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None,
legend_border=True,
legend_border_padding=None,
legend_shadow=False,
legend_rounded_corners=False,
render_axes=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
):
"""
Visualize the landmarks. This method will appear on the `PointCloud` as
``view_landmarks``.
Parameters
----------
        group : `str` or ``None``, optional
The landmark group to be visualized. If ``None`` and there are more
than one landmark groups, an error is raised.
with_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, only show the given label(s). Should **not** be
used with the ``without_labels`` kwarg.
without_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, show all except the given label(s). Should **not**
be used with the ``with_labels`` kwarg.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
image_view : `bool`, optional
If ``True`` the PointCloud will be viewed as if it is in the image
coordinate system.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_lines_lms : `bool`, optional
If ``True``, the edges of the landmarks will be rendered.
line_colour_lms : See Below, optional
The colour of the lines of the landmarks.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style_lms : ``{-, --, -., :}``, optional
The style of the lines of the landmarks.
line_width_lms : `float`, optional
The width of the lines of the landmarks.
        render_markers_lms : `bool`, optional
If ``True``, the markers of the landmarks will be rendered.
        marker_style_lms : See Below, optional
The style of the markers of the landmarks. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
        marker_size_lms : `int`, optional
The size of the markers of the landmarks in points.
        marker_face_colour_lms : See Below, optional
The face (filling) colour of the markers of the landmarks.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
        marker_edge_colour_lms : See Below, optional
The edge colour of the markers of the landmarks.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
        marker_edge_width_lms : `float`, optional
The width of the markers' edge of the landmarks.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : ``{normal, italic, oblique}``, optional
The font style of the legend.
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : See Below, optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
            The relative size of the legend markers with respect to the original markers.
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox that the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold,demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the PointCloud as a percentage of the PointCloud's
width. If `tuple` or `list`, then it defines the axis limits. If
``None``, then the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the PointCloud as a percentage of the PointCloud's
height. If `tuple` or `list`, then it defines the axis limits. If
``None``, then the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
        figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Raises
------
ValueError
If both ``with_labels`` and ``without_labels`` are passed.
ValueError
If the landmark manager doesn't contain the provided group label.
"""
if not self.has_landmarks:
raise ValueError(
"PointCloud does not have landmarks attached, "
"unable to view landmarks."
)
self_view = self.view(
figure_id=figure_id,
new_figure=new_figure,
image_view=image_view,
figure_size=figure_size,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
)
# correct group label in legend
if group is None:
group = self.landmarks.group_labels[0]
landmark_view = self.landmarks[group].view(
with_labels=with_labels,
without_labels=without_labels,
figure_id=self_view.figure_id,
new_figure=False,
group=group,
image_view=image_view,
render_lines=render_lines_lms,
line_colour=line_colour_lms,
line_style=line_style_lms,
line_width=line_width_lms,
render_markers=render_markers_lms,
marker_style=marker_style_lms,
marker_size=marker_size_lms,
marker_face_colour=marker_face_colour_lms,
marker_edge_colour=marker_edge_colour_lms,
marker_edge_width=marker_edge_width_lms,
render_numbering=render_numbering,
numbers_horizontal_align=numbers_horizontal_align,
numbers_vertical_align=numbers_vertical_align,
numbers_font_name=numbers_font_name,
numbers_font_size=numbers_font_size,
numbers_font_style=numbers_font_style,
numbers_font_weight=numbers_font_weight,
numbers_font_colour=numbers_font_colour,
render_legend=render_legend,
legend_title=legend_title,
legend_font_name=legend_font_name,
legend_font_style=legend_font_style,
legend_font_size=legend_font_size,
legend_font_weight=legend_font_weight,
legend_marker_scale=legend_marker_scale,
legend_location=legend_location,
legend_bbox_to_anchor=legend_bbox_to_anchor,
legend_border_axes_pad=legend_border_axes_pad,
legend_n_columns=legend_n_columns,
legend_horizontal_spacing=legend_horizontal_spacing,
legend_vertical_spacing=legend_vertical_spacing,
legend_border=legend_border,
legend_border_padding=legend_border_padding,
legend_shadow=legend_shadow,
legend_rounded_corners=legend_rounded_corners,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
figure_size=figure_size,
)
return landmark_view
def _view_3d(
self,
figure_id=None,
new_figure=True,
render_markers=True,
marker_style="sphere",
marker_size=None,
marker_colour="r",
marker_resolution=8,
step=None,
alpha=1.0,
render_numbering=False,
numbers_colour="k",
numbers_size=None,
**kwargs,
):
r"""
Visualization of the PointCloud in 3D.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : `str`, optional
The style of the markers.
Example options ::
{2darrow, 2dcircle, 2dcross, 2ddash, 2ddiamond, 2dhooked_arrow,
2dsquare, 2dthick_arrow, 2dthick_cross, 2dtriangle, 2dvertex,
arrow, axes, cone, cube, cylinder, point, sphere}
marker_size : `float` or ``None``, optional
The size of the markers. This size can be seen as a scale factor
            applied to the marker size, which is by default calculated from
the inter-marker spacing. If ``None``, then an optimal marker size
value will be set automatically.
marker_colour : See Below, optional
The colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_resolution : `int`, optional
The resolution of the markers. For spheres, for instance, this is
the number of divisions along theta and phi.
step : `int` or ``None``, optional
If `int`, then one every `step` vertexes will be rendered.
If ``None``, then all vertexes will be rendered.
alpha : `float`, optional
Defines the transparency (opacity) of the object.
render_numbering : `bool`, optional
If ``True``, the points will be numbered.
numbers_colour : See Below, optional
The colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
numbers_size : `float` or ``None``, optional
The size of the numbers. This size can be seen as a scale factor
applied to the numbers, which is by default calculated from
the inter-marker spacing. If ``None``, then an optimal numbers size
value will be set automatically.
Returns
-------
renderer : `menpo3d.visualize.PointGraphViewer3d`
The Menpo3D rendering object.
"""
try:
from menpo3d.visualize import PointGraphViewer3d
edges = np.empty(0)
renderer = PointGraphViewer3d(figure_id, new_figure, self.points, edges)
renderer.render(
render_lines=False,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_colour=marker_colour,
marker_resolution=marker_resolution,
step=step,
alpha=alpha,
render_numbering=render_numbering,
numbers_colour=numbers_colour,
numbers_size=numbers_size,
)
return renderer
except ImportError as e:
from menpo.visualize import Menpo3dMissingError
raise Menpo3dMissingError(e)
def _view_landmarks_3d(
self,
group=None,
with_labels=None,
without_labels=None,
figure_id=None,
new_figure=True,
render_lines=True,
line_colour=None,
line_width=4,
render_markers=True,
marker_style="sphere",
marker_size=None,
marker_colour=None,
marker_resolution=8,
step=None,
alpha=1.0,
render_numbering=False,
numbers_colour="k",
numbers_size=None,
):
r"""
Visualization of the PointCloud landmarks in 3D.
Parameters
----------
with_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, only show the given label(s). Should **not** be
used with the ``without_labels`` kwarg.
without_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, show all except the given label(s). Should **not**
be used with the ``with_labels`` kwarg.
group : `str` or `None`, optional
The landmark group to be visualized. If ``None`` and there are more
than one landmark groups, an error is raised.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, then the lines will be rendered.
line_colour : See Below, optional
The colour of the lines. If ``None``, a different colour will be
automatically selected for each label.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
or
None
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, then the markers will be rendered.
marker_style : `str`, optional
The style of the markers.
Example options ::
{2darrow, 2dcircle, 2dcross, 2ddash, 2ddiamond, 2dhooked_arrow,
2dsquare, 2dthick_arrow, 2dthick_cross, 2dtriangle, 2dvertex,
arrow, axes, cone, cube, cylinder, point, sphere}
marker_size : `float` or ``None``, optional
The size of the markers. This size can be seen as a scale factor
            applied to the marker size, which is by default calculated from
the inter-marker spacing. If ``None``, then an optimal marker size
value will be set automatically.
marker_colour : See Below, optional
The colour of the markers. If ``None``, a different colour will be
automatically selected for each label.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
or
None
marker_resolution : `int`, optional
The resolution of the markers. For spheres, for instance, this is
the number of divisions along theta and phi.
step : `int` or ``None``, optional
If `int`, then one every `step` vertexes will be rendered.
If ``None``, then all vertexes will be rendered.
alpha : `float`, optional
Defines the transparency (opacity) of the object.
render_numbering : `bool`, optional
If ``True``, the points will be numbered.
numbers_colour : See Below, optional
The colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
numbers_size : `float` or ``None``, optional
The size of the numbers. This size can be seen as a scale factor
applied to the numbers, which is by default calculated from
the inter-marker spacing. If ``None``, then an optimal numbers size
value will be set automatically.
Returns
-------
renderer : `menpo3d.visualize.LandmarkViewer3d`
The Menpo3D rendering object.
"""
if not self.has_landmarks:
raise ValueError(
"PointCloud does not have landmarks attached, "
"unable to view landmarks."
)
self_view = self.view(figure_id=figure_id, new_figure=new_figure)
landmark_view = self.landmarks[group].view(
with_labels=with_labels,
without_labels=without_labels,
figure_id=self_view.figure_id,
new_figure=False,
render_lines=render_lines,
line_colour=line_colour,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_colour=marker_colour,
marker_resolution=marker_resolution,
step=step,
alpha=alpha,
render_numbering=render_numbering,
numbers_colour=numbers_colour,
numbers_size=numbers_size,
)
return landmark_view
@viewwrapper
    def view_widget(self):
r"""
Abstract method for viewing with an interactive widget. See the
:map:`viewwrapper` documentation for an explanation of how the
`view_widget` method works.
"""
pass
def _view_widget_2d(self, figure_size=(7, 7)):
r"""
Visualization of the PointCloud using an interactive widget.
Parameters
----------
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
"""
try:
from menpowidgets import view_widget
view_widget(self, figure_size=figure_size)
except ImportError as e:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError(e)
def _view_widget_3d(self):
r"""
Visualization of the PointCloud using an interactive widget.
"""
try:
from menpowidgets import view_widget
view_widget(self)
except ImportError as e:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError(e)
def _transform_self_inplace(self, transform):
self.points = transform(self.points)
return self
def distance_to(self, pointcloud, **kwargs):
r"""
Returns a distance matrix between this PointCloud and another.
By default the Euclidean distance is calculated - see
`scipy.spatial.distance.cdist` for valid kwargs to change the metric
and other properties.
Parameters
----------
pointcloud : :map:`PointCloud`
The second pointcloud to compute distances between. This must be
of the same dimension as this PointCloud.
Returns
-------
distance_matrix: ``(n_points, n_points)`` `ndarray`
The symmetric pairwise distance matrix between the two PointClouds
s.t. ``distance_matrix[i, j]`` is the distance between the i'th
point of this PointCloud and the j'th point of the input
PointCloud.
"""
if self.n_dims != pointcloud.n_dims:
raise ValueError(
"The two PointClouds must be of the same " "dimensionality."
)
return cdist(self.points, pointcloud.points, **kwargs)
def norm(self, **kwargs):
r"""
Returns the norm of this PointCloud. This is a translation and
rotation invariant measure of the point cloud's intrinsic size - in
other words, it is always taken around the point cloud's centre.
By default, the Frobenius norm is taken, but this can be changed by
setting kwargs - see ``numpy.linalg.norm`` for valid options.
Returns
-------
norm : `float`
The norm of this :map:`PointCloud`
"""
return np.linalg.norm(self.points - self.centre(), **kwargs)
def from_mask(self, mask):
"""
A 1D boolean array with the same number of elements as the number of
points in the PointCloud. This is then broadcast across the dimensions
of the PointCloud and returns a new PointCloud containing only those
points that were ``True`` in the mask.
Parameters
----------
mask : ``(n_points,)`` `ndarray`
1D array of booleans
Returns
-------
pointcloud : :map:`PointCloud`
A new pointcloud that has been masked.
Raises
------
ValueError
Mask must have same number of points as pointcloud.
"""
if mask.shape[0] != self.n_points:
raise ValueError(
"Mask must be a 1D boolean array of the same "
"number of entries as points in this PointCloud."
)
pc = self.copy()
pc.points = pc.points[mask, :]
return pc
def constrain_to_bounds(self, bounds):
r"""
Returns a copy of this PointCloud, constrained to lie exactly within
the given bounds. Any points outside the bounds will be 'snapped'
to lie *exactly* on the boundary.
Parameters
----------
bounds : ``(n_dims, n_dims)`` tuple of scalars
The bounds to constrain this pointcloud within.
Returns
-------
constrained : :map:`PointCloud`
The constrained pointcloud.
"""
pc = self.copy()
for k in range(pc.n_dims):
tmp = pc.points[:, k]
tmp[tmp < bounds[0][k]] = bounds[0][k]
tmp[tmp > bounds[1][k]] = bounds[1][k]
pc.points[:, k] = tmp
return pc
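# --- Hedged usage sketch (editor addition, not part of the original menpo source) ---
# Ties together several of the PointCloud methods documented above. The helper is
# defined but never called, so importing this module is unaffected; the grid shape,
# spacing and mask are arbitrary illustrative choices.
def _pointcloud_usage_sketch():
    grid = PointCloud.init_2d_grid((3, 4), spacing=2)   # 12 points on a 3x4 grid
    min_b, max_b = grid.bounds()                         # per-dimension extents
    box = grid.bounding_box()                            # 4-corner PointDirectedGraph
    first_row = grid.from_mask(grid.points[:, 0] < 1)    # keep the 4 points in row 0
    distances = grid.distance_to(first_row)              # (12, 4) Euclidean distances
    return min_b, max_b, box.n_points, first_row.n_points, distances.shape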
| 36.118741 | 88 | 0.575455 |
b9602045e75f1071a0863f078b81897c2c3784bb
| 1,481 |
py
|
Python
|
py_yr/weatherdata/forecast/tabular/time/time.py
|
Matmonsen/py-yr
|
2e729959ae9fe334686971c6f62ccb0bf2d8b9a4
|
[
"MIT"
] | null | null | null |
py_yr/weatherdata/forecast/tabular/time/time.py
|
Matmonsen/py-yr
|
2e729959ae9fe334686971c6f62ccb0bf2d8b9a4
|
[
"MIT"
] | null | null | null |
py_yr/weatherdata/forecast/tabular/time/time.py
|
Matmonsen/py-yr
|
2e729959ae9fe334686971c6f62ccb0bf2d8b9a4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from py_yr import utilities
from .symbol import Symbol
from .precipitation import Precipitation
from .winddirection import WindDirection
from .windspeed import WindSpeed
from .temperature import Temperature
from .pressure import Pressure
class Time(object):
def __init__(self, time):
self.from_ = utilities.parse_iso8601(time['from'])
self.to = utilities.parse_iso8601(time['to'])
try:
self.period = int(time['period'])
except KeyError:
self.period = None
self.symbol = Symbol(time['symbol'])
self.precipitation = Precipitation(time['precipitation'])
self.windDirection = WindDirection(time['windDirection'])
self.windSpeed = WindSpeed(time['windSpeed'])
self.temperature = Temperature(time['temperature'])
self.pressure = Pressure(time['pressure'])
def __str__(self):
return '\t\t\tFrom: {0} ' \
'\n\t\t\tTo: {1} ' \
'\n\t\t\tPeriod: {2} ' \
'\n\t\t\tSymbol: \n{3} ' \
'\n\t\t\tPrecipitation: \n{4} ' \
'\n\t\t\tWindDirection: \n{5} ' \
'\n\t\t\tWindSpeed: \n{6} ' \
'\n\t\t\tTemperature: \n{7} ' \
'\n\t\t\tPressure: \n{8}'.format(self.from_, self.to, self.period, self.symbol, self.precipitation,
                                                self.windDirection, self.windSpeed, self.temperature, self.pressure)
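# --- Hedged usage sketch (editor addition, not part of the original py-yr source) ---
# Shows the shape of the dict Time() expects, as implied by the constructor above.
# The nested dicts' own keys are defined by the Symbol/Precipitation/... classes and
# are not reproduced here; `forecast_entry` is an illustrative name only.
def _time_usage_sketch(forecast_entry):
    # forecast_entry is one <time> element from YR's forecast XML (e.g. parsed with
    # xmltodict): it must carry 'from', 'to', optionally 'period', and the nested
    # 'symbol', 'precipitation', 'windDirection', 'windSpeed', 'temperature' and
    # 'pressure' dicts.
    return Time(forecast_entry)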
| 40.027027 | 114 | 0.584065 |
314b81f2d6eb22cd7745fa83cf8cfe3b841ec879
| 1,536 |
py
|
Python
|
actions/acoslib/action.py
|
StackStorm-Exchange/acos
|
485ab159eb98e7b376a804c651336a5912342065
|
[
"Apache-2.0"
] | 2 |
2017-07-11T10:43:59.000Z
|
2017-09-13T03:03:37.000Z
|
actions/acoslib/action.py
|
StackStorm-Exchange/acos
|
485ab159eb98e7b376a804c651336a5912342065
|
[
"Apache-2.0"
] | 11 |
2017-05-09T11:54:25.000Z
|
2022-02-24T23:36:04.000Z
|
actions/acoslib/action.py
|
StackStorm-Exchange/acos
|
485ab159eb98e7b376a804c651336a5912342065
|
[
"Apache-2.0"
] | 3 |
2017-05-07T13:17:58.000Z
|
2021-01-28T17:33:47.000Z
|
import acos_client as acos
import logging
from st2common.runners.base_action import Action
class BaseAction(Action):
DEFAULT_AXAPI_VERSION_STR = 'v3.0'
DEFAULT_AXAPI_VERSION = acos.AXAPI_30
# These are the parameters for acos pack, not used by the ACOS Client
PARAMS_FOR_PACK = ['appliance', 'action', 'object_path', 'one_target']
def __init__(self, config):
super(BaseAction, self).__init__(config)
self._set_loglevel(logging.INFO)
self.config = config
def login(self, appliance):
try:
config = next(x for x in self.config['appliance'] if x['target'] == appliance)
return acos.Client(config['target'],
config['api_version'],
config['userid'],
config['passwd'])
except acos.errors.ACOSUnsupportedVersion as e:
self.logger.error(e)
except KeyError as e:
self.logger.error(e)
except StopIteration:
self.logger.error("Specified appliance(%s) doesn't exist in the configuration file " %
appliance)
def get_object(self, base_obj, object_path):
obj = base_obj
for path in object_path.split('.'):
obj = getattr(obj, path)
return obj
def _set_loglevel(self, level):
for key, logger in logging.Logger.manager.loggerDict.items():
if isinstance(logger, logging.Logger):
logger.setLevel(level)
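# --- Hedged usage sketch (editor addition, not part of the original pack) ---
# Illustrates how a concrete StackStorm action could build on BaseAction: log in to a
# configured appliance, resolve a dotted object path on the ACOS client and invoke it.
# The class name and behaviour below are illustrative assumptions, not pack code.
class ExampleGetAction(BaseAction):
    def run(self, appliance, object_path, **params):
        client = self.login(appliance)
        # Drop pack-level parameters before forwarding the rest to the client call.
        api_params = {k: v for k, v in params.items() if k not in self.PARAMS_FOR_PACK}
        return self.get_object(client, object_path)(**api_params)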
| 32.680851 | 98 | 0.597656 |
5e7e44732433edeb14bddd6577762f2dd9707d92
| 6,902 |
py
|
Python
|
infoblox_netmri/api/remote/models/issue_detail_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/remote/models/issue_detail_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/remote/models/issue_detail_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class IssueDetailRemote(RemoteModel):
"""
The issues NetMRI has identified on the network. This includes the common issue fields described below, but not fields specific to each issue type.
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that raised this issue.
| ``attribute type:`` number
| ``IssueID:`` The internal NetMRI identifier for this issue instance.
| ``attribute type:`` number
| ``StartTime:`` The starting effective time of this revision of the record.
| ``attribute type:`` datetime
| ``EndTime:`` The ending effective time of this revision of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``ChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
| ``Timestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``IssueTypeID:`` An internal NetMRI identifier for the type of this issue.
| ``attribute type:`` string
| ``DetailID:`` A unique identifier for this issue instance.
| ``attribute type:`` string
| ``DeviceID:`` The internal NetMRI identifier for the device to which this issue applies.
| ``attribute type:`` number
| ``InterfaceID:`` The internal NetMRI identifier for the interface to which this issue applies, if relevant.
| ``attribute type:`` number
| ``VlanID:`` The internal NetMRI identifier of the VLAN to which this issue applies, if relevant.
| ``attribute type:`` number
| ``SubnetID:`` The internal NetMRI identifier for the subnet to which this issue applies, if relevant.
| ``attribute type:`` number
| ``IprgID:`` The internal NetMRI identifier for the HSRP or VRRP group to which this issue applies, if relevant.
| ``attribute type:`` number
| ``BatchID:`` The internal NetMRI identifier for the job execution batch to which this issue applies, if relevant.
| ``attribute type:`` number
| ``AltDeviceID:`` The internal NetMRI identifier of the alternate device (such as a neighbor) involved in this issue, if relevant.
| ``attribute type:`` number
| ``Criteria:`` The criteria value for this issue at the time it was raised.
| ``attribute type:`` string
| ``IssueValue:`` The meaning of this field varies based upon the specific issue.
| ``attribute type:`` string
| ``Component:`` The issue component (Devices, Configuration, VLANs, etc.).
| ``attribute type:`` string
| ``SeverityID:`` The issue severity ID (1 = Error, 2 = Warning, 3 = Info). Useful for sorting.
| ``attribute type:`` number
| ``Correctness:`` The correctness contribution for this issue.
| ``attribute type:`` float
| ``Stability:`` The stability contribution for this issue.
| ``attribute type:`` float
| ``SuppressedInd:`` A flag indicating whether this issue is suppressed or not.
| ``attribute type:`` bool
| ``StartTime:`` The date/time this issue instance was raised.
| ``attribute type:`` datetime
| ``title:`` The descriptive title for this type of issue.
| ``attribute type:`` string
| ``severity:`` The issue severity.
| ``attribute type:`` string
"""
properties = ("DataSourceID",
"IssueID",
"StartTime",
"EndTime",
"ChangedCols",
"Timestamp",
"IssueTypeID",
"DetailID",
"DeviceID",
"InterfaceID",
"VlanID",
"SubnetID",
"IprgID",
"BatchID",
"AltDeviceID",
"Criteria",
"IssueValue",
"Component",
"SeverityID",
"Correctness",
"Stability",
"SuppressedInd",
"StartTime",
"title",
"severity",
)
@property
@check_api_availability
def data_source(self):
"""
The NetMRI device that raised this issue.
``attribute type:`` model
"""
return self.broker.data_source(**{"IssueID": self.IssueID })
@property
@check_api_availability
def device(self):
"""
The device to which this issue applies.
``attribute type:`` model
"""
return self.broker.device(**{"IssueID": self.IssueID })
@property
@check_api_availability
def interface(self):
"""
The interface to which this issue applies, if relevant.
``attribute type:`` model
"""
return self.broker.interface(**{"IssueID": self.IssueID })
@property
@check_api_availability
def vlan(self):
"""
The VLAN to which this issue applies, if relevant.
``attribute type:`` model
"""
return self.broker.vlan(**{"IssueID": self.IssueID })
@property
@check_api_availability
def subnet(self):
"""
The subnet to which this issue applies, if relevant.
``attribute type:`` model
"""
return self.broker.subnet(**{"IssueID": self.IssueID })
@property
@check_api_availability
def iprg(self):
"""
The HSRP or VRRP group to which this issue applies, if relevant.
``attribute type:`` model
"""
return self.broker.iprg(**{"IssueID": self.IssueID })
@property
@check_api_availability
def alternate_device(self):
"""
The alternate device (such as a neighbor) involved in this issue, if relevant.
``attribute type:`` model
"""
return self.broker.alternate_device(**{"IssueID": self.IssueID })
@property
@check_api_availability
def issue_desc(self):
"""
Information such as title and description, that depends only on the issue type, and does not change with each issue instance.
``attribute type:`` model
"""
return self.broker.issue_desc(**{"IssueID": self.IssueID })
@property
@check_api_availability
def infradevice(self):
"""
The device to which this issue applies.
``attribute type:`` model
"""
return self.broker.infradevice(**{"IssueID": self.IssueID })
| 30.139738 | 151 | 0.572008 |
2c1bb29d57213e2713840d2d14c37a7f5f7a505f
| 3,856 |
py
|
Python
|
bfs.py
|
maheshreddykukunooru/Sokoban-Game
|
a009e00a905d29490b731c8773bf75cfcc95d48e
|
[
"MIT"
] | 2 |
2020-09-22T12:56:18.000Z
|
2021-07-02T22:55:18.000Z
|
bfs.py
|
maheshreddykukunooru/Sokoban-Game
|
a009e00a905d29490b731c8773bf75cfcc95d48e
|
[
"MIT"
] | 1 |
2019-04-22T14:30:54.000Z
|
2019-07-27T12:05:17.000Z
|
bfs.py
|
maheshreddykukunooru/Sokoban-Game
|
a009e00a905d29490b731c8773bf75cfcc95d48e
|
[
"MIT"
] | 2 |
2019-08-02T21:05:12.000Z
|
2020-11-06T00:10:10.000Z
|
import collections
import copy
board=[]
maxLength=0
boxRobot=[]
wallsStorageSpaces=[]
possibleMoves = {'U':[-1,0], 'R':[0,1],'D':[1,0],'L':[0,-1]}
maxRowLength = 0
lines=0
print "Enter the board configuration:\n\n"
while(1):
line =raw_input()
if line!="":
lines+=1
board.append(line)
if len(line)>maxRowLength:
maxRowLength=len(line)
else:
break
import time
time_start = time.clock()
for i in range(0,lines):
boxRobot.append([])
wallsStorageSpaces.append([])
for j in range(0,maxRowLength):
boxRobot[-1].append('-')
wallsStorageSpaces[-1].append('-')
## Making the board a rectangle even if the input is not one
for i in range(0,len(board)):
if len(board[i])<maxRowLength:
for j in range(len(board[i]),maxRowLength):
board[i]+='O'
## Storing walls&storage spaces in one 2d array , boxes and robot in another 2d array
for i in range(0,len(board)):
for j in range(0,maxRowLength):
if board[i][j]=='B' or board[i][j]=='R':
boxRobot[i][j]=board[i][j]
wallsStorageSpaces[i][j]=' '
elif board[i][j]=='S' or board[i][j]=='O':
wallsStorageSpaces[i][j] = board[i][j]
boxRobot[i][j] = ' '
elif board[i][j]==' ':
boxRobot[i][j] = ' '
wallsStorageSpaces[i][j]=' '
elif board[i][j] == '*':
boxRobot[i][j] = 'B'
wallsStorageSpaces[i][j] = 'S'
elif board[i][j] == '.':
boxRobot[i][j] = 'R'
wallsStorageSpaces[i][j] = 'S'
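## Cell legend, as read by the loop above: 'B' box, 'R' robot, 'S' storage space,
## 'O' wall, ' ' floor, '*' box already on storage, '.' robot standing on storage.
## A tiny illustrative level (not from the original assignment), solved by the
## move list ['R', 'R']:
## OOOOOO
## OR BSO
## OOOOOO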
##BFS
print "Solving using BFS\n"
movesList=[]
visitedMoves=[]
## Adding source to queue
queue = collections.deque([])
source = [boxRobot,movesList]
if boxRobot not in visitedMoves:
visitedMoves.append(boxRobot)
queue.append(source)
robot_x = -1
robot_y = -1
completed = 0
while len(queue)!=0 and completed==0:
### Popping first item from the queue
temp = queue.popleft()
curPosition = temp[0]
movesTillNow = temp[1]
for i in range(0,lines):
for j in range(0,maxRowLength):
if curPosition[i][j]=='R':
robot_y = j
robot_x = i
break
else:
continue
break
###Getting robot position of the popped element.
for key in possibleMoves:
## Checking for all the four directions
robotNew_x = robot_x+possibleMoves[key][0]
robotNew_y = robot_y+possibleMoves[key][1]
curPositionCopy = copy.deepcopy(curPosition)
movesTillNowCopy = copy.deepcopy(movesTillNow)
if curPositionCopy[robotNew_x][robotNew_y] == 'B':
## If there is a box after robot makes a move
boxNew_x = robotNew_x + possibleMoves[key][0]
boxNew_y = robotNew_y + possibleMoves[key][1]
if curPositionCopy[boxNew_x][boxNew_y]=='B' or wallsStorageSpaces[boxNew_x][boxNew_y]=='O':
## if the cell after robot pushes the box is another box or wall, avoid further steps.
continue
else:
## if the robot can push the block
curPositionCopy[boxNew_x][boxNew_y]='B'
curPositionCopy[robotNew_x][robotNew_y] = 'R'
curPositionCopy[robot_x][robot_y] = ' '
if curPositionCopy not in visitedMoves:
matches= 0
for k in range(0,lines):
for l in range(0,maxRowLength):
if wallsStorageSpaces[k][l]=='S':
if curPositionCopy[k][l]!='B':
matches=1
movesTillNowCopy.append(key)
if matches == 0:
completed = 1
print movesTillNowCopy
else:
queue.append([curPositionCopy,movesTillNowCopy])
visitedMoves.append(curPositionCopy)
else:
## if the robot moves into a wall
if wallsStorageSpaces[robotNew_x][robotNew_y]=='O':
continue
else:
## if the robot moves into empty space
curPositionCopy[robotNew_x][robotNew_y]='R'
curPositionCopy[robot_x][robot_y]=' '
if curPositionCopy not in visitedMoves:
movesTillNowCopy.append(key)
queue.append([curPositionCopy,movesTillNowCopy])
visitedMoves.append(curPositionCopy)
if completed==0:
print "Can't make it"
time_end = time.clock()
print "Run time: "+str(time_end - time_start)
| 25.202614 | 94 | 0.670384 |
ad1961f60d43f14250f3dedd15c38ca72c750a09
| 1,610 |
py
|
Python
|
nsd1812/devops/day03/runpb.py
|
MrWangwf/nsd2019
|
5e859b4b1926dc098d236be3720779c50d0a55fc
|
[
"Apache-2.0"
] | 1 |
2019-09-19T04:53:22.000Z
|
2019-09-19T04:53:22.000Z
|
nsd1812/devops/day03/runpb.py
|
MrWangwf/nsd2019
|
5e859b4b1926dc098d236be3720779c50d0a55fc
|
[
"Apache-2.0"
] | null | null | null |
nsd1812/devops/day03/runpb.py
|
MrWangwf/nsd2019
|
5e859b4b1926dc098d236be3720779c50d0a55fc
|
[
"Apache-2.0"
] | 1 |
2021-12-28T04:26:02.000Z
|
2021-12-28T04:26:02.000Z
|
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.executor.playbook_executor import PlaybookExecutor
Options = namedtuple(
'Options',
[
'connection',
'remote_user',
'ask_sudo_pass',
'verbosity',
'ask_pass',
'module_path',
'forks',
'become',
'become_method',
'become_user',
'check',
'listhosts',
'listtasks',
'listtags',
'syntax',
'sudo_user',
'sudo',
'diff'
]
)
options = Options(
connection='smart',
remote_user=None,
ask_pass=None,
sudo_user=None,
forks=5,
sudo=None,
ask_sudo_pass=False,
verbosity=5,
module_path=None,
become=None,
become_method=None,
become_user=None,
check=False,
diff=False,
listhosts=None,
listtasks=None,
listtags=None,
syntax=None
)
loader = DataLoader()
passwords = dict()
def runpb(pb_path, sources):
inventory = InventoryManager(loader=loader, sources=sources)
variable_manager = VariableManager(loader=loader, inventory=inventory)
playbook = PlaybookExecutor(
playbooks=pb_path,
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords
)
result = playbook.run()
return result
if __name__ == '__main__':
runpb(pb_path=['myansible/lamp.yml'], sources=['myansible/hosts'])
| 23.333333 | 74 | 0.638509 |
c5b9f297faf43b0e9d6fecde5fe39d9e9a16461e
| 913 |
py
|
Python
|
migrations/versions/28fe85a5334d_third_migration.py
|
Roychela/pitch-it
|
126a412ecc5dadc5421491c405a7e056042758b5
|
[
"MIT"
] | null | null | null |
migrations/versions/28fe85a5334d_third_migration.py
|
Roychela/pitch-it
|
126a412ecc5dadc5421491c405a7e056042758b5
|
[
"MIT"
] | null | null | null |
migrations/versions/28fe85a5334d_third_migration.py
|
Roychela/pitch-it
|
126a412ecc5dadc5421491c405a7e056042758b5
|
[
"MIT"
] | null | null | null |
"""Third Migration
Revision ID: 28fe85a5334d
Revises: fc3137677338
Create Date: 2019-07-01 01:02:13.601778
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '28fe85a5334d'
down_revision = 'fc3137677338'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('comment', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comments')
# ### end Alembic commands ###
| 25.361111 | 65 | 0.674699 |
8b98e85a07e047a8626ec3ae12f274f23bed4544
| 843 |
py
|
Python
|
httprunner/exception.py
|
QiChangYin/MultipleInterfaceManager
|
0732cbd2dc9065aa4947ab3243136450874579a4
|
[
"MIT"
] | null | null | null |
httprunner/exception.py
|
QiChangYin/MultipleInterfaceManager
|
0732cbd2dc9065aa4947ab3243136450874579a4
|
[
"MIT"
] | null | null | null |
httprunner/exception.py
|
QiChangYin/MultipleInterfaceManager
|
0732cbd2dc9065aa4947ab3243136450874579a4
|
[
"MIT"
] | 1 |
2019-07-04T12:46:20.000Z
|
2019-07-04T12:46:20.000Z
|
# encoding: utf-8
import json
try:
FileNotFoundError = FileNotFoundError
except NameError:
FileNotFoundError = IOError
try:
JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError:
JSONDecodeError = ValueError
class MyBaseError(BaseException):
pass
class FileFormatError(MyBaseError):
pass
class ParamsError(MyBaseError):
pass
class ResponseError(MyBaseError):
pass
class ParseResponseError(MyBaseError):
pass
class ValidationError(MyBaseError):
pass
class NotFoundError(MyBaseError):
pass
class FunctionNotFound(NotFoundError):
pass
class VariableNotFound(NotFoundError):
pass
class ApiNotFound(NotFoundError):
pass
class SuiteNotFound(NotFoundError):
pass
class TestcaseNotFound(NotFoundError):
pass
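# A minimal, self-contained check (illustrative only; the message text is made up):
# the concrete NotFound errors can all be handled through their shared base class.
if __name__ == '__main__':
    try:
        raise FunctionNotFound('${gen_token} is not defined')
    except NotFoundError as error:
        print('caught NotFoundError: {}'.format(error))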
| 16.86 | 51 | 0.720047 |
4b0ac1649154a5c04f3eb3eb689fc6c41fe32bd9
| 1,068 |
py
|
Python
|
ganjoor/spiders/ferdowsi/pooran.py
|
amirmasoud/ganjoor-crawler
|
a86fe379955ce854765086ab7ba0a78513d052bd
|
[
"MIT"
] | null | null | null |
ganjoor/spiders/ferdowsi/pooran.py
|
amirmasoud/ganjoor-crawler
|
a86fe379955ce854765086ab7ba0a78513d052bd
|
[
"MIT"
] | null | null | null |
ganjoor/spiders/ferdowsi/pooran.py
|
amirmasoud/ganjoor-crawler
|
a86fe379955ce854765086ab7ba0a78513d052bd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
class FerdosiShahnameSpider(scrapy.Spider):
name = "pooran"
allowed_domains = ["ganjoor.net"]
start_urls = ["https://ganjoor.net/ferdousi/shahname/pooran/sh1/"]
order = 1
def parse(self, response):
sh = dict()
sh["type"] = "masnavi"
sh["text"] = dict()
for index, poem in enumerate(response.css("div.poem>article>div.b")):
if index == 0:
sh["title"] = response.css("div.poem>article>h2>a::text").extract_first()
sh["order"] = self.order
self.order = self.order + 1
sh["text"][index] = dict([
("m1", poem.css("div.m1>p::text").extract_first()),
("m2", poem.css("div.m2>p::text").extract_first()),
])
yield sh
next_page = response.css("div.navigation>div.navleft>a::attr(href)").extract_first()
if next_page is not None:
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback=self.parse)
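# Illustrative ways to run this spider (standard Scrapy commands; the output path is an example):
#   scrapy crawl pooran -o pooran.json          # from inside a Scrapy project
#   scrapy runspider pooran.py -o pooran.json   # standalone, given this file on disk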
| 38.142857 | 92 | 0.557116 |
38e116f98f6a6a91828e8d25cbb0d5ceda02a516
| 238 |
py
|
Python
|
python/testData/inspections/PyCompatibilityInspection/importElement.py
|
GGGGGHT/intellij-community
|
b03adc3af56fe8b9409a5ca2c96cf98aa76a50c3
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyCompatibilityInspection/importElement.py
|
GGGGGHT/intellij-community
|
b03adc3af56fe8b9409a5ca2c96cf98aa76a50c3
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyCompatibilityInspection/importElement.py
|
GGGGGHT/intellij-community
|
b03adc3af56fe8b9409a5ca2c96cf98aa76a50c3
|
[
"Apache-2.0"
] | 1 |
2020-10-15T05:56:42.000Z
|
2020-10-15T05:56:42.000Z
|
import <warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9 do not have module Bastion">Bastion</warning>
from <warning descr="Python versions 2.6, 2.7 do not have module asyncio">asyncio</warning> import Queue
from io import StringIO
| 59.5 | 108 | 0.752101 |
0553dfb13301cd00015f97eb827b79731ed3ccbf
| 5,016 |
py
|
Python
|
robot/sdk/AliSpeech.py
|
cclauss/wukong-robot
|
34fd54385d0c619703b23ccb8a88128ac0de3491
|
[
"MIT"
] | 1 |
2019-02-21T12:47:22.000Z
|
2019-02-21T12:47:22.000Z
|
robot/sdk/AliSpeech.py
|
cclauss/wukong-robot
|
34fd54385d0c619703b23ccb8a88128ac0de3491
|
[
"MIT"
] | null | null | null |
robot/sdk/AliSpeech.py
|
cclauss/wukong-robot
|
34fd54385d0c619703b23ccb8a88128ac0de3491
|
[
"MIT"
] | 1 |
2021-03-12T01:56:25.000Z
|
2021-03-12T01:56:25.000Z
|
# -*- coding: UTF-8 -*-
import http.client
import urllib.parse
import json
from robot import utils
from robot import logging
import wave
logger = logging.getLogger(__name__)
def processGETRequest(appKey, token, voice, text, format, sampleRate) :
host = 'nls-gateway.cn-shanghai.aliyuncs.com'
url = 'https://' + host + '/stream/v1/tts'
    # Set the URL request parameters
url = url + '?appkey=' + appKey
url = url + '&token=' + token
url = url + '&text=' + text
url = url + '&format=' + format
url = url + '&sample_rate=' + str(sampleRate)
url = url + '&voice=' + voice
logger.debug(url)
conn = http.client.HTTPSConnection(host)
conn.request(method='GET', url=url)
    # Handle the response returned by the server
    response = conn.getresponse()
    logger.debug('Response status and response reason:')
    logger.debug('%s %s', response.status, response.reason)
contentType = response.getheader('Content-Type')
logger.debug(contentType)
body = response.read()
if 'audio/mpeg' == contentType :
logger.debug('The GET request succeed!')
tmpfile = utils.write_temp_file(body, '.mp3')
conn.close()
return tmpfile
else :
logger.debug('The GET request failed: ' + str(body))
conn.close()
return None
def processPOSTRequest(appKey, token, voice, text, format, sampleRate) :
host = 'nls-gateway.cn-shanghai.aliyuncs.com'
url = 'https://' + host + '/stream/v1/tts'
    # Set the HTTPS headers
httpHeaders = {
'Content-Type': 'application/json'
}
    # Set the HTTPS request body
body = {'appkey': appKey, 'token': token, 'text': text, 'format': format, 'sample_rate': sampleRate, 'voice': voice}
body = json.dumps(body)
logger.debug('The POST request body content: ' + body)
    # For Python 2.x, use httplib:
    # conn = httplib.HTTPSConnection(host)
    # For Python 3.x, use http.client:
conn = http.client.HTTPSConnection(host)
conn.request(method='POST', url=url, body=body, headers=httpHeaders)
    # Handle the response returned by the server
    response = conn.getresponse()
    logger.debug('Response status and response reason:')
    logger.debug('%s %s', response.status, response.reason)
contentType = response.getheader('Content-Type')
logger.debug(contentType)
body = response.read()
if 'audio/mpeg' == contentType :
logger.debug('The POST request succeed!')
tmpfile = utils.write_temp_file(body, '.mp3')
conn.close()
return tmpfile
else :
logger.critical('The POST request failed: ' + str(body))
conn.close()
return None
def process(request, token, audioContent) :
    # Read the audio file
host = 'nls-gateway.cn-shanghai.aliyuncs.com'
    # Set the HTTP request headers
httpHeaders = {
'X-NLS-Token': token,
'Content-type': 'application/octet-stream',
'Content-Length': len(audioContent)
}
conn = http.client.HTTPConnection(host)
conn.request(method='POST', url=request, body=audioContent, headers=httpHeaders)
response = conn.getresponse()
logger.debug('Response status and response reason:')
    logger.debug('%s %s', response.status, response.reason)
body = response.read()
try:
logger.debug('Recognize response is:')
body = json.loads(body)
logger.debug(body)
status = body['status']
if status == 20000000 :
result = body['result']
logger.debug('Recognize result: ' + result)
conn.close()
return result
else :
logger.critical('Recognizer failed!')
conn.close()
return None
except ValueError:
logger.debug('The response is not json format string')
conn.close()
return None
def tts(appKey, token, voice, text):
    # URL-encode the text according to RFC 3986
textUrlencode = text
textUrlencode = urllib.parse.quote_plus(textUrlencode)
textUrlencode = textUrlencode.replace("+", "%20")
textUrlencode = textUrlencode.replace("*", "%2A")
textUrlencode = textUrlencode.replace("%7E", "~")
format = 'mp3'
sampleRate = 16000
return processPOSTRequest(appKey, token, voice, text, format, sampleRate)
def asr(appKey, token, wave_file):
    # Service request URL
url = 'http://nls-gateway.cn-shanghai.aliyuncs.com/stream/v1/asr'
pcm = utils.get_pcm_from_wav(wave_file)
    # Audio file parameters
format = 'pcm'
sampleRate = 16000
enablePunctuationPrediction = True
enableInverseTextNormalization = True
enableVoiceDetection = False
    # Set the RESTful request parameters
request = url + '?appkey=' + appKey
request = request + '&format=' + format
request = request + '&sample_rate=' + str(sampleRate)
if enablePunctuationPrediction :
request = request + '&enable_punctuation_prediction=' + 'true'
if enableInverseTextNormalization :
request = request + '&enable_inverse_text_normalization=' + 'true'
if enableVoiceDetection :
request = request + '&enable_voice_detection=' + 'true'
logger.debug('Request: ' + request)
return process(request, token, pcm)
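# Illustrative calls (the appkey, token and voice name are placeholders that must come
# from the Alibaba NLS console; they are not defined by this module):
#   mp3_path = tts('<appkey>', '<token>', 'xiaoyun', 'hello world')
#   text = asr('<appkey>', '<token>', '/path/to/recording.wav')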
| 35.076923 | 120 | 0.643341 |
6c90f68df28a21c3ad693fdacc563023d2b4fdaf
| 359 |
py
|
Python
|
finitewave/cpuwave2D/stimulation/stim_voltage_coord_2d.py
|
ArsOkenov/Finitewave
|
14274d74be824a395b47a5c53ba18188798ab70d
|
[
"MIT"
] | null | null | null |
finitewave/cpuwave2D/stimulation/stim_voltage_coord_2d.py
|
ArsOkenov/Finitewave
|
14274d74be824a395b47a5c53ba18188798ab70d
|
[
"MIT"
] | null | null | null |
finitewave/cpuwave2D/stimulation/stim_voltage_coord_2d.py
|
ArsOkenov/Finitewave
|
14274d74be824a395b47a5c53ba18188798ab70d
|
[
"MIT"
] | null | null | null |
import numpy as np
from finitewave.core.stimulation import Stim
class StimVoltageCoord2D(Stim):
def __init__(self, time, voltage, x1, x2, y1, y2):
Stim.__init__(self, time, voltage=voltage)
x = np.arange(x1, x2)
y = np.arange(y1, y2)
xx, yy = np.meshgrid(x, y)
self.coords = np.array([xx.ravel(), yy.ravel()]).T
| 27.615385 | 58 | 0.62117 |
be648000ea1bfec896c7228159170e16aa8338ee
| 8,042 |
py
|
Python
|
youtube_dl/extractor/letv.py
|
zoogaezee/youtubeDL
|
01de1a9d506ff51bff4100e11275557226fa8b9a
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/letv.py
|
zoogaezee/youtubeDL
|
01de1a9d506ff51bff4100e11275557226fa8b9a
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/letv.py
|
zoogaezee/youtubeDL
|
01de1a9d506ff51bff4100e11275557226fa8b9a
|
[
"Unlicense"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import datetime
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_ord,
)
from ..utils import (
determine_ext,
ExtractorError,
parse_iso8601,
sanitized_Request,
int_or_none,
encode_data_uri,
)
class LetvIE(InfoExtractor):
IE_DESC = '乐视网'
_VALID_URL = r'http://www\.letv\.com/ptv/vplay/(?P<id>\d+).html'
_TESTS = [{
'url': 'http://www.letv.com/ptv/vplay/22005890.html',
'md5': 'edadcfe5406976f42f9f266057ee5e40',
'info_dict': {
'id': '22005890',
'ext': 'mp4',
'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家',
'description': 'md5:a9cb175fd753e2962176b7beca21a47c',
},
'params': {
'hls_prefer_native': True,
},
}, {
'url': 'http://www.letv.com/ptv/vplay/1415246.html',
'info_dict': {
'id': '1415246',
'ext': 'mp4',
'title': '美人天下01',
'description': 'md5:f88573d9d7225ada1359eaf0dbf8bcda',
},
'params': {
'hls_prefer_native': True,
},
}, {
'note': 'This video is available only in Mainland China, thus a proxy is needed',
'url': 'http://www.letv.com/ptv/vplay/1118082.html',
'md5': '2424c74948a62e5f31988438979c5ad1',
'info_dict': {
'id': '1118082',
'ext': 'mp4',
'title': '与龙共舞 完整版',
'description': 'md5:7506a5eeb1722bb9d4068f85024e3986',
},
'params': {
'hls_prefer_native': True,
},
'skip': 'Only available in China',
}]
@staticmethod
def urshift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
    # ror() and calc_time_key() are reverse-engineered from an embedded swf file in KLetvPlayer.swf
def ror(self, param1, param2):
_loc3_ = 0
while _loc3_ < param2:
param1 = self.urshift(param1, 1) + ((param1 & 1) << 31)
_loc3_ += 1
return param1
def calc_time_key(self, param1):
_loc2_ = 773625421
_loc3_ = self.ror(param1, _loc2_ % 13)
_loc3_ = _loc3_ ^ _loc2_
_loc3_ = self.ror(_loc3_, _loc2_ % 17)
return _loc3_
# see M3U8Encryption class in KLetvPlayer.swf
@staticmethod
def decrypt_m3u8(encrypted_data):
if encrypted_data[:5].decode('utf-8').lower() != 'vc_01':
return encrypted_data
encrypted_data = encrypted_data[5:]
_loc4_ = bytearray()
while encrypted_data:
b = compat_ord(encrypted_data[0])
_loc4_.extend([b // 16, b & 0x0f])
encrypted_data = encrypted_data[1:]
idx = len(_loc4_) - 11
_loc4_ = _loc4_[idx:] + _loc4_[:idx]
_loc7_ = bytearray()
while _loc4_:
_loc7_.append(_loc4_[0] * 16 + _loc4_[1])
_loc4_ = _loc4_[2:]
return bytes(_loc7_)
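    # Reading of the routine above: after the 'vc_01' marker every byte is split into
    # two 4-bit nibbles, the last 11 nibbles are rotated to the front, and the nibbles
    # are re-packed pairwise into bytes to recover the plain m3u8 playlist.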
def _real_extract(self, url):
media_id = self._match_id(url)
page = self._download_webpage(url, media_id)
params = {
'id': media_id,
'platid': 1,
'splatid': 101,
'format': 1,
'tkey': self.calc_time_key(int(time.time())),
'domain': 'www.letv.com'
}
play_json_req = sanitized_Request(
'http://api.letv.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
)
cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
if cn_verification_proxy:
play_json_req.add_header('Ytdl-request-proxy', cn_verification_proxy)
play_json = self._download_json(
play_json_req,
media_id, 'Downloading playJson data')
# Check for errors
playstatus = play_json['playstatus']
if playstatus['status'] == 0:
flag = playstatus['flag']
if flag == 1:
msg = 'Country %s auth error' % playstatus['country']
else:
msg = 'Generic error. flag = %d' % flag
raise ExtractorError(msg, expected=True)
playurl = play_json['playurl']
formats = ['350', '1000', '1300', '720p', '1080p']
dispatch = playurl['dispatch']
urls = []
for format_id in formats:
if format_id in dispatch:
media_url = playurl['domain'][0] + dispatch[format_id][0]
media_url += '&' + compat_urllib_parse.urlencode({
'm3v': 1,
'format': 1,
'expect': 3,
'rateid': format_id,
})
nodes_data = self._download_json(
media_url, media_id,
'Download JSON metadata for format %s' % format_id)
req = self._request_webpage(
nodes_data['nodelist'][0]['location'], media_id,
note='Downloading m3u8 information for format %s' % format_id)
m3u8_data = self.decrypt_m3u8(req.read())
url_info_dict = {
'url': encode_data_uri(m3u8_data, 'application/vnd.apple.mpegurl'),
'ext': determine_ext(dispatch[format_id][1]),
'format_id': format_id,
'protocol': 'm3u8',
}
if format_id[-1:] == 'p':
url_info_dict['height'] = int_or_none(format_id[:-1])
urls.append(url_info_dict)
publish_time = parse_iso8601(self._html_search_regex(
r'发布时间 ([^<>]+) ', page, 'publish time', default=None),
delimiter=' ', timezone=datetime.timedelta(hours=8))
description = self._html_search_meta('description', page, fatal=False)
return {
'id': media_id,
'formats': urls,
'title': playurl['title'],
'thumbnail': playurl['pic'],
'description': description,
'timestamp': publish_time,
}
class LetvTvIE(InfoExtractor):
_VALID_URL = r'http://www.letv.com/tv/(?P<id>\d+).html'
_TESTS = [{
'url': 'http://www.letv.com/tv/46177.html',
'info_dict': {
'id': '46177',
'title': '美人天下',
'description': 'md5:395666ff41b44080396e59570dbac01c'
},
'playlist_count': 35
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
page = self._download_webpage(url, playlist_id)
media_urls = list(set(re.findall(
r'http://www.letv.com/ptv/vplay/\d+.html', page)))
entries = [self.url_result(media_url, ie='Letv')
for media_url in media_urls]
title = self._html_search_meta('keywords', page,
fatal=False).split(',')[0]
description = self._html_search_meta('description', page, fatal=False)
return self.playlist_result(entries, playlist_id, playlist_title=title,
playlist_description=description)
class LetvPlaylistIE(LetvTvIE):
_VALID_URL = r'http://tv.letv.com/[a-z]+/(?P<id>[a-z]+)/index.s?html'
_TESTS = [{
'url': 'http://tv.letv.com/izt/wuzetian/index.html',
'info_dict': {
'id': 'wuzetian',
'title': '武媚娘传奇',
'description': 'md5:e12499475ab3d50219e5bba00b3cb248'
},
# This playlist contains some extra videos other than the drama itself
'playlist_mincount': 96
}, {
'url': 'http://tv.letv.com/pzt/lswjzzjc/index.shtml',
'info_dict': {
'id': 'lswjzzjc',
# The title should be "劲舞青春", but I can't find a simple way to
# determine the playlist title
'title': '乐视午间自制剧场',
'description': 'md5:b1eef244f45589a7b5b1af9ff25a4489'
},
'playlist_mincount': 7
}]
| 33.231405 | 97 | 0.541781 |
8b9b52f9da47ba8ef26ddd0fff6817009ea2b00d
| 7,311 |
py
|
Python
|
autobahn/wamp/test/test_wamp_cryptosign.py
|
artynet/autobahn-python
|
7d4e8121d7949142de2b4c4e12fe92d5f8be2d36
|
[
"MIT"
] | 1,670 |
2015-10-12T15:46:22.000Z
|
2022-03-30T22:12:53.000Z
|
autobahn/wamp/test/test_wamp_cryptosign.py
|
artynet/autobahn-python
|
7d4e8121d7949142de2b4c4e12fe92d5f8be2d36
|
[
"MIT"
] | 852 |
2015-10-16T22:11:03.000Z
|
2022-03-27T07:57:01.000Z
|
autobahn/wamp/test/test_wamp_cryptosign.py
|
artynet/autobahn-python
|
7d4e8121d7949142de2b4c4e12fe92d5f8be2d36
|
[
"MIT"
] | 790 |
2015-10-15T08:46:12.000Z
|
2022-03-30T12:22:13.000Z
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import hashlib
import os
import binascii
from unittest.mock import Mock
import txaio
if os.environ.get('USE_TWISTED', False):
txaio.use_twisted()
from twisted.trial import unittest
elif os.environ.get('USE_ASYNCIO', False):
txaio.use_asyncio()
import unittest
else:
raise Exception('no networking framework selected')
from autobahn.wamp.cryptosign import _makepad, HAS_CRYPTOSIGN
from autobahn.wamp import types
from autobahn.wamp.auth import create_authenticator
if HAS_CRYPTOSIGN:
from autobahn.wamp.cryptosign import SigningKey
from nacl.encoding import HexEncoder
import tempfile
keybody = '''-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACAa38i/4dNWFuZN/72QAJbyOwZvkUyML/u2b2B1uW4RbQAAAJj4FLyB+BS8
gQAAAAtzc2gtZWQyNTUxOQAAACAa38i/4dNWFuZN/72QAJbyOwZvkUyML/u2b2B1uW4RbQ
AAAEBNV9l6aPVVaWYgpthJwM5YJWhRjXKet1PcfHMt4oBFEBrfyL/h01YW5k3/vZAAlvI7
Bm+RTIwv+7ZvYHW5bhFtAAAAFXNvbWV1c2VyQGZ1bmt0aGF0LmNvbQ==
-----END OPENSSH PRIVATE KEY-----'''
pubkey = '''ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJVp3hjHwIQyEladzd8mFcf0YSXcmyKS3qMLB7VqTQKm [email protected]
'''
# test valid vectors for WAMP-cryptosign signature testing
testvectors = [
{
'priv_key': '4d57d97a68f555696620a6d849c0ce582568518d729eb753dc7c732de2804510',
'challenge': 'ff' * 32,
'signature': '9b6f41540c9b95b4b7b281c3042fa9c54cef43c842d62ea3fd6030fcb66e70b3e80d49d44c29d1635da9348d02ec93f3ed1ef227dfb59a07b580095c2b82f80f9d16ca518aa0c2b707f2b2a609edeca73bca8dd59817a633f35574ac6fd80d00'
},
{
'priv_key': 'd511fe78e23934b3dadb52fcd022974b80bd92bccc7c5cf404e46cc0a8a2f5cd',
'challenge': 'b26c1f87c13fc1da14997f1b5a71995dff8fbe0a62fae8473c7bdbd05bfb607d',
'signature': '305aaa3ac25e98f651427688b3fc43fe7d8a68a7ec1d7d61c61517c519bd4a427c3015599d83ca28b4c652333920223844ef0725eb5dc2febfd6af7677b73f01d0852a29b460fc92ec943242ac638a053bbacc200512b18b30d15083cbdc9282'
},
{
'priv_key': '6e1fde9cf9e2359a87420b65a87dc0c66136e66945196ba2475990d8a0c3a25b',
'challenge': 'b05e6b8ad4d69abf74aa3be3c0ee40ae07d66e1895b9ab09285a2f1192d562d2',
'signature': 'ee3c7644fd8070532bc1fde3d70d742267da545d8c8f03e63bda63f1ad4214f4d2c4bfdb4eb9526def42deeb7e31602a6ff99eba893e0a4ad4d45892ca75e608d2b75e24a189a7f78ca776ba36fc53f6c3e31c32f251f2c524f0a44202f2902d'
}
]
class TestAuth(unittest.TestCase):
def setUp(self):
self.key = SigningKey.from_ssh_data(keybody)
self.privkey_hex = self.key._key.encode(encoder=HexEncoder)
m = hashlib.sha256()
m.update("some TLS message".encode())
self.channel_id = m.digest()
def test_valid(self):
session = Mock()
session._transport.get_channel_id = Mock(return_value=self.channel_id)
challenge = types.Challenge("ticket", dict(challenge="ff" * 32))
f_signed = self.key.sign_challenge(session, challenge)
def success(signed):
self.assertEqual(
192,
len(signed),
)
self.assertEqual(
'9b6f41540c9b95b4b7b281c3042fa9c54cef43c842d62ea3fd6030fcb66e70b3e80d49d44c29d1635da9348d02ec93f3ed1ef227dfb59a07b580095c2b82f80f9d16ca518aa0c2b707f2b2a609edeca73bca8dd59817a633f35574ac6fd80d00',
signed,
)
def failed(err):
self.fail(str(err))
txaio.add_callbacks(f_signed, success, failed)
def test_testvectors(self):
session = Mock()
session._transport.get_channel_id = Mock(return_value=self.channel_id)
for testvec in testvectors:
priv_key = SigningKey.from_key_bytes(binascii.a2b_hex(testvec['priv_key']))
challenge = types.Challenge("ticket", dict(challenge=testvec['challenge']))
f_signed = priv_key.sign_challenge(session, challenge)
def success(signed):
self.assertEqual(
192,
len(signed),
)
self.assertEqual(
testvec['signature'],
signed,
)
def failed(err):
self.fail(str(err))
txaio.add_callbacks(f_signed, success, failed)
def test_authenticator(self):
authenticator = create_authenticator(
"cryptosign",
authid="someone",
privkey=self.privkey_hex,
)
session = Mock()
session._transport.get_channel_id = Mock(return_value=self.channel_id)
challenge = types.Challenge("cryptosign", dict(challenge="ff" * 32))
f_reply = authenticator.on_challenge(session, challenge)
def success(reply):
self.assertEqual(
reply,
'9b6f41540c9b95b4b7b281c3042fa9c54cef43c842d62ea3fd6030fcb66e70b3e80d49d44c29d1635da9348d02ec93f3ed1ef227dfb59a07b580095c2b82f80f9d16ca518aa0c2b707f2b2a609edeca73bca8dd59817a633f35574ac6fd80d00',
)
def failed(err):
self.fail(str(err))
txaio.add_callbacks(f_reply, success, failed)
class TestKey(unittest.TestCase):
def test_pad(self):
self.assertEqual(_makepad(0), '')
self.assertEqual(_makepad(2), '\x01\x02')
self.assertEqual(_makepad(3), '\x01\x02\x03')
def test_key(self):
with tempfile.NamedTemporaryFile('w+t') as fp:
fp.write(keybody)
fp.seek(0)
key = SigningKey.from_ssh_key(fp.name)
self.assertEqual(key.public_key(), '1adfc8bfe1d35616e64dffbd900096f23b066f914c8c2ffbb66f6075b96e116d')
def test_pubkey(self):
with tempfile.NamedTemporaryFile('w+t') as fp:
fp.write(pubkey)
fp.seek(0)
key = SigningKey.from_ssh_key(fp.name)
self.assertEqual(key.public_key(), '9569de18c7c0843212569dcddf2615c7f46125dc9b2292dea30b07b56a4d02a6')
self.assertEqual(key.comment(), '[email protected]')
| 39.733696 | 215 | 0.701135 |
4f6deeb61740a243c9e6856c1df3a5c14c664037
| 1,049 |
py
|
Python
|
readthedocs/doc_builder/constants.py
|
agarwalrounak/readthedocs.org
|
4911600c230809bd6fb3585d1903121db2928ad6
|
[
"MIT"
] | 10 |
2019-05-21T03:00:40.000Z
|
2022-03-12T11:24:39.000Z
|
readthedocs/doc_builder/constants.py
|
agarwalrounak/readthedocs.org
|
4911600c230809bd6fb3585d1903121db2928ad6
|
[
"MIT"
] | 12 |
2019-12-05T04:47:01.000Z
|
2022-01-09T00:56:58.000Z
|
readthedocs/doc_builder/constants.py
|
agarwalrounak/readthedocs.org
|
4911600c230809bd6fb3585d1903121db2928ad6
|
[
"MIT"
] | 5 |
2019-07-08T23:45:10.000Z
|
2021-02-26T07:29:49.000Z
|
# -*- coding: utf-8 -*-
"""Doc build constants."""
import logging
import os
import re
from django.conf import settings
log = logging.getLogger(__name__)
MKDOCS_TEMPLATE_DIR = os.path.join(
settings.SITE_ROOT,
'readthedocs',
'templates',
'mkdocs',
)
PDF_RE = re.compile('Output written on (.*?)')
# Docker
DOCKER_SOCKET = getattr(
settings,
'DOCKER_SOCKET',
'unix:///var/run/docker.sock',
)
DOCKER_VERSION = getattr(settings, 'DOCKER_VERSION', 'auto')
DOCKER_IMAGE = getattr(settings, 'DOCKER_IMAGE', 'readthedocs/build:2.0')
DOCKER_IMAGE_SETTINGS = getattr(settings, 'DOCKER_IMAGE_SETTINGS', {})
old_config = getattr(settings, 'DOCKER_BUILD_IMAGES', None)
if old_config:
log.warning(
'Old config detected, DOCKER_BUILD_IMAGES->DOCKER_IMAGE_SETTINGS',
)
DOCKER_IMAGE_SETTINGS.update(old_config)
DOCKER_LIMITS = {'memory': '200m', 'time': 600}
DOCKER_LIMITS.update(getattr(settings, 'DOCKER_LIMITS', {}))
DOCKER_TIMEOUT_EXIT_CODE = 42
DOCKER_OOM_EXIT_CODE = 137
DOCKER_HOSTNAME_MAX_LEN = 64
| 22.319149 | 74 | 0.720686 |
65d125adeccea2d0ff8d39d8340be3e28b3bff80
| 1,268 |
py
|
Python
|
release/scripts/templates_py/gizmo_simple.py
|
noorbeast/blender
|
9dc69b3848b46f4fbf3daa3360a3b975f4e1565f
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 365 |
2015-02-10T15:10:55.000Z
|
2022-03-03T15:50:51.000Z
|
release/scripts/templates_py/gizmo_simple.py
|
noorbeast/blender
|
9dc69b3848b46f4fbf3daa3360a3b975f4e1565f
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 45 |
2015-01-09T15:34:20.000Z
|
2021-10-05T14:44:23.000Z
|
release/scripts/templates_py/gizmo_simple.py
|
noorbeast/blender
|
9dc69b3848b46f4fbf3daa3360a3b975f4e1565f
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 172 |
2015-01-25T15:16:53.000Z
|
2022-01-31T08:25:36.000Z
|
# Example of a group that edits a single property
# using the predefined gizmo arrow.
#
# Usage: Select a light in the 3D view and drag the arrow at its rear
# to change its energy value.
#
import bpy
from bpy.types import (
GizmoGroup,
)
class MyLightWidgetGroup(GizmoGroup):
bl_idname = "OBJECT_GGT_light_test"
bl_label = "Test Light Widget"
bl_space_type = 'VIEW_3D'
bl_region_type = 'WINDOW'
bl_options = {'3D', 'PERSISTENT'}
@classmethod
def poll(cls, context):
ob = context.object
return (ob and ob.type == 'LIGHT')
def setup(self, context):
# Arrow gizmo has one 'offset' property we can assign to the light energy.
ob = context.object
gz = self.gizmos.new("GIZMO_GT_arrow_3d")
gz.target_set_prop("offset", ob.data, "energy")
gz.matrix_basis = ob.matrix_world.normalized()
gz.draw_style = 'BOX'
gz.color = 1.0, 0.5, 0.0
gz.alpha = 0.5
gz.color_highlight = 1.0, 0.5, 1.0
gz.alpha_highlight = 0.5
self.energy_gizmo = gz
def refresh(self, context):
ob = context.object
gz = self.energy_gizmo
gz.matrix_basis = ob.matrix_world.normalized()
bpy.utils.register_class(MyLightWidgetGroup)
| 26.416667 | 82 | 0.643533 |
e7127f6814c7ee9326b2233d8a84cf10f4bd314c
| 567 |
py
|
Python
|
scripts/experiments/data-processing/sns_settings.py
|
aoli-al/JQF-1
|
48d449c52314f17b499f24c81e3512a5b3161d0e
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/experiments/data-processing/sns_settings.py
|
aoli-al/JQF-1
|
48d449c52314f17b499f24c81e3512a5b3161d0e
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/experiments/data-processing/sns_settings.py
|
aoli-al/JQF-1
|
48d449c52314f17b499f24c81e3512a5b3161d0e
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid", {'axes.grid' : True})
sns.set_context("paper", font_scale=1.5)
plt.rcParams.update({'axes.edgecolor': 'black', 'axes.linewidth': 2,
'axes.grid': True, 'grid.linestyle': '--'})
colors = ['#2A587A', '#FABC75', '#83B828', '#F83A25', '#FDD8EB']
sns.palplot(colors)
sns.set_palette(sns.color_palette(colors), 8, .75)
sub_figure_title = {"fontweight": 700, 'fontname':'Times New Roman', 'fontsize': 18}
plt.tight_layout()
| 35.4375 | 84 | 0.659612 |
22e39622f7f33e1e852353bb2bfa6693e897de2b
| 5,579 |
py
|
Python
|
simulaqron/run/run.py
|
qfizik/SimulaQron
|
aadba8bb2cb31edd9fdeb2a95af844b6bd4326a9
|
[
"BSD-3-Clause"
] | 25 |
2017-11-20T08:50:12.000Z
|
2018-07-31T19:02:19.000Z
|
simulaqron/run/run.py
|
qfizik/SimulaQron
|
aadba8bb2cb31edd9fdeb2a95af844b6bd4326a9
|
[
"BSD-3-Clause"
] | 23 |
2017-11-21T21:47:28.000Z
|
2018-10-03T08:28:41.000Z
|
simulaqron/run/run.py
|
qfizik/SimulaQron
|
aadba8bb2cb31edd9fdeb2a95af844b6bd4326a9
|
[
"BSD-3-Clause"
] | 13 |
2017-11-20T08:50:14.000Z
|
2018-09-01T21:44:00.000Z
|
import logging
import os
from concurrent.futures import ProcessPoolExecutor as Pool
from importlib import reload
from time import sleep
from netqasm.logging.glob import get_netqasm_logger
from netqasm.logging.output import (reset_struct_loggers,
save_all_struct_loggers)
from netqasm.runtime import env, process_logs
from netqasm.runtime.app_config import AppConfig
from netqasm.runtime.application import ApplicationInstance
from netqasm.runtime.settings import Formalism
from netqasm.sdk.classical_communication import reset_socket_hub
from netqasm.sdk.config import LogConfig
from netqasm.sdk.shared_memory import SharedMemoryManager
from netqasm.util.yaml import dump_yaml
from simulaqron.network import Network
from simulaqron.settings import SimBackend, simulaqron_settings
from simulaqron.toolbox import has_module
logger = get_netqasm_logger()
# TODO similar code to squidasm.run.run, make base-class and subclasses?
_SIMULAQRON_BACKENDS = {
Formalism.STAB: SimBackend.STABILIZER,
Formalism.KET: SimBackend.PROJECTQ,
Formalism.DM: SimBackend.QUTIP,
}
def as_completed(futures, names=None, sleep_time=0):
futures = list(futures)
if names is not None:
names = list(names)
while len(futures) > 0:
for i, future in enumerate(futures):
if future.done():
futures.pop(i)
if names is None:
yield future
else:
name = names.pop(i)
yield future, name
if sleep_time > 0:
sleep(sleep_time)
def reset(save_loggers=False):
if save_loggers:
save_all_struct_loggers()
SharedMemoryManager.reset_memories()
reset_socket_hub()
reset_struct_loggers()
# Reset logging
logging.shutdown()
reload(logging)
def check_sim_backend(sim_backend):
if sim_backend in [SimBackend.PROJECTQ, SimBackend.QUTIP]:
assert has_module.main(sim_backend.value), f"To use {sim_backend} as backend you need to install the package"
def run_sim_backend(node_names, sim_backend):
logger.debug(f"Starting simulaqron sim_backend process with nodes {node_names}")
check_sim_backend(sim_backend=sim_backend)
simulaqron_settings.sim_backend = sim_backend.value
network = Network(name="default", nodes=node_names, force=True, new=True)
network.start()
return network
def run_applications(
app_instance: ApplicationInstance,
num_rounds=1,
network_cfg=None,
log_cfg=None,
results_file=None,
formalism=Formalism.KET,
post_function=None,
flavour=None,
enable_logging=True,
hardware=None,
use_app_config=True, # whether to give app_config as argument to app's main()
):
"""Executes functions containing application scripts,
Parameters
----------
applications : dict
Keys should be names of nodes
Values should be the functions
"""
# app_names = [app_cfg.app_name for app_cfg in app_cfgs]
app_names = [program.party for program in app_instance.app.programs]
sim_backend = _SIMULAQRON_BACKENDS[formalism]
if enable_logging:
log_cfg = LogConfig() if log_cfg is None else log_cfg
app_instance.logging_cfg = log_cfg
log_dir = (
os.path.abspath("./log") if log_cfg.log_dir is None else log_cfg.log_dir
)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
timed_log_dir = env.get_timed_log_dir(log_dir)
app_instance.logging_cfg.log_subroutines_dir = timed_log_dir
app_instance.logging_cfg.comm_log_dir = timed_log_dir
with Pool(len(app_names)) as executor:
# Start the backend process
network = run_sim_backend(app_names, sim_backend=sim_backend)
# Start the application processes
app_futures = []
programs = app_instance.app.programs
for program in programs:
inputs = app_instance.program_inputs[program.party]
if use_app_config:
app_cfg = AppConfig(
app_name=program.party,
node_name=program.party, # node name should be same as app name
main_func=program.entry,
log_config=app_instance.logging_cfg,
inputs=inputs,
)
inputs["app_config"] = app_cfg
future = executor.submit(program.entry, **inputs)
app_futures.append(future)
# for app_cfg in app_cfgs:
# inputs = app_cfg.inputs
# if use_app_config:
# inputs['app_config'] = app_cfg
# future = executor.submit(app_cfg.main_func, **inputs)
# app_futures.append(future)
# Join the application processes and the backend
names = [f'app_{app_name}' for app_name in app_names]
results = {}
for future, name in as_completed(app_futures, names=names):
results[name] = future.result()
# if results_file is not None:
# save_results(results=results, results_file=results_file)
if enable_logging:
assert timed_log_dir is not None
path = os.path.join(timed_log_dir, "results.yaml")
dump_yaml(data=results, file_path=path)
network.stop()
if enable_logging:
process_logs.make_last_log(log_dir=timed_log_dir)
reset(save_loggers=True)
return [results]
def save_results(results, results_file):
dump_yaml(data=results, file_path=results_file)
| 33.407186 | 117 | 0.673777 |
6fca2143dfe12d62a52c79cded836b8ffa7a4f83
| 26,723 |
py
|
Python
|
keystone/contrib/federation/utils.py
|
yanheven/keystone
|
417b8941095f40674575ed951b4a03ebcdc91fef
|
[
"Apache-2.0"
] | null | null | null |
keystone/contrib/federation/utils.py
|
yanheven/keystone
|
417b8941095f40674575ed951b4a03ebcdc91fef
|
[
"Apache-2.0"
] | null | null | null |
keystone/contrib/federation/utils.py
|
yanheven/keystone
|
417b8941095f40674575ed951b4a03ebcdc91fef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for Federation Extension."""
import ast
import re
import jsonschema
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _, _LW
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MAPPING_SCHEMA = {
"type": "object",
"required": ['rules'],
"properties": {
"rules": {
"minItems": 1,
"type": "array",
"items": {
"type": "object",
"required": ['local', 'remote'],
"additionalProperties": False,
"properties": {
"local": {
"type": "array"
},
"remote": {
"minItems": 1,
"type": "array",
"items": {
"type": "object",
"oneOf": [
{"$ref": "#/definitions/empty"},
{"$ref": "#/definitions/any_one_of"},
{"$ref": "#/definitions/not_any_of"},
{"$ref": "#/definitions/blacklist"},
{"$ref": "#/definitions/whitelist"}
],
}
}
}
}
}
},
"definitions": {
"empty": {
"type": "object",
"required": ['type'],
"properties": {
"type": {
"type": "string"
},
},
"additionalProperties": False,
},
"any_one_of": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'any_one_of'],
"properties": {
"type": {
"type": "string"
},
"any_one_of": {
"type": "array"
},
"regex": {
"type": "boolean"
}
}
},
"not_any_of": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'not_any_of'],
"properties": {
"type": {
"type": "string"
},
"not_any_of": {
"type": "array"
},
"regex": {
"type": "boolean"
}
}
},
"blacklist": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'blacklist'],
"properties": {
"type": {
"type": "string"
},
"blacklist": {
"type": "array"
}
}
},
"whitelist": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'whitelist'],
"properties": {
"type": {
"type": "string"
},
"whitelist": {
"type": "array"
}
}
}
}
}
class DirectMaps(object):
"""An abstraction around the remote matches.
Each match is treated internally as a list.
"""
def __init__(self):
self._matches = []
def add(self, values):
"""Adds a matched value to the list of matches.
:param list value: the match to save
"""
self._matches.append(values)
def __getitem__(self, idx):
"""Used by Python when executing ``''.format(*DirectMaps())``."""
value = self._matches[idx]
if isinstance(value, list) and len(value) == 1:
return value[0]
else:
return value
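# Illustrative behaviour of DirectMaps (values invented for the example): after
#   maps = DirectMaps(); maps.add(['alice']); maps.add(['admin', 'member'])
# '{0} {1}'.format(*maps) gives "alice ['admin', 'member']"; single-element matches
# are unwrapped by __getitem__, longer ones are kept as lists.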
def validate_mapping_structure(ref):
v = jsonschema.Draft4Validator(MAPPING_SCHEMA)
messages = ''
for error in sorted(v.iter_errors(ref), key=str):
messages = messages + error.message + "\n"
if messages:
raise exception.ValidationError(messages)
def validate_expiration(token_ref):
if timeutils.utcnow() > token_ref.expires:
raise exception.Unauthorized(_('Federation token is expired'))
def validate_groups_cardinality(group_ids, mapping_id):
"""Check if groups list is non-empty.
:param group_ids: list of group ids
:type group_ids: list of str
:raises exception.MissingGroups: if ``group_ids`` cardinality is 0
"""
if not group_ids:
raise exception.MissingGroups(mapping_id=mapping_id)
def get_remote_id_parameter(protocol):
# NOTE(marco-fargetta): Since we support any protocol ID, we attempt to
# retrieve the remote_id_attribute of the protocol ID. If it's not
# registered in the config, then register the option and try again.
# This allows the user to register protocols other than oidc and saml2.
remote_id_parameter = None
try:
remote_id_parameter = CONF[protocol]['remote_id_attribute']
except AttributeError:
CONF.register_opt(cfg.StrOpt('remote_id_attribute'),
group=protocol)
try:
remote_id_parameter = CONF[protocol]['remote_id_attribute']
except AttributeError:
pass
if not remote_id_parameter:
LOG.debug('Cannot find "remote_id_attribute" in configuration '
'group %s. Trying default location in '
'group federation.', protocol)
remote_id_parameter = CONF.federation.remote_id_attribute
return remote_id_parameter
def validate_idp(idp, protocol, assertion):
"""Validate the IdP providing the assertion is registered for the mapping.
"""
remote_id_parameter = get_remote_id_parameter(protocol)
if not remote_id_parameter or not idp['remote_ids']:
LOG.debug('Impossible to identify the IdP %s ', idp['id'])
# If nothing is defined, the administrator may want to
# allow the mapping of every IdP
return
try:
idp_remote_identifier = assertion[remote_id_parameter]
except KeyError:
msg = _('Could not find Identity Provider identifier in '
'environment')
raise exception.ValidationError(msg)
if idp_remote_identifier not in idp['remote_ids']:
msg = _('Incoming identity provider identifier not included '
'among the accepted identifiers.')
raise exception.Forbidden(msg)
def validate_groups_in_backend(group_ids, mapping_id, identity_api):
"""Iterate over group ids and make sure they are present in the backend/
This call is not transactional.
:param group_ids: IDs of the groups to be checked
:type group_ids: list of str
:param mapping_id: id of the mapping used for this operation
:type mapping_id: str
:param identity_api: Identity Manager object used for communication with
backend
:type identity_api: identity.Manager
:raises: exception.MappedGroupNotFound
"""
for group_id in group_ids:
try:
identity_api.get_group(group_id)
except exception.GroupNotFound:
raise exception.MappedGroupNotFound(
group_id=group_id, mapping_id=mapping_id)
def validate_groups(group_ids, mapping_id, identity_api):
"""Check group ids cardinality and check their existence in the backend.
This call is not transactional.
:param group_ids: IDs of the groups to be checked
:type group_ids: list of str
:param mapping_id: id of the mapping used for this operation
:type mapping_id: str
:param identity_api: Identity Manager object used for communication with
backend
:type identity_api: identity.Manager
:raises: exception.MappedGroupNotFound
:raises: exception.MissingGroups
"""
validate_groups_cardinality(group_ids, mapping_id)
validate_groups_in_backend(group_ids, mapping_id, identity_api)
# TODO(marek-denis): Optimize this function, so the number of calls to the
# backend are minimized.
def transform_to_group_ids(group_names, mapping_id,
identity_api, assignment_api):
"""Transform groups identitified by name/domain to their ids
Function accepts list of groups identified by a name and domain giving
a list of group ids in return.
Example of group_names parameter::
[
{
"name": "group_name",
"domain": {
"id": "domain_id"
},
},
{
"name": "group_name_2",
"domain": {
"name": "domain_name"
}
}
]
:param group_names: list of group identified by name and its domain.
:type group_names: list
:param mapping_id: id of the mapping used for mapping assertion into
local credentials
:type mapping_id: str
:param identity_api: identity_api object
:param assignment_api: assignment_api object
:returns: generator object with group ids
    :raises: exception.MappedGroupNotFound: in case the asked group doesn't
exist in the backend.
"""
def resolve_domain(domain):
"""Return domain id.
        Input is a dictionary with a domain identified either by an ``id`` or a
        ``name``. In the latter case the system will attempt to fetch the domain object
from the backend.
:returns: domain's id
:rtype: str
"""
domain_id = (domain.get('id') or
assignment_api.get_domain_by_name(
domain.get('name')).get('id'))
return domain_id
for group in group_names:
try:
group_dict = identity_api.get_group_by_name(
group['name'], resolve_domain(group['domain']))
yield group_dict['id']
except exception.GroupNotFound:
LOG.debug('Skip mapping group %s; has no entry in the backend',
group['name'])
def get_assertion_params_from_env(context):
LOG.debug('Environment variables: %s', context['environment'])
prefix = CONF.federation.assertion_prefix
for k, v in context['environment'].items():
if k.startswith(prefix):
yield (k, v)
class UserType(object):
"""User mapping type."""
EPHEMERAL = 'ephemeral'
LOCAL = 'local'
class RuleProcessor(object):
"""A class to process assertions and mapping rules."""
class _EvalType(object):
"""Mapping rule evaluation types."""
ANY_ONE_OF = 'any_one_of'
NOT_ANY_OF = 'not_any_of'
BLACKLIST = 'blacklist'
WHITELIST = 'whitelist'
def __init__(self, rules):
"""Initialize RuleProcessor.
Example rules can be found at:
:class:`keystone.tests.mapping_fixtures`
:param rules: rules from a mapping
:type rules: dict
"""
self.rules = rules
def process(self, assertion_data):
"""Transform assertion to a dictionary of user name and group ids
based on mapping rules.
This function will iterate through the mapping rules to find
assertions that are valid.
:param assertion_data: an assertion containing values from an IdP
:type assertion_data: dict
Example assertion_data::
{
'Email': '[email protected]',
'UserName': 'testacct',
'FirstName': 'Test',
'LastName': 'Account',
'orgPersonType': 'Tester'
}
:returns: dictionary with user and group_ids
The expected return structure is::
{
'name': 'foobar',
'group_ids': ['abc123', 'def456'],
'group_names': [
{
'name': 'group_name_1',
'domain': {
'name': 'domain1'
}
},
{
'name': 'group_name_1_1',
'domain': {
'name': 'domain1'
}
},
{
'name': 'group_name_2',
'domain': {
'id': 'xyz132'
}
}
]
}
"""
# Assertions will come in as string key-value pairs, and will use a
# semi-colon to indicate multiple values, i.e. groups.
# This will create a new dictionary where the values are arrays, and
# any multiple values are stored in the arrays.
LOG.debug('assertion data: %s', assertion_data)
assertion = {n: v.split(';') for n, v in assertion_data.items()
if isinstance(v, six.string_types)}
LOG.debug('assertion: %s', assertion)
identity_values = []
LOG.debug('rules: %s', self.rules)
for rule in self.rules:
direct_maps = self._verify_all_requirements(rule['remote'],
assertion)
# If the compare comes back as None, then the rule did not apply
# to the assertion data, go on to the next rule
if direct_maps is None:
continue
# If there are no direct mappings, then add the local mapping
# directly to the array of saved values. However, if there is
# a direct mapping, then perform variable replacement.
if not direct_maps:
identity_values += rule['local']
else:
for local in rule['local']:
new_local = self._update_local_mapping(local, direct_maps)
identity_values.append(new_local)
LOG.debug('identity_values: %s', identity_values)
mapped_properties = self._transform(identity_values)
LOG.debug('mapped_properties: %s', mapped_properties)
return mapped_properties
def _transform(self, identity_values):
"""Transform local mappings, to an easier to understand format.
Transform the incoming array to generate the return value for
the process function. Generating content for Keystone tokens will
be easier if some pre-processing is done at this level.
:param identity_values: local mapping from valid evaluations
:type identity_values: array of dict
Example identity_values::
[
{
'group': {'id': '0cd5e9'},
'user': {
'email': '[email protected]'
},
},
{
                    'groups': ['member', 'admin', 'tester'],
'domain': {
'name': 'default_domain'
}
}
]
:returns: dictionary with user name, group_ids and group_names.
:rtype: dict
"""
def extract_groups(groups_by_domain):
for groups in groups_by_domain.values():
for group in {g['name']: g for g in groups}.values():
yield group
def normalize_user(user):
"""Parse and validate user mapping."""
user_type = user.get('type')
if user_type and user_type not in (UserType.EPHEMERAL,
UserType.LOCAL):
msg = _("User type %s not supported") % user_type
raise exception.ValidationError(msg)
if user_type is None:
user_type = user['type'] = UserType.EPHEMERAL
if user_type == UserType.EPHEMERAL:
user['domain'] = {
'id': (CONF.federation.federated_domain_name or
federation.FEDERATED_DOMAIN_KEYWORD)
}
# initialize the group_ids as a set to eliminate duplicates
user = {}
group_ids = set()
group_names = list()
groups_by_domain = dict()
for identity_value in identity_values:
if 'user' in identity_value:
# if a mapping outputs more than one user name, log it
if user:
LOG.warning(_LW('Ignoring user name'))
else:
user = identity_value.get('user')
if 'group' in identity_value:
group = identity_value['group']
if 'id' in group:
group_ids.add(group['id'])
elif 'name' in group:
domain = (group['domain'].get('name') or
group['domain'].get('id'))
groups_by_domain.setdefault(domain, list()).append(group)
group_names.extend(extract_groups(groups_by_domain))
if 'groups' in identity_value:
if 'domain' not in identity_value:
msg = _("Invalid rule: %(identity_value)s. Both 'groups' "
"and 'domain' keywords must be specified.")
msg = msg % {'identity_value': identity_value}
raise exception.ValidationError(msg)
# In this case, identity_value['groups'] is a string
# representation of a list, and we want a real list. This is
# due to the way we do direct mapping substitutions today (see
# function _update_local_mapping() )
try:
group_names_list = ast.literal_eval(
identity_value['groups'])
except ValueError:
group_names_list = [identity_value['groups']]
domain = identity_value['domain']
group_dicts = [{'name': name, 'domain': domain} for name in
group_names_list]
group_names.extend(group_dicts)
normalize_user(user)
return {'user': user,
'group_ids': list(group_ids),
'group_names': group_names}
def _update_local_mapping(self, local, direct_maps):
"""Replace any {0}, {1} ... values with data from the assertion.
:param local: local mapping reference that needs to be updated
:type local: dict
:param direct_maps: identity values used to update local
:type direct_maps: keystone.contrib.federation.utils.DirectMaps
Example local::
{'user': {'name': '{0} {1}', 'email': '{2}'}}
Example direct_maps::
['Bob', 'Thompson', '[email protected]']
:returns: new local mapping reference with replaced values.
The expected return structure is::
{'user': {'name': 'Bob Thompson', 'email': '[email protected]'}}
"""
LOG.debug('direct_maps: %s', direct_maps)
LOG.debug('local: %s', local)
new = {}
for k, v in six.iteritems(local):
if isinstance(v, dict):
new_value = self._update_local_mapping(v, direct_maps)
else:
new_value = v.format(*direct_maps)
new[k] = new_value
return new
def _verify_all_requirements(self, requirements, assertion):
"""Go through the remote requirements of a rule, and compare against
the assertion.
If a value of ``None`` is returned, the rule with this assertion
doesn't apply.
If an array of zero length is returned, then there are no direct
mappings to be performed, but the rule is valid.
        Otherwise, it will first attempt to filter the values according to
        blacklist or whitelist rules and finally return the values in order,
        to be directly mapped.
:param requirements: list of remote requirements from rules
:type requirements: list
Example requirements::
[
{
"type": "UserName"
},
{
"type": "orgPersonType",
"any_one_of": [
"Customer"
]
},
{
"type": "ADFS_GROUPS",
"whitelist": [
"g1", "g2", "g3", "g4"
]
}
]
:param assertion: dict of attributes from an IdP
:type assertion: dict
Example assertion::
{
'UserName': ['testacct'],
'LastName': ['Account'],
'orgPersonType': ['Tester'],
'Email': ['[email protected]'],
'FirstName': ['Test'],
'ADFS_GROUPS': ['g1', 'g2']
}
:returns: identity values used to update local
:rtype: keystone.contrib.federation.utils.DirectMaps
"""
direct_maps = DirectMaps()
for requirement in requirements:
requirement_type = requirement['type']
regex = requirement.get('regex', False)
any_one_values = requirement.get(self._EvalType.ANY_ONE_OF)
if any_one_values is not None:
if self._evaluate_requirement(any_one_values,
requirement_type,
self._EvalType.ANY_ONE_OF,
regex,
assertion):
continue
else:
return None
not_any_values = requirement.get(self._EvalType.NOT_ANY_OF)
if not_any_values is not None:
if self._evaluate_requirement(not_any_values,
requirement_type,
self._EvalType.NOT_ANY_OF,
regex,
assertion):
continue
else:
return None
# If 'any_one_of' or 'not_any_of' are not found, then values are
# within 'type'. Attempt to find that 'type' within the assertion,
# and filter these values if 'whitelist' or 'blacklist' is set.
direct_map_values = assertion.get(requirement_type)
if direct_map_values:
blacklisted_values = requirement.get(self._EvalType.BLACKLIST)
whitelisted_values = requirement.get(self._EvalType.WHITELIST)
# If a blacklist or whitelist is used, we want to map to the
# whole list instead of just its values separately.
if blacklisted_values is not None:
direct_map_values = [v for v in direct_map_values
if v not in blacklisted_values]
elif whitelisted_values is not None:
direct_map_values = [v for v in direct_map_values
if v in whitelisted_values]
direct_maps.add(direct_map_values)
LOG.debug('updating a direct mapping: %s', direct_map_values)
return direct_maps
def _evaluate_values_by_regex(self, values, assertion_values):
for value in values:
for assertion_value in assertion_values:
if re.search(value, assertion_value):
return True
return False
def _evaluate_requirement(self, values, requirement_type,
eval_type, regex, assertion):
"""Evaluate the incoming requirement and assertion.
If the requirement type does not exist in the assertion data, then
return False. If regex is specified, then compare the values and
assertion values. Otherwise, grab the intersection of the values
and use that to compare against the evaluation type.
:param values: list of allowed values, defined in the requirement
:type values: list
:param requirement_type: key to look for in the assertion
:type requirement_type: string
:param eval_type: determine how to evaluate requirements
:type eval_type: string
:param regex: perform evaluation with regex
:type regex: boolean
:param assertion: dict of attributes from the IdP
:type assertion: dict
:returns: boolean, whether requirement is valid or not.
"""
assertion_values = assertion.get(requirement_type)
if not assertion_values:
return False
if regex:
any_match = self._evaluate_values_by_regex(values,
assertion_values)
else:
any_match = bool(set(values).intersection(set(assertion_values)))
if any_match and eval_type == self._EvalType.ANY_ONE_OF:
return True
if not any_match and eval_type == self._EvalType.NOT_ANY_OF:
return True
return False
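# Illustrative usage sketch (editor's addition, not part of the original
# module): one hypothetical mapping rule and IdP assertion pushed through
# RuleProcessor. The rule, assertion values and group id are made-up
# examples, not keystone fixtures.
#
#     rules = [{
#         'local': [{'user': {'name': '{0}'}}, {'group': {'id': '0cd5e9'}}],
#         'remote': [
#             {'type': 'UserName'},
#             {'type': 'orgPersonType', 'any_one_of': ['Tester']},
#         ],
#     }]
#     mapped = RuleProcessor(rules).process(
#         {'UserName': 'testacct', 'orgPersonType': 'Tester'})
#     # mapped -> {'user': {'name': 'testacct', ...},
#     #            'group_ids': ['0cd5e9'], 'group_names': []}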
def assert_enabled_identity_provider(federation_api, idp_id):
identity_provider = federation_api.get_idp(idp_id)
if identity_provider.get('enabled') is not True:
msg = _('Identity Provider %(idp)s is disabled') % {'idp': idp_id}
LOG.debug(msg)
raise exception.Forbidden(msg)
def assert_enabled_service_provider_object(service_provider):
if service_provider.get('enabled') is not True:
sp_id = service_provider['id']
msg = _('Service Provider %(sp)s is disabled') % {'sp': sp_id}
LOG.debug(msg)
raise exception.Forbidden(msg)
| 33.998728 | 79 | 0.539198 |
776b2537f4a8f2fb3dae86f6ac8c531eacddea54 | 5,075 | py | Python
| io_scene_halo/file_tag/import_tag.py | SamDamDing/Halo-Asset-Blender-Development-Toolset | f81aba257c73b6cbce7805d1c7e424dba77fd761 | ["Unlicense"] | 3 | 2020-05-04T15:58:24.000Z | 2020-08-04T00:17:33.000Z
| io_scene_halo/file_tag/import_tag.py | SamDamDing/Halo-Asset-Blender-Development-Toolset | f81aba257c73b6cbce7805d1c7e424dba77fd761 | ["Unlicense"] | 18 | 2020-05-24T07:07:55.000Z | 2020-08-24T20:34:14.000Z
| io_scene_halo/file_tag/import_tag.py | SamDamDing/Halo-Asset-Blender-Development-Toolset | f81aba257c73b6cbce7805d1c7e424dba77fd761 | ["Unlicense"] | 1 | 2020-08-13T06:33:51.000Z | 2020-08-13T06:33:51.000Z |
# ##### BEGIN MIT LICENSE BLOCK #####
#
# MIT License
#
# Copyright (c) 2022 Steven Garcia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ##### END MIT LICENSE BLOCK #####
import bpy
from ..global_functions import tag_format, mesh_processing, global_functions
from ..file_tag.file_model import build_scene as build_scene_model
from ..file_tag.file_physics import build_scene as build_scene_physics
from ..file_tag.file_animation import build_scene as build_scene_animation
from ..file_tag.file_collision import build_scene as build_scene_collision
from ..file_tag.file_structure_bsp import build_scene as build_scene_level
from ..file_tag.file_scenario import build_scene as build_scenario
from ..file_tag.file_camera_track import build_scene as build_camera_track
from ..file_tag.file_model.process_file_mode_retail import process_file_mode_retail as process_mode
from ..file_tag.file_model.process_file_mod2_retail import process_file_mod2_retail as process_mod2
from ..file_tag.file_collision.h1.process_file_retail import process_file_retail as process_collision_retail
from ..file_tag.file_collision.h2.process_file import process_file as process_h2_collision
from ..file_tag.file_physics.process_file_retail import process_file_retail as process_physics_retail
from ..file_tag.file_animation.process_file_retail import process_file_retail as process_animation_retail
from ..file_tag.file_structure_bsp.h1.process_file_retail import process_file_retail as process_level_retail
from ..file_tag.file_structure_bsp.h2.process_file import process_file_retail as process_h2_level
from ..file_tag.file_scenario.h2.process_file import process_file as process_scenario
from ..file_tag.file_camera_track.process_file_retail import process_file_retail as process_camera_track_retail
def load_file(context, file_path, fix_rotations, report):
input_stream = open(file_path, "rb")
if tag_format.check_file_size(input_stream) < 64: # Size of the header for all tags
input_stream.close()
report({'ERROR'}, "File size does not meet the minimum amount required. File is either not a tag or corrupted")
return {'CANCELLED'}
tag_group, group_is_valid = tag_format.check_group(input_stream)
if not group_is_valid:
input_stream.close()
report({'ERROR'}, "File does not have a valid tag class. Make sure you are importing a tag supported by the toolset")
return {'CANCELLED'}
if tag_group == "mode" or tag_group == "mod2":
build_scene = build_scene_model
if tag_group == "mode":
ASSET = process_mode(input_stream, tag_format, report)
else:
ASSET = process_mod2(input_stream, tag_format, report)
elif tag_group == "coll":
build_scene = build_scene_collision
ASSET = process_collision_retail(input_stream, tag_format, report)
elif tag_group == "lloc":
build_scene = build_scene_collision
ASSET = process_h2_collision(input_stream, tag_format, report)
elif tag_group == "phys":
build_scene = build_scene_physics
ASSET = process_physics_retail(input_stream, tag_format, report)
elif tag_group == "antr":
build_scene = build_scene_animation
ASSET = process_animation_retail(input_stream, tag_format, report)
elif tag_group == "sbsp":
build_scene = build_scene_level
ASSET = process_level_retail(input_stream, tag_format, report)
elif tag_group == "psbs":
build_scene = build_scene_level
ASSET = process_h2_level(input_stream, tag_format, report)
elif tag_group == "trak":
build_scene = build_camera_track
ASSET = process_camera_track_retail(input_stream, tag_format, report)
else:
input_stream.close()
report({'ERROR'}, "Not implemented")
return {'CANCELLED'}
input_stream.close()
build_scene.build_scene(context, ASSET, fix_rotations, report, mesh_processing, global_functions)
return {'FINISHED'}
if __name__ == '__main__':
bpy.ops.import_scene.model()
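# Illustrative sketch (editor's addition, not part of the add-on): how a
# Blender import operator's execute() might delegate to load_file(). The
# operator class, bl_idname and properties below are hypothetical
# placeholders, not operators defined by this toolset.
#
#     class ImportTagExample(bpy.types.Operator):
#         bl_idname = "import_scene.tag_example"
#         bl_label = "Import Tag (example)"
#         filepath: bpy.props.StringProperty(subtype='FILE_PATH')
#         fix_rotations: bpy.props.BoolProperty(default=False)
#
#         def execute(self, context):
#             return load_file(context, self.filepath,
#                              self.fix_rotations, self.report)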
| 42.291667 | 125 | 0.763547 |
61a9be4a808b180b25222cdc80214ff3897191cb | 4,729 | py | Python
| src/backend/common/queries/tests/mobile_client_query_test.py | 770352/the-blue-alliance | af70d2e9f8fc3642fe939e928d0729db3a9b4cca | ["MIT"] | 266 | 2015-01-04T00:10:48.000Z | 2022-03-28T18:42:05.000Z
| src/backend/common/queries/tests/mobile_client_query_test.py | 770352/the-blue-alliance | af70d2e9f8fc3642fe939e928d0729db3a9b4cca | ["MIT"] | 2,673 | 2015-01-01T20:14:33.000Z | 2022-03-31T18:17:16.000Z
| src/backend/common/queries/tests/mobile_client_query_test.py | ZachOrr/the-blue-alliance | b9a2e6e07374fb12c70f8fae1948bfe90e34adfe | ["MIT"] | 230 | 2015-01-04T00:10:48.000Z | 2022-03-26T18:12:04.000Z |
from typing import Callable, List, Optional
import pytest
from google.appengine.ext import ndb
from backend.common.consts.client_type import ClientType
from backend.common.models.account import Account
from backend.common.models.mobile_client import MobileClient
from backend.common.queries.mobile_client_query import MobileClientQuery
def _client(
user_id: str, client_type: ClientType = ClientType.OS_IOS, verified: bool = True
) -> Callable[[], MobileClient]:
def create_client():
client = MobileClient(
parent=ndb.Key(Account, user_id),
user_id=user_id,
messaging_id="token",
client_type=client_type,
device_uuid="uuid",
display_name="Phone",
verified=verified,
)
client.put()
return client
return create_client
@pytest.mark.parametrize(
"clients, user_ids, client_types, expected_users",
[
([_client("abc"), _client("efg", verified=False)], [], None, []),
([_client("abc"), _client("efg", verified=False)], ["abc"], [], []),
([_client("abc"), _client("efg", verified=False)], [], [], []),
([_client("abc"), _client("efg", verified=False)], ["abc"], None, ["abc"]),
([_client("abc"), _client("efg", verified=False)], ["efg"], None, []),
([_client("abc"), _client("efg")], ["abc"], None, ["abc"]),
([_client("abc"), _client("efg")], ["abc", "efg"], None, ["abc", "efg"]),
(
[_client("abc"), _client("efg", ClientType.OS_ANDROID)],
["abc", "efg"],
None,
["abc", "efg"],
),
(
[_client("abc"), _client("efg", ClientType.OS_ANDROID)],
["abc", "efg"],
[ClientType.OS_IOS],
["abc"],
),
],
)
def test_mobile_client_list(
clients,
user_ids: List[str],
client_types: Optional[List[ClientType]],
expected_users: List[str],
) -> None:
clients = [client() for client in clients]
expected = [client for client in clients if client.user_id in expected_users]
if client_types is not None:
mobile_clients = MobileClientQuery(user_ids=user_ids, client_types=client_types)
else:
mobile_clients = MobileClientQuery(user_ids=user_ids)
assert mobile_clients.fetch() == expected
def test_mobile_client_list_only_verified() -> None:
user_id = "user-id"
verified = MobileClient(
id="verified",
user_id=user_id,
messaging_id="token",
client_type=ClientType.OS_IOS,
verified=True,
)
verified.put()
unverified = MobileClient(
id="unverified",
user_id=user_id,
messaging_id="token",
client_type=ClientType.OS_IOS,
verified=False,
)
unverified.put()
assert [verified] == MobileClientQuery(user_ids=[user_id]).fetch()
assert [verified] == MobileClientQuery(
user_ids=[user_id], only_verified=True
).fetch()
assert [unverified, verified] == MobileClientQuery(
user_ids=[user_id], only_verified=False
).fetch()
# def test_delete_for_messaging_id(self):
# user_id_one = 'user_id_one'
# messaging_id_one = 'messaging_id1'
# messaging_id_two = 'messaging_id2'
#
# user_id_two = 'user_id_two'
# messaging_id_three = 'messaging_id3'
#
# for (user_id, messaging_ids) in [(user_id_one, [messaging_id_one, messaging_id_two]), (user_id_two, [messaging_id_three])]:
# for messaging_id in messaging_ids:
# MobileClient(
# parent=ndb.Key(Account, user_id),
# user_id=user_id,
# messaging_id=messaging_id,
# client_type=ClientType.OS_IOS,
# device_uuid=messaging_id[::-1],
# display_name='Phone').put()
#
# MobileClient.delete_for_messaging_id(messaging_id_one)
#
# clients_one = [client.messaging_id for client in MobileClient.query(MobileClient.user_id == 'user_id_one').fetch()]
# clients_two = [client.messaging_id for client in MobileClient.query(MobileClient.user_id == 'user_id_two').fetch()]
#
# self.assertEqual(clients_one, [messaging_id_two])
# self.assertEqual(clients_two, [messaging_id_three])
#
# MobileClient.delete_for_messaging_id(messaging_id_two)
#
# clients_one = [client.messaging_id for client in MobileClient.query(MobileClient.user_id == 'user_id_one').fetch()]
# clients_two = [client.messaging_id for client in MobileClient.query(MobileClient.user_id == 'user_id_two').fetch()]
#
# self.assertEqual(clients_one, [])
# self.assertEqual(clients_two, [messaging_id_three])
#
# MobileClient.delete_for_messaging_id('does_not_exist')
| 35.825758 | 129 | 0.635229 |
34089daa94f4862ae13b7effcfff88a72692ea5c | 104,315 | py | Python
| minio/api.py | cbows/minio-py | cfd711d0594b5885d5cdd6da94527a69b3d5b646 | ["Apache-2.0"] | null | null | null
| minio/api.py | cbows/minio-py | cfd711d0594b5885d5cdd6da94527a69b3d5b646 | ["Apache-2.0"] | null | null | null
| minio/api.py | cbows/minio-py | cfd711d0594b5885d5cdd6da94527a69b3d5b646 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C)
# 2015, 2016, 2017 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-lines,disable=too-many-branches,too-many-statements
# pylint: disable=too-many-arguments
"""
Simple Storage Service (aka S3) client to perform bucket and object operations.
"""
from __future__ import absolute_import
import itertools
import json
import os
import platform
from datetime import timedelta
from threading import Thread
from urllib.parse import urlunsplit
from xml.etree import ElementTree as ET
import certifi
import urllib3
from urllib3._collections import HTTPHeaderDict
from . import __title__, __version__, time
from .commonconfig import COPY, REPLACE, ComposeSource, CopySource, Tags
from .credentials import StaticProvider
from .datatypes import (CompleteMultipartUploadResult, ListAllMyBucketsResult,
ListMultipartUploadsResult, ListPartsResult, Object,
Part, PostPolicy, parse_copy_object,
parse_list_objects)
from .deleteobjects import DeleteError, DeleteRequest, DeleteResult
from .error import InvalidResponseError, S3Error, ServerError
from .helpers import (MAX_MULTIPART_COUNT, MAX_MULTIPART_OBJECT_SIZE,
MAX_PART_SIZE, MIN_PART_SIZE, BaseURL, ObjectWriteResult,
ThreadPool, check_bucket_name, check_non_empty_string,
check_sse, check_ssec, genheaders, get_part_info,
headers_to_strings, is_valid_policy_type, makedirs,
md5sum_hash, read_part_data, sha256_hash)
from .legalhold import LegalHold
from .lifecycleconfig import LifecycleConfig
from .notificationconfig import NotificationConfig
from .objectlockconfig import ObjectLockConfig
from .replicationconfig import ReplicationConfig
from .retention import Retention
from .select import SelectObjectReader, SelectRequest
from .signer import presign_v4, sign_v4_s3
from .sse import SseCustomerKey
from .sseconfig import SSEConfig
from .tagging import Tagging
from .versioningconfig import VersioningConfig
from .xml import Element, SubElement, findtext, getbytes, marshal, unmarshal
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
_DEFAULT_USER_AGENT = "MinIO ({os}; {arch}) {lib}/{ver}".format(
os=platform.system(), arch=platform.machine(),
lib=__title__, ver=__version__,
)
class Minio: # pylint: disable=too-many-public-methods
"""
Simple Storage Service (aka S3) client to perform bucket and object
operations.
:param endpoint: Hostname of a S3 service.
:param access_key: Access key (aka user ID) of your account in S3 service.
:param secret_key: Secret Key (aka password) of your account in S3 service.
:param session_token: Session token of your account in S3 service.
:param secure: Flag to indicate to use secure (TLS) connection to S3
service or not.
:param region: Region name of buckets in S3 service.
:param http_client: Customized HTTP client.
:param credentials: Credentials provider of your account in S3 service.
:return: :class:`Minio <Minio>` object
Example::
# Create client with anonymous access.
client = Minio("play.min.io")
# Create client with access and secret key.
client = Minio("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY")
# Create client with access key and secret key with specific region.
client = Minio(
"play.minio.io:9000",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
region="my-region",
)
**NOTE on concurrent usage:** `Minio` object is thread safe when using
the Python `threading` library. Specifically, it is **NOT** safe to share
it between multiple processes, for example when using
`multiprocessing.Pool`. The solution is simply to create a new `Minio`
object in each process, and not share it between processes.
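    A minimal per-process sketch using the ``multiprocessing`` module
    (illustrative only; the bucket names below are placeholders)::

        from multiprocessing import Pool

        def list_names(bucket):
            # Each worker builds its own client instead of sharing one.
            client = Minio("play.min.io")
            return [o.object_name for o in client.list_objects(bucket)]

        with Pool(2) as pool:
            print(pool.map(list_names, ["bucket-one", "bucket-two"]))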
"""
# pylint: disable=too-many-function-args
def __init__(self, endpoint, access_key=None,
secret_key=None,
session_token=None,
secure=True,
region=None,
http_client=None,
credentials=None):
# Validate http client has correct base class.
if http_client and not isinstance(
http_client,
urllib3.poolmanager.PoolManager):
raise ValueError(
"HTTP client should be instance of "
"`urllib3.poolmanager.PoolManager`"
)
self._region_map = dict()
self._base_url = BaseURL(
("https://" if secure else "http://") + endpoint,
region,
)
self._user_agent = _DEFAULT_USER_AGENT
self._trace_stream = None
if access_key:
credentials = StaticProvider(access_key, secret_key, session_token)
self._provider = credentials
# Load CA certificates from SSL_CERT_FILE file if set
timeout = timedelta(minutes=5).seconds
ca_certs = os.environ.get('SSL_CERT_FILE') or certifi.where()
self._http = http_client or urllib3.PoolManager(
timeout=urllib3.util.Timeout(connect=timeout, read=timeout),
maxsize=10,
cert_reqs='CERT_REQUIRED',
ca_certs=ca_certs,
retries=urllib3.Retry(
total=5,
backoff_factor=0.2,
status_forcelist=[500, 502, 503, 504]
)
)
def __del__(self):
self._http.clear()
def _handle_redirect_response(
self, method, bucket_name, response, retry=False,
):
"""
        Handle a redirect response and indicate whether the HEAD request
        should be retried on failure.
"""
code, message = {
301: ("PermanentRedirect", "Moved Permanently"),
307: ("Redirect", "Temporary redirect"),
400: ("BadRequest", "Bad request"),
}.get(response.status, (None, None))
region = response.getheader("x-amz-bucket-region")
if message and region:
message += "; use region " + region
if (
retry and region and method == "HEAD" and bucket_name and
self._region_map.get(bucket_name)
):
code, message = ("RetryHead", None)
return code, message
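        # e.g. a 301 response carrying "x-amz-bucket-region: us-west-2" maps
        # to ("PermanentRedirect", "Moved Permanently; use region us-west-2"),
        # unless the retry conditions above turn it into ("RetryHead", None).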
def _build_headers(self, host, headers, body, creds):
"""Build headers with given parameters."""
headers = headers or {}
md5sum_added = headers.get("Content-MD5")
headers["Host"] = host
headers["User-Agent"] = self._user_agent
sha256 = None
md5sum = None
if body:
headers["Content-Length"] = str(len(body))
if creds:
if self._base_url.is_https:
sha256 = "UNSIGNED-PAYLOAD"
md5sum = None if md5sum_added else md5sum_hash(body)
else:
sha256 = sha256_hash(body)
else:
md5sum = None if md5sum_added else md5sum_hash(body)
if md5sum:
headers["Content-MD5"] = md5sum
if sha256:
headers["x-amz-content-sha256"] = sha256
if creds and creds.session_token:
headers["X-Amz-Security-Token"] = creds.session_token
date = time.utcnow()
headers["x-amz-date"] = time.to_amz_date(date)
return headers, date
def _url_open( # pylint: disable=too-many-branches
self,
method,
region,
bucket_name=None,
object_name=None,
body=None,
headers=None,
query_params=None,
preload_content=True,
no_body_trace=False,
):
"""Execute HTTP request."""
creds = self._provider.retrieve() if self._provider else None
url = self._base_url.build(
method,
region,
bucket_name=bucket_name,
object_name=object_name,
query_params=query_params,
)
headers, date = self._build_headers(url.netloc, headers, body, creds)
if creds:
headers = sign_v4_s3(
method,
url,
region,
headers,
creds,
headers.get("x-amz-content-sha256"),
date,
)
if self._trace_stream:
self._trace_stream.write("---------START-HTTP---------\n")
self._trace_stream.write(
"{0} {1}{2}{3} HTTP/1.1\n".format(
method,
url.path,
"?" if url.query else "",
url.query or "",
),
)
self._trace_stream.write(
headers_to_strings(headers, titled_key=True),
)
self._trace_stream.write("\n")
if not no_body_trace and body is not None:
self._trace_stream.write("\n")
self._trace_stream.write(
body.decode() if isinstance(body, bytes) else str(body),
)
self._trace_stream.write("\n")
self._trace_stream.write("\n")
http_headers = HTTPHeaderDict()
for key, value in (headers or {}).items():
if isinstance(value, (list, tuple)):
_ = [http_headers.add(key, val) for val in value]
else:
http_headers.add(key, value)
response = self._http.urlopen(
method,
urlunsplit(url),
body=body,
headers=http_headers,
preload_content=preload_content,
)
if self._trace_stream:
self._trace_stream.write("HTTP/1.1 {0}\n".format(response.status))
self._trace_stream.write(
headers_to_strings(response.getheaders()),
)
self._trace_stream.write("\n")
if response.status in [200, 204, 206]:
if self._trace_stream:
if preload_content:
self._trace_stream.write("\n")
self._trace_stream.write(response.data.decode())
self._trace_stream.write("\n")
self._trace_stream.write("----------END-HTTP----------\n")
return response
response.read(cache_content=True)
if not preload_content:
response.release_conn()
if self._trace_stream and method != "HEAD" and response.data:
self._trace_stream.write(response.data.decode())
self._trace_stream.write("\n")
if (
method != "HEAD" and
"application/xml" not in response.getheader(
"content-type", "",
).split(";")
):
if self._trace_stream:
self._trace_stream.write("----------END-HTTP----------\n")
raise InvalidResponseError(
response.status,
response.getheader("content-type"),
response.data.decode() if response.data else None,
)
if not response.data and method != "HEAD":
if self._trace_stream:
self._trace_stream.write("----------END-HTTP----------\n")
raise InvalidResponseError(
response.status,
response.getheader("content-type"),
None,
)
response_error = S3Error.fromxml(response) if response.data else None
if self._trace_stream:
self._trace_stream.write("----------END-HTTP----------\n")
error_map = {
301: lambda: self._handle_redirect_response(
method, bucket_name, response, True,
),
307: lambda: self._handle_redirect_response(
method, bucket_name, response, True,
),
400: lambda: self._handle_redirect_response(
method, bucket_name, response, True,
),
403: lambda: ("AccessDenied", "Access denied"),
404: lambda: (
("NoSuchKey", "Object does not exist")
if object_name
else ("NoSuchBucket", "Bucket does not exist")
if bucket_name
else ("ResourceNotFound", "Request resource not found")
),
405: lambda: (
"MethodNotAllowed",
"The specified method is not allowed against this resource",
),
409: lambda: (
("NoSuchBucket", "Bucket does not exist")
if bucket_name
else ("ResourceConflict", "Request resource conflicts"),
),
501: lambda: (
"MethodNotAllowed",
"The specified method is not allowed against this resource",
),
}
if not response_error:
func = error_map.get(response.status)
code, message = func() if func else (None, None)
if not code:
raise ServerError(
"server failed with HTTP status code {}".format(
response.status,
),
)
response_error = S3Error(
code,
message,
url.path,
response.getheader("x-amz-request-id"),
response.getheader("x-amz-id-2"),
response,
bucket_name=bucket_name,
object_name=object_name,
)
if response_error.code in ["NoSuchBucket", "RetryHead"]:
self._region_map.pop(bucket_name, None)
raise response_error
def _execute(
self,
method,
bucket_name=None,
object_name=None,
body=None,
headers=None,
query_params=None,
preload_content=True,
no_body_trace=False,
):
"""Execute HTTP request."""
region = self._get_region(bucket_name, None)
try:
return self._url_open(
method,
region,
bucket_name=bucket_name,
object_name=object_name,
body=body,
headers=headers,
query_params=query_params,
preload_content=preload_content,
no_body_trace=no_body_trace,
)
except S3Error as exc:
if exc.code != "RetryHead":
raise
# Retry only once on RetryHead error.
try:
return self._url_open(
method,
region,
bucket_name=bucket_name,
object_name=object_name,
body=body,
headers=headers,
query_params=query_params,
preload_content=preload_content,
no_body_trace=no_body_trace,
)
except S3Error as exc:
if exc.code != "RetryHead":
raise
code, message = self._handle_redirect_response(
method, bucket_name, exc.response,
)
raise exc.copy(code, message)
def _get_region(self, bucket_name, region):
"""
Return region of given bucket either from region cache or set in
constructor.
"""
if region:
# Error out if region does not match with region passed via
# constructor.
if self._base_url.region and self._base_url.region != region:
raise ValueError(
"region must be {0}, but passed {1}".format(
self._base_url.region, region,
),
)
return region
if self._base_url.region:
return self._base_url.region
if not bucket_name or not self._provider:
return "us-east-1"
region = self._region_map.get(bucket_name)
if region:
return region
# Execute GetBucketLocation REST API to get region of the bucket.
response = self._url_open(
"GET",
"us-east-1",
bucket_name=bucket_name,
query_params={"location": ""},
)
element = ET.fromstring(response.data.decode())
if not element.text:
region = "us-east-1"
elif element.text == "EU":
region = "eu-west-1"
else:
region = element.text
self._region_map[bucket_name] = region
return region
def set_app_info(self, app_name, app_version):
"""
Set your application name and version to user agent header.
:param app_name: Application name.
:param app_version: Application version.
Example::
client.set_app_info('my_app', '1.0.2')
"""
if not (app_name and app_version):
raise ValueError("Application name/version cannot be empty.")
self._user_agent = "{0} {1}/{2}".format(
_DEFAULT_USER_AGENT, app_name, app_version,
)
def trace_on(self, stream):
"""
Enable http trace.
:param stream: Stream for writing HTTP call tracing.
"""
if not stream:
raise ValueError('Input stream for trace output is invalid.')
# Save new output stream.
self._trace_stream = stream
def trace_off(self):
"""
Disable HTTP trace.
"""
self._trace_stream = None
def enable_accelerate_endpoint(self):
"""Enables accelerate endpoint for Amazon S3 endpoint."""
self._base_url.accelerate_host_flag = True
def disable_accelerate_endpoint(self):
"""Disables accelerate endpoint for Amazon S3 endpoint."""
self._base_url.accelerate_host_flag = False
def enable_dualstack_endpoint(self):
"""Enables dualstack endpoint for Amazon S3 endpoint."""
self._base_url.dualstack_host_flag = True
def disable_dualstack_endpoint(self):
"""Disables dualstack endpoint for Amazon S3 endpoint."""
self._base_url.dualstack_host_flag = False
def enable_virtual_style_endpoint(self):
"""Enables virtual style endpoint."""
self._base_url.virtual_style_flag = True
def disable_virtual_style_endpoint(self):
"""Disables virtual style endpoint."""
self._base_url.virtual_style_flag = False
def select_object_content(self, bucket_name, object_name, request):
"""
Select content of an object by SQL expression.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param request: :class:`SelectRequest <SelectRequest>` object.
:return: A reader contains requested records and progress information.
Example::
with client.select_object_content(
"my-bucket",
"my-object.csv",
SelectRequest(
"select * from S3Object",
CSVInputSerialization(),
CSVOutputSerialization(),
request_progress=True,
),
) as result:
for data in result.stream():
print(data.decode())
print(result.stats())
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
if not isinstance(request, SelectRequest):
raise ValueError("request must be SelectRequest type")
body = marshal(request)
response = self._execute(
"POST",
bucket_name=bucket_name,
object_name=object_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params={"select": "", "select-type": "2"},
preload_content=False,
)
return SelectObjectReader(response)
def make_bucket(self, bucket_name, location=None, object_lock=False):
"""
Create a bucket with region and object lock.
:param bucket_name: Name of the bucket.
:param location: Region in which the bucket will be created.
:param object_lock: Flag to set object-lock feature.
Examples::
# Create bucket.
client.make_bucket("my-bucket")
# Create bucket on specific region.
client.make_bucket("my-bucket", "us-west-1")
# Create bucket with object-lock feature on specific region.
client.make_bucket("my-bucket", "eu-west-2", object_lock=True)
"""
check_bucket_name(bucket_name, True)
if self._base_url.region:
# Error out if region does not match with region passed via
# constructor.
if location and self._base_url.region != location:
raise ValueError(
"region must be {0}, but passed {1}".format(
self._base_url.region, location,
),
)
location = location or "us-east-1"
headers = (
{"x-amz-bucket-object-lock-enabled": "true"}
if object_lock else None
)
body = None
if location != "us-east-1":
element = Element("CreateBucketConfiguration")
SubElement(element, "LocationConstraint", location)
body = getbytes(element)
self._url_open(
"PUT",
location,
bucket_name=bucket_name,
body=body,
headers=headers,
)
self._region_map[bucket_name] = location
def list_buckets(self):
"""
List information of all accessible buckets.
:return: List of :class:`Bucket <Bucket>` object.
Example::
buckets = client.list_buckets()
for bucket in buckets:
print(bucket.name, bucket.creation_date)
"""
response = self._execute("GET")
result = unmarshal(ListAllMyBucketsResult, response.data.decode())
return result.buckets
def bucket_exists(self, bucket_name):
"""
Check if a bucket exists.
:param bucket_name: Name of the bucket.
:return: True if the bucket exists.
Example::
if client.bucket_exists("my-bucket"):
print("my-bucket exists")
else:
print("my-bucket does not exist")
"""
check_bucket_name(bucket_name)
try:
self._execute("HEAD", bucket_name)
return True
except S3Error as exc:
if exc.code != "NoSuchBucket":
raise
return False
def remove_bucket(self, bucket_name):
"""
Remove an empty bucket.
:param bucket_name: Name of the bucket.
Example::
client.remove_bucket("my-bucket")
"""
check_bucket_name(bucket_name)
self._execute("DELETE", bucket_name)
self._region_map.pop(bucket_name, None)
def get_bucket_policy(self, bucket_name):
"""
Get bucket policy configuration of a bucket.
:param bucket_name: Name of the bucket.
:return: Bucket policy configuration as JSON string.
Example::
policy = client.get_bucket_policy("my-bucket")
"""
check_bucket_name(bucket_name)
response = self._execute(
"GET", bucket_name, query_params={"policy": ""},
)
return response.data.decode()
def delete_bucket_policy(self, bucket_name):
"""
Delete bucket policy configuration of a bucket.
:param bucket_name: Name of the bucket.
Example::
client.delete_bucket_policy("my-bucket")
"""
check_bucket_name(bucket_name)
self._execute("DELETE", bucket_name, query_params={"policy": ""})
def set_bucket_policy(self, bucket_name, policy):
"""
Set bucket policy configuration to a bucket.
:param bucket_name: Name of the bucket.
:param policy: Bucket policy configuration as JSON string.
Example::
client.set_bucket_policy("my-bucket", policy)
"""
check_bucket_name(bucket_name)
is_valid_policy_type(policy)
self._execute(
"PUT",
bucket_name,
body=policy,
headers={"Content-MD5": md5sum_hash(policy)},
query_params={"policy": ""},
)
def get_bucket_notification(self, bucket_name):
"""
Get notification configuration of a bucket.
:param bucket_name: Name of the bucket.
:return: :class:`NotificationConfig <NotificationConfig>` object.
Example::
config = client.get_bucket_notification("my-bucket")
"""
check_bucket_name(bucket_name)
response = self._execute(
"GET", bucket_name, query_params={"notification": ""},
)
return unmarshal(NotificationConfig, response.data.decode())
def set_bucket_notification(self, bucket_name, config):
"""
Set notification configuration of a bucket.
:param bucket_name: Name of the bucket.
:param config: class:`NotificationConfig <NotificationConfig>` object.
Example::
config = NotificationConfig(
queue_config_list=[
QueueConfig(
"QUEUE-ARN-OF-THIS-BUCKET",
["s3:ObjectCreated:*"],
config_id="1",
prefix_filter_rule=PrefixFilterRule("abc"),
),
],
)
client.set_bucket_notification("my-bucket", config)
"""
check_bucket_name(bucket_name)
if not isinstance(config, NotificationConfig):
raise ValueError("config must be NotificationConfig type")
body = marshal(config)
self._execute(
"PUT",
bucket_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params={"notification": ""},
)
def delete_bucket_notification(self, bucket_name):
"""
Delete notification configuration of a bucket. On success, S3 service
stops notification of events previously set of the bucket.
:param bucket_name: Name of the bucket.
Example::
client.delete_bucket_notification("my-bucket")
"""
self.set_bucket_notification(bucket_name, NotificationConfig())
def set_bucket_encryption(self, bucket_name, config):
"""
Set encryption configuration of a bucket.
:param bucket_name: Name of the bucket.
:param config: :class:`SSEConfig <SSEConfig>` object.
Example::
client.set_bucket_encryption(
"my-bucket", SSEConfig(Rule.new_sse_s3_rule()),
)
"""
check_bucket_name(bucket_name)
if not isinstance(config, SSEConfig):
raise ValueError("config must be SSEConfig type")
body = marshal(config)
self._execute(
"PUT",
bucket_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params={"encryption": ""},
)
def get_bucket_encryption(self, bucket_name):
"""
Get encryption configuration of a bucket.
:param bucket_name: Name of the bucket.
:return: :class:`SSEConfig <SSEConfig>` object.
Example::
config = client.get_bucket_encryption("my-bucket")
"""
check_bucket_name(bucket_name)
try:
response = self._execute(
"GET",
bucket_name,
query_params={"encryption": ""},
)
return unmarshal(SSEConfig, response.data.decode())
except S3Error as exc:
if exc.code != "ServerSideEncryptionConfigurationNotFoundError":
raise
return None
def delete_bucket_encryption(self, bucket_name):
"""
Delete encryption configuration of a bucket.
:param bucket_name: Name of the bucket.
Example::
client.delete_bucket_encryption("my-bucket")
"""
check_bucket_name(bucket_name)
try:
self._execute(
"DELETE",
bucket_name,
query_params={"encryption": ""},
)
except S3Error as exc:
if exc.code != "ServerSideEncryptionConfigurationNotFoundError":
raise
def listen_bucket_notification(self, bucket_name, prefix='', suffix='',
events=('s3:ObjectCreated:*',
's3:ObjectRemoved:*',
's3:ObjectAccessed:*')):
"""
        Listen to events of objects with the given prefix and suffix in a
        bucket. The caller should iterate over the returned iterator to read
        new events.
:param bucket_name: Name of the bucket.
        :param prefix: Listen to events of objects starting with prefix.
        :param suffix: Listen to events of objects ending with suffix.
:param events: Events to listen.
:return: Iterator of event records as :dict:.
Example::
events = client.listen_bucket_notification(
"my-bucket",
prefix="my-prefix/",
events=["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
)
for event in events:
print(event)
"""
check_bucket_name(bucket_name)
if self._base_url.is_aws_host:
raise ValueError(
"ListenBucketNotification API is not supported in Amazon S3",
)
while True:
response = self._execute(
"GET",
bucket_name,
query_params={
"prefix": prefix or "",
"suffix": suffix or "",
"events": events,
},
preload_content=False,
)
try:
for line in response.stream():
line = line.strip()
if not line:
continue
if hasattr(line, 'decode'):
line = line.decode()
event = json.loads(line)
if event['Records']:
yield event
except JSONDecodeError:
pass # Ignore this exception.
finally:
response.close()
response.release_conn()
def set_bucket_versioning(self, bucket_name, config):
"""
Set versioning configuration to a bucket.
:param bucket_name: Name of the bucket.
:param config: :class:`VersioningConfig <VersioningConfig>`.
Example::
client.set_bucket_versioning(
"my-bucket", VersioningConfig(ENABLED),
)
"""
check_bucket_name(bucket_name)
if not isinstance(config, VersioningConfig):
raise ValueError("config must be VersioningConfig type")
body = marshal(config)
self._execute(
"PUT",
bucket_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params={"versioning": ""},
)
def get_bucket_versioning(self, bucket_name):
"""
Get versioning configuration of a bucket.
:param bucket_name: Name of the bucket.
:return: :class:`VersioningConfig <VersioningConfig>`.
Example::
config = client.get_bucket_versioning("my-bucket")
print(config.status)
"""
check_bucket_name(bucket_name)
response = self._execute(
"GET",
bucket_name,
query_params={"versioning": ""},
)
return unmarshal(VersioningConfig, response.data.decode())
def fput_object(self, bucket_name, object_name, file_path,
content_type="application/octet-stream",
metadata=None, sse=None, progress=None,
part_size=0, num_parallel_uploads=3,
tags=None, retention=None, legal_hold=False):
"""
Uploads data from a file to an object in a bucket.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param file_path: Name of file to upload.
:param content_type: Content type of the object.
:param metadata: Any additional metadata to be uploaded along
with your PUT request.
:param sse: Server-side encryption.
:param progress: A progress object
:param part_size: Multipart part size
:param num_parallel_uploads: Number of parallel uploads.
:param tags: :class:`Tags` for the object.
:param retention: :class:`Retention` configuration object.
:param legal_hold: Flag to set legal hold for the object.
:return: :class:`ObjectWriteResult` object.
Example::
# Upload data.
result = client.fput_object(
"my-bucket", "my-object", "my-filename",
)
# Upload data with metadata.
result = client.fput_object(
"my-bucket", "my-object", "my-filename",
metadata={"My-Project": "one"},
)
# Upload data with tags, retention and legal-hold.
date = datetime.utcnow().replace(
hour=0, minute=0, second=0, microsecond=0,
) + timedelta(days=30)
tags = Tags(for_object=True)
tags["User"] = "jsmith"
result = client.fput_object(
"my-bucket", "my-object", "my-filename",
tags=tags,
retention=Retention(GOVERNANCE, date),
legal_hold=True,
)
"""
file_size = os.stat(file_path).st_size
with open(file_path, "rb") as file_data:
return self.put_object(
bucket_name, object_name, file_data, file_size,
content_type=content_type,
metadata=metadata, sse=sse, progress=progress,
part_size=part_size, num_parallel_uploads=num_parallel_uploads,
tags=tags, retention=retention, legal_hold=legal_hold,
)
def fget_object(self, bucket_name, object_name, file_path,
request_headers=None, ssec=None, version_id=None,
extra_query_params=None, tmp_file_path=None):
"""
Downloads data of an object to file.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param file_path: Name of file to download.
:param request_headers: Any additional headers to be added with GET
request.
:param ssec: Server-side encryption customer key.
:param version_id: Version-ID of the object.
:param extra_query_params: Extra query parameters for advanced usage.
:param tmp_file_path: Path to a temporary file.
:return: Object information.
Example::
# Download data of an object.
client.fget_object("my-bucket", "my-object", "my-filename")
# Download data of an object of version-ID.
client.fget_object(
"my-bucket", "my-object", "my-filename",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
# Download data of an SSE-C encrypted object.
client.fget_object(
"my-bucket", "my-object", "my-filename",
ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
if os.path.isdir(file_path):
raise ValueError("file {0} is a directory".format(file_path))
# Create top level directory if needed.
makedirs(os.path.dirname(file_path))
stat = self.stat_object(
bucket_name,
object_name,
ssec,
version_id=version_id,
)
# Write to a temporary file "file_path.part.minio" before saving.
tmp_file_path = (
tmp_file_path or file_path + "." + stat.etag + ".part.minio"
)
try:
tmp_file_stat = os.stat(tmp_file_path)
except IOError:
tmp_file_stat = None # Ignore this error.
offset = tmp_file_stat.st_size if tmp_file_stat else 0
if offset > stat.size:
os.remove(tmp_file_path)
offset = 0
response = None
try:
response = self.get_object(
bucket_name,
object_name,
offset=offset,
request_headers=request_headers,
ssec=ssec,
version_id=version_id,
extra_query_params=extra_query_params,
)
with open(tmp_file_path, "ab") as tmp_file:
for data in response.stream(amt=1024*1024):
tmp_file.write(data)
if os.path.exists(file_path):
os.remove(file_path) # For windows compatibility.
os.rename(tmp_file_path, file_path)
return stat
finally:
if response:
response.close()
response.release_conn()
def get_object(self, bucket_name, object_name, offset=0, length=0,
request_headers=None, ssec=None, version_id=None,
extra_query_params=None):
"""
Get data of an object. Returned response should be closed after use to
release network resources. To reuse the connection, it's required to
call `response.release_conn()` explicitly.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param offset: Start byte position of object data.
:param length: Number of bytes of object data from offset.
:param request_headers: Any additional headers to be added with GET
request.
:param ssec: Server-side encryption customer key.
:param version_id: Version-ID of the object.
:param extra_query_params: Extra query parameters for advanced usage.
:return: :class:`urllib3.response.HTTPResponse` object.
Example::
# Get data of an object.
try:
response = client.get_object("my-bucket", "my-object")
# Read data from response.
finally:
response.close()
response.release_conn()
# Get data of an object of version-ID.
try:
response = client.get_object(
"my-bucket", "my-object",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
# Read data from response.
finally:
response.close()
response.release_conn()
# Get data of an object from offset and length.
try:
response = client.get_object(
"my-bucket", "my-object", offset=512, length=1024,
)
# Read data from response.
finally:
response.close()
response.release_conn()
# Get data of an SSE-C encrypted object.
try:
response = client.get_object(
"my-bucket", "my-object",
ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
# Read data from response.
finally:
response.close()
response.release_conn()
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
check_ssec(ssec)
headers = ssec.headers() if ssec else {}
headers.update(request_headers or {})
if offset or length:
headers['Range'] = 'bytes={}-{}'.format(
offset, offset + length - 1 if length else "")
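            # e.g. offset=512, length=1024 yields "bytes=512-1535";
            # offset=512 with length=0 yields "bytes=512-" (read to the end).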
if version_id:
extra_query_params = extra_query_params or {}
extra_query_params["versionId"] = version_id
return self._execute(
"GET",
bucket_name,
object_name,
headers=headers,
query_params=extra_query_params,
preload_content=False,
)
def copy_object(self, bucket_name, object_name, source,
sse=None, metadata=None, tags=None, retention=None,
legal_hold=False, metadata_directive=None,
tagging_directive=None):
"""
Create an object by server-side copying data from another object.
In this API maximum supported source object size is 5GiB.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param source: :class:`CopySource` object.
:param sse: Server-side encryption of destination object.
:param metadata: Any user-defined metadata to be copied along with
destination object.
:param tags: Tags for destination object.
:param retention: :class:`Retention` configuration object.
:param legal_hold: Flag to set legal hold for destination object.
:param metadata_directive: Directive used to handle user metadata for
destination object.
:param tagging_directive: Directive used to handle tags for destination
object.
:return: :class:`ObjectWriteResult <ObjectWriteResult>` object.
Example::
# copy an object from a bucket to another.
result = client.copy_object(
"my-bucket",
"my-object",
CopySource("my-sourcebucket", "my-sourceobject"),
)
print(result.object_name, result.version_id)
# copy an object with condition.
result = client.copy_object(
"my-bucket",
"my-object",
CopySource(
"my-sourcebucket",
"my-sourceobject",
modified_since=datetime(2014, 4, 1, tzinfo=timezone.utc),
),
)
print(result.object_name, result.version_id)
# copy an object from a bucket with replacing metadata.
metadata = {"test_meta_key": "test_meta_value"}
result = client.copy_object(
"my-bucket",
"my-object",
CopySource("my-sourcebucket", "my-sourceobject"),
metadata=metadata,
metadata_directive=REPLACE,
)
print(result.object_name, result.version_id)
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
if not isinstance(source, CopySource):
raise ValueError("source must be CopySource type")
check_sse(sse)
if tags is not None and not isinstance(tags, Tags):
raise ValueError("tags must be Tags type")
if retention is not None and not isinstance(retention, Retention):
raise ValueError("retention must be Retention type")
if (
metadata_directive is not None and
metadata_directive not in [COPY, REPLACE]
):
raise ValueError(
"metadata directive must be {0} or {1}".format(COPY, REPLACE),
)
if (
tagging_directive is not None and
tagging_directive not in [COPY, REPLACE]
):
raise ValueError(
"tagging directive must be {0} or {1}".format(COPY, REPLACE),
)
size = -1
if source.offset is None and source.length is None:
stat = self.stat_object(
source.bucket_name,
source.object_name,
version_id=source.version_id,
ssec=source.ssec,
)
size = stat.size
if (
source.offset is not None or
source.length is not None or
size > MAX_PART_SIZE
):
if metadata_directive == COPY:
raise ValueError(
"COPY metadata directive is not applicable to source "
"object size greater than 5 GiB",
)
if tagging_directive == COPY:
raise ValueError(
"COPY tagging directive is not applicable to source "
"object size greater than 5 GiB"
)
return self.compose_object(
bucket_name, object_name, ComposeSource.of(source),
sse=sse, metadata=metadata, tags=tags, retention=retention,
legal_hold=legal_hold,
)
headers = genheaders(metadata, sse, tags, retention, legal_hold)
if metadata_directive:
headers["x-amz-metadata-directive"] = metadata_directive
if tagging_directive:
headers["x-amz-tagging-directive"] = tagging_directive
headers.update(source.gen_copy_headers())
response = self._execute(
"PUT",
bucket_name,
object_name=object_name,
headers=headers,
)
etag, last_modified = parse_copy_object(response)
return ObjectWriteResult(
bucket_name,
object_name,
response.getheader("x-amz-version-id"),
etag,
response.getheaders(),
last_modified=last_modified,
)
def _calc_part_count(self, sources):
"""Calculate part count."""
object_size = 0
part_count = 0
i = 0
for src in sources:
i += 1
stat = self.stat_object(
src.bucket_name,
src.object_name,
version_id=src.version_id,
ssec=src.ssec,
)
src.build_headers(stat.size, stat.etag)
size = stat.size
if src.length is not None:
size = src.length
elif src.offset is not None:
size -= src.offset
if (
size < MIN_PART_SIZE and
len(sources) != 1 and
i != len(sources)
):
raise ValueError(
"source {0}/{1}: size {2} must be greater than {3}".format(
src.bucket_name, src.object_name, size, MIN_PART_SIZE,
),
)
object_size += size
if object_size > MAX_MULTIPART_OBJECT_SIZE:
raise ValueError(
"destination object size must be less than {0}".format(
MAX_MULTIPART_OBJECT_SIZE,
),
)
if size > MAX_PART_SIZE:
count = int(size / MAX_PART_SIZE)
last_part_size = size - (count * MAX_PART_SIZE)
if last_part_size > 0:
count += 1
else:
last_part_size = MAX_PART_SIZE
if (
last_part_size < MIN_PART_SIZE and
len(sources) != 1 and
i != len(sources)
):
raise ValueError(
(
"source {0}/{1}: for multipart split upload of "
"{2}, last part size is less than {3}"
).format(
src.bucket_name, src.object_name, size,
MIN_PART_SIZE,
),
)
part_count += count
else:
part_count += 1
if part_count > MAX_MULTIPART_COUNT:
raise ValueError(
(
"Compose sources create more than allowed multipart "
"count {0}"
).format(MAX_MULTIPART_COUNT),
)
return part_count
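    # Worked example (editor's note, assuming the 5 GiB MAX_PART_SIZE and
    # 5 MiB MIN_PART_SIZE limits imported from .helpers): a single 12 GiB
    # source splits into count = 3 parts of 5 GiB, 5 GiB and 2 GiB; the
    # 2 GiB remainder is acceptable because it is above MIN_PART_SIZE.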
def _upload_part_copy(self, bucket_name, object_name, upload_id,
part_number, headers):
"""Execute UploadPartCopy S3 API."""
query_params = {
"partNumber": str(part_number),
"uploadId": upload_id,
}
response = self._execute(
"PUT",
bucket_name,
object_name,
headers=headers,
query_params=query_params,
)
return parse_copy_object(response)
def compose_object( # pylint: disable=too-many-branches
self, bucket_name, object_name, sources,
sse=None, metadata=None, tags=None, retention=None,
legal_hold=False,
):
"""
Create an object by combining data from different source objects using
server-side copy.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param sources: List of :class:`ComposeSource` object.
:param sse: Server-side encryption of destination object.
:param metadata: Any user-defined metadata to be copied along with
destination object.
:param tags: Tags for destination object.
:param retention: :class:`Retention` configuration object.
:param legal_hold: Flag to set legal hold for destination object.
:return: :class:`ObjectWriteResult <ObjectWriteResult>` object.
Example::
sources = [
ComposeSource("my-job-bucket", "my-object-part-one"),
ComposeSource("my-job-bucket", "my-object-part-two"),
ComposeSource("my-job-bucket", "my-object-part-three"),
]
# Create my-bucket/my-object by combining source object
# list.
result = client.compose_object("my-bucket", "my-object", sources)
print(result.object_name, result.version_id)
# Create my-bucket/my-object with user metadata by combining
# source object list.
result = client.compose_object(
"my-bucket",
"my-object",
sources,
metadata={"test_meta_key": "test_meta_value"},
)
print(result.object_name, result.version_id)
# Create my-bucket/my-object with user metadata and
# server-side encryption by combining source object list.
client.compose_object(
"my-bucket", "my-object", sources, sse=SseS3(),
)
print(result.object_name, result.version_id)
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
if not isinstance(sources, (list, tuple)) or not sources:
raise ValueError("sources must be non-empty list or tuple type")
i = 0
for src in sources:
if not isinstance(src, ComposeSource):
raise ValueError(
"sources[{0}] must be ComposeSource type".format(i),
)
i += 1
check_sse(sse)
if tags is not None and not isinstance(tags, Tags):
raise ValueError("tags must be Tags type")
if retention is not None and not isinstance(retention, Retention):
raise ValueError("retention must be Retention type")
part_count = self._calc_part_count(sources)
if (
part_count == 1 and
sources[0].offset is None and
sources[0].length is None
):
return self.copy_object(
bucket_name, object_name, CopySource.of(sources[0]),
sse=sse, metadata=metadata, tags=tags, retention=retention,
legal_hold=legal_hold,
metadata_directive=REPLACE if metadata else None,
tagging_directive=REPLACE if tags else None,
)
headers = genheaders(metadata, sse, tags, retention, legal_hold)
upload_id = self._create_multipart_upload(
bucket_name, object_name, headers,
)
ssec_headers = sse.headers() if isinstance(sse, SseCustomerKey) else {}
try:
part_number = 0
total_parts = []
for src in sources:
size = src.object_size
if src.length is not None:
size = src.length
elif src.offset is not None:
size -= src.offset
offset = src.offset or 0
headers = src.headers
headers.update(ssec_headers)
if size <= MAX_PART_SIZE:
part_number += 1
if src.length is not None:
headers["x-amz-copy-source-range"] = (
"bytes={0}-{1}".format(offset, offset+src.length-1)
)
elif src.offset is not None:
headers["x-amz-copy-source-range"] = (
"bytes={0}-{1}".format(offset, offset+size-1)
)
etag, _ = self._upload_part_copy(
bucket_name,
object_name,
upload_id,
part_number,
headers,
)
total_parts.append(Part(part_number, etag))
continue
while size > 0:
part_number += 1
start_bytes = offset
end_bytes = start_bytes + MAX_PART_SIZE
if size < MAX_PART_SIZE:
end_bytes = start_bytes + size
headers_copy = headers.copy()
                    headers_copy["x-amz-copy-source-range"] = (
                        "bytes={0}-{1}".format(start_bytes, end_bytes - 1)
                    )
etag, _ = self._upload_part_copy(
bucket_name,
object_name,
upload_id,
part_number,
headers_copy,
)
total_parts.append(Part(part_number, etag))
                    # Advance past the bytes copied by this part.
                    offset = end_bytes
                    size -= end_bytes - start_bytes
result = self._complete_multipart_upload(
bucket_name, object_name, upload_id, total_parts,
)
return ObjectWriteResult(
result.bucket_name,
result.object_name,
result.version_id,
result.etag,
result.http_headers,
location=result.location,
)
except Exception as exc:
if upload_id:
self._abort_multipart_upload(
bucket_name, object_name, upload_id,
)
raise exc
def _abort_multipart_upload(self, bucket_name, object_name, upload_id):
"""Execute AbortMultipartUpload S3 API."""
self._execute(
"DELETE",
bucket_name,
object_name,
query_params={'uploadId': upload_id},
)
def _complete_multipart_upload(
self, bucket_name, object_name, upload_id, parts,
):
"""Execute CompleteMultipartUpload S3 API."""
element = Element("CompleteMultipartUpload")
for part in parts:
tag = SubElement(element, "Part")
SubElement(tag, "PartNumber", str(part.part_number))
SubElement(tag, "ETag", '"' + part.etag + '"')
body = getbytes(element)
response = self._execute(
"POST",
bucket_name,
object_name,
body=body,
headers={
"Content-Type": 'application/xml',
"Content-MD5": md5sum_hash(body),
},
query_params={'uploadId': upload_id},
)
return CompleteMultipartUploadResult(response)
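        # For illustration, the XML body built above looks roughly like this
        # (part numbers and ETag values are hypothetical):
        #   <CompleteMultipartUpload>
        #     <Part><PartNumber>1</PartNumber><ETag>"a54357aff0632cce46d942af68356b38"</ETag></Part>
        #     <Part><PartNumber>2</PartNumber><ETag>"0c78aef83f66abc1fa1e8477f296d394"</ETag></Part>
        #   </CompleteMultipartUpload>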
def _create_multipart_upload(self, bucket_name, object_name, headers):
"""Execute CreateMultipartUpload S3 API."""
if not headers.get("Content-Type"):
headers["Content-Type"] = "application/octet-stream"
response = self._execute(
"POST",
bucket_name,
object_name,
headers=headers,
query_params={"uploads": ""},
)
element = ET.fromstring(response.data.decode())
return findtext(element, "UploadId")
def _put_object(self, bucket_name, object_name, data, headers,
query_params=None):
"""Execute PutObject S3 API."""
response = self._execute(
"PUT",
bucket_name,
object_name,
body=data,
headers=headers,
query_params=query_params,
no_body_trace=True,
)
return ObjectWriteResult(
bucket_name,
object_name,
response.getheader("x-amz-version-id"),
response.getheader("etag").replace('"', ""),
response.getheaders(),
)
def _upload_part(self, bucket_name, object_name, data, headers,
upload_id, part_number):
"""Execute UploadPart S3 API."""
query_params = {
"partNumber": str(part_number),
"uploadId": upload_id,
}
result = self._put_object(
bucket_name, object_name, data, headers, query_params=query_params,
)
return result.etag
def _upload_part_task(self, args):
"""Upload_part task for ThreadPool."""
return args[5], self._upload_part(*args)
def put_object(self, bucket_name, object_name, data, length,
content_type="application/octet-stream",
metadata=None, sse=None, progress=None,
part_size=0, num_parallel_uploads=3,
tags=None, retention=None, legal_hold=False):
"""
Uploads data from a stream to an object in a bucket.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param data: An object having callable read() returning bytes object.
:param length: Data size; -1 for unknown size and set valid part_size.
:param content_type: Content type of the object.
:param metadata: Any additional metadata to be uploaded along
with your PUT request.
:param sse: Server-side encryption.
        :param progress: A progress object; must be a Thread instance.
:param part_size: Multipart part size.
:param num_parallel_uploads: Number of parallel uploads.
:param tags: :class:`Tags` for the object.
:param retention: :class:`Retention` configuration object.
:param legal_hold: Flag to set legal hold for the object.
:return: :class:`ObjectWriteResult` object.
Example::
# Upload data.
result = client.put_object(
"my-bucket", "my-object", io.BytesIO(b"hello"), 5,
)
# Upload data with metadata.
result = client.put_object(
"my-bucket", "my-object", io.BytesIO(b"hello"), 5,
metadata={"My-Project": "one"},
)
# Upload data with tags, retention and legal-hold.
date = datetime.utcnow().replace(
hour=0, minute=0, second=0, microsecond=0,
) + timedelta(days=30)
tags = Tags(for_object=True)
tags["User"] = "jsmith"
result = client.put_object(
"my-bucket", "my-object", io.BytesIO(b"hello"), 5,
tags=tags,
retention=Retention(GOVERNANCE, date),
legal_hold=True,
)
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
check_sse(sse)
if tags is not None and not isinstance(tags, Tags):
raise ValueError("tags must be Tags type")
if retention is not None and not isinstance(retention, Retention):
raise ValueError("retention must be Retention type")
        if not callable(getattr(data, "read", None)):
raise ValueError("input data must have callable read()")
part_size, part_count = get_part_info(length, part_size)
if progress:
if not isinstance(progress, Thread):
raise TypeError("progress object must be instance of Thread")
# Set progress bar length and object name before upload
progress.set_meta(object_name=object_name, total_length=length)
headers = genheaders(metadata, sse, tags, retention, legal_hold)
headers["Content-Type"] = content_type or "application/octet-stream"
object_size = length
uploaded_size = 0
part_number = 0
one_byte = b""
stop = False
upload_id = None
parts = []
pool = None
try:
while not stop:
part_number += 1
if part_count > 0:
if part_number == part_count:
part_size = object_size - uploaded_size
stop = True
part_data = read_part_data(
data, part_size, progress=progress,
)
if len(part_data) != part_size:
                        raise IOError(
                            (
                                "stream does not have enough data; "
                                "expected: {0}, got: {1} bytes"
                            ).format(part_size, len(part_data))
)
else:
part_data = read_part_data(
data, part_size + 1, one_byte, progress=progress,
)
                # If the amount of data read is less than or equal to
                # part_size, then we have reached the last part.
if len(part_data) <= part_size:
part_count = part_number
stop = True
else:
one_byte = part_data[-1:]
part_data = part_data[:-1]
uploaded_size += len(part_data)
if part_count == 1:
return self._put_object(
bucket_name, object_name, part_data, headers,
)
if not upload_id:
upload_id = self._create_multipart_upload(
bucket_name, object_name, headers,
)
                    # Start the worker pool once, when the multipart upload is
                    # first created, so queued part uploads are not lost.
                    if num_parallel_uploads and num_parallel_uploads > 1:
                        pool = ThreadPool(num_parallel_uploads)
                        pool.start_parallel()
args = (
bucket_name, object_name, part_data,
sse.headers() if isinstance(sse, SseCustomerKey) else None,
upload_id, part_number,
)
if num_parallel_uploads > 1:
pool.add_task(self._upload_part_task, args)
else:
etag = self._upload_part(*args)
parts.append(Part(part_number, etag))
if pool:
result = pool.result()
parts = [None] * part_count
while not result.empty():
part_number, etag = result.get()
parts[part_number-1] = Part(part_number, etag)
result = self._complete_multipart_upload(
bucket_name, object_name, upload_id, parts,
)
return ObjectWriteResult(
result.bucket_name,
result.object_name,
result.version_id,
result.etag,
result.http_headers,
location=result.location,
)
except Exception as exc:
if upload_id:
self._abort_multipart_upload(
bucket_name, object_name, upload_id,
)
raise exc
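        # A hedged sketch of the streaming path handled above: uploading an
        # object of unknown size by passing length=-1 together with a valid
        # part_size (the stream object and sizes are illustrative):
        #   result = client.put_object(
        #       "my-bucket", "my-object", data_stream, length=-1,
        #       part_size=10*1024*1024,
        #   )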
def list_objects(self, bucket_name, prefix=None, recursive=False,
start_after=None, include_user_meta=False,
include_version=False, use_api_v1=False):
"""
Lists object information of a bucket.
:param bucket_name: Name of the bucket.
:param prefix: Object name starts with prefix.
:param recursive: List recursively than directory structure emulation.
:param start_after: List objects after this key name.
:param include_user_meta: MinIO specific flag to control to include
user metadata.
:param include_version: Flag to control whether include object
versions.
:param use_api_v1: Flag to control to use ListObjectV1 S3 API or not.
:return: Iterator of :class:`Object <Object>`.
Example::
# List objects information.
objects = client.list_objects("my-bucket")
for obj in objects:
print(obj)
# List objects information whose names starts with "my/prefix/".
objects = client.list_objects("my-bucket", prefix="my/prefix/")
for obj in objects:
print(obj)
# List objects information recursively.
objects = client.list_objects("my-bucket", recursive=True)
for obj in objects:
print(obj)
# List objects information recursively whose names starts with
# "my/prefix/".
objects = client.list_objects(
"my-bucket", prefix="my/prefix/", recursive=True,
)
for obj in objects:
print(obj)
# List objects information recursively after object name
# "my/prefix/world/1".
objects = client.list_objects(
"my-bucket", recursive=True, start_after="my/prefix/world/1",
)
for obj in objects:
print(obj)
"""
return self._list_objects(
bucket_name,
delimiter=None if recursive else "/",
include_user_meta=include_user_meta,
prefix=prefix,
start_after=start_after,
use_api_v1=use_api_v1,
include_version=include_version,
)
def stat_object(self, bucket_name, object_name, ssec=None, version_id=None,
extra_query_params=None):
"""
Get object information and metadata of an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param ssec: Server-side encryption customer key.
:param version_id: Version ID of the object.
:param extra_query_params: Extra query parameters for advanced usage.
:return: :class:`Object <Object>`.
Example::
# Get object information.
result = client.stat_object("my-bucket", "my-object")
# Get object information of version-ID.
result = client.stat_object(
"my-bucket", "my-object",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
# Get SSE-C encrypted object information.
result = client.stat_object(
"my-bucket", "my-object",
ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
check_ssec(ssec)
headers = ssec.headers() if ssec else {}
query_params = extra_query_params or {}
query_params.update({"versionId": version_id} if version_id else {})
response = self._execute(
"HEAD",
bucket_name,
object_name,
headers=headers,
query_params=query_params,
)
last_modified = response.getheader("last-modified")
if last_modified:
last_modified = time.from_http_header(last_modified)
return Object(
bucket_name,
object_name,
last_modified=last_modified,
etag=response.getheader("etag", "").replace('"', ""),
size=int(response.getheader("content-length", "0")),
content_type=response.getheader("content-type"),
metadata=response.headers,
version_id=response.getheader("x-amz-version-id"),
)
def remove_object(self, bucket_name, object_name, version_id=None):
"""
Remove an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param version_id: Version ID of the object.
Example::
# Remove object.
client.remove_object("my-bucket", "my-object")
# Remove version of an object.
client.remove_object(
"my-bucket", "my-object",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
self._execute(
"DELETE",
bucket_name,
object_name,
query_params={"versionId": version_id} if version_id else None,
)
def _delete_objects(self, bucket_name, delete_object_list,
quiet=False, bypass_governance_mode=False):
"""
Delete multiple objects.
:param bucket_name: Name of the bucket.
:param delete_object_list: List of maximum 1000
:class:`DeleteObject <DeleteObject>` object.
:param quiet: quiet flag.
:param bypass_governance_mode: Bypass Governance retention mode.
:return: :class:`DeleteResult <DeleteResult>` object.
"""
body = marshal(DeleteRequest(delete_object_list, quiet=quiet))
headers = {"Content-MD5": md5sum_hash(body)}
if bypass_governance_mode:
headers["x-amz-bypass-governance-retention"] = "true"
response = self._execute(
"POST",
bucket_name,
body=body,
headers=headers,
query_params={"delete": ""},
)
element = ET.fromstring(response.data.decode())
return (
DeleteResult([], [DeleteError.fromxml(element)])
if element.tag.endswith("Error")
else unmarshal(DeleteResult, response.data.decode())
)
def remove_objects(self, bucket_name, delete_object_list,
bypass_governance_mode=False):
"""
Remove multiple objects.
:param bucket_name: Name of the bucket.
:param delete_object_list: An iterable containing
:class:`DeleteObject <DeleteObject>` object.
:param bypass_governance_mode: Bypass Governance retention mode.
:return: An iterator containing :class:`DeleteError <DeleteError>`
object.
Example::
# Remove list of objects.
errors = client.remove_objects(
"my-bucket",
[
DeleteObject("my-object1"),
DeleteObject("my-object2"),
DeleteObject(
"my-object3", "13f88b18-8dcd-4c83-88f2-8631fdb6250c",
),
],
)
for error in errors:
                print("error occurred when deleting object", error)
# Remove a prefix recursively.
delete_object_list = map(
lambda x: DeleteObject(x.object_name),
client.list_objects("my-bucket", "my/prefix/", recursive=True),
)
errors = client.remove_objects("my-bucket", delete_object_list)
for error in errors:
                print("error occurred when deleting object", error)
"""
check_bucket_name(bucket_name)
        # turn list-like objects into an iterator.
delete_object_list = itertools.chain(delete_object_list)
while True:
# get 1000 entries or whatever available.
objects = [
delete_object for _, delete_object in zip(
range(1000), delete_object_list,
)
]
if not objects:
break
result = self._delete_objects(
bucket_name,
objects,
quiet=True,
bypass_governance_mode=bypass_governance_mode,
)
for error in result.error_list:
                # AWS S3 returns "NoSuchVersion" error when the version
                # doesn't exist; ignore this error and yield all other
                # errors.
if error.code != "NoSuchVersion":
yield error
def get_presigned_url(self, method, bucket_name, object_name,
expires=timedelta(days=7), response_headers=None,
request_date=None, version_id=None,
extra_query_params=None):
"""
Get presigned URL of an object for HTTP method, expiry time and custom
request parameters.
:param method: HTTP method.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param expires: Expiry in seconds; defaults to 7 days.
:param response_headers: Optional response_headers argument to
specify response fields like date, size,
type of file, data about server, etc.
:param request_date: Optional request_date argument to
specify a different request date. Default is
current date.
:param version_id: Version ID of the object.
:param extra_query_params: Extra query parameters for advanced usage.
:return: URL string.
Example::
# Get presigned URL string to delete 'my-object' in
# 'my-bucket' with one day expiry.
url = client.get_presigned_url(
"DELETE",
"my-bucket",
"my-object",
expires=timedelta(days=1),
)
print(url)
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
if expires.total_seconds() < 1 or expires.total_seconds() > 604800:
            raise ValueError("expires must be between 1 second and 7 days")
region = self._get_region(bucket_name, None)
query_params = extra_query_params or {}
query_params.update({"versionId": version_id} if version_id else {})
query_params.update(response_headers or {})
creds = self._provider.retrieve() if self._provider else None
if creds and creds.session_token:
query_params["X-Amz-Security-Token"] = creds.session_token
url = self._base_url.build(
method,
region,
bucket_name=bucket_name,
object_name=object_name,
query_params=query_params,
)
if creds:
url = presign_v4(
method,
url,
region,
creds,
request_date or time.utcnow(),
int(expires.total_seconds()),
)
return urlunsplit(url)
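        # A hedged sketch of the response_headers parameter documented above,
        # using the standard S3 response-* override query parameters (values
        # are illustrative):
        #   url = client.get_presigned_url(
        #       "GET", "my-bucket", "my-object",
        #       expires=timedelta(hours=1),
        #       response_headers={"response-content-type": "application/json"},
        #   )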
def presigned_get_object(self, bucket_name, object_name,
expires=timedelta(days=7),
response_headers=None,
request_date=None,
version_id=None,
extra_query_params=None):
"""
Get presigned URL of an object to download its data with expiry time
and custom request parameters.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param expires: Expiry in seconds; defaults to 7 days.
:param response_headers: Optional response_headers argument to
specify response fields like date, size,
type of file, data about server, etc.
:param request_date: Optional request_date argument to
specify a different request date. Default is
current date.
:param version_id: Version ID of the object.
:param extra_query_params: Extra query parameters for advanced usage.
:return: URL string.
Example::
# Get presigned URL string to download 'my-object' in
# 'my-bucket' with default expiry (i.e. 7 days).
url = client.presigned_get_object("my-bucket", "my-object")
print(url)
# Get presigned URL string to download 'my-object' in
# 'my-bucket' with two hours expiry.
url = client.presigned_get_object(
"my-bucket", "my-object", expires=timedelta(hours=2),
)
print(url)
"""
return self.get_presigned_url(
"GET",
bucket_name,
object_name,
expires,
response_headers=response_headers,
request_date=request_date,
version_id=version_id,
extra_query_params=extra_query_params,
)
def presigned_put_object(self, bucket_name, object_name,
expires=timedelta(days=7)):
"""
Get presigned URL of an object to upload data with expiry time and
custom request parameters.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param expires: Expiry in seconds; defaults to 7 days.
:return: URL string.
Example::
# Get presigned URL string to upload data to 'my-object' in
# 'my-bucket' with default expiry (i.e. 7 days).
url = client.presigned_put_object("my-bucket", "my-object")
print(url)
# Get presigned URL string to upload data to 'my-object' in
# 'my-bucket' with two hours expiry.
url = client.presigned_put_object(
"my-bucket", "my-object", expires=timedelta(hours=2),
)
print(url)
"""
return self.get_presigned_url(
"PUT", bucket_name, object_name, expires,
)
def presigned_post_policy(self, policy):
"""
Get form-data of PostPolicy of an object to upload its data using POST
method.
:param policy: :class:`PostPolicy <PostPolicy>`.
:return: :dict: contains form-data.
Example::
policy = PostPolicy(
"my-bucket", datetime.utcnow() + timedelta(days=10),
)
policy.add_starts_with_condition("key", "my/object/prefix/")
policy.add_content_length_range_condition(
1*1024*1024, 10*1024*1024,
)
form_data = client.presigned_post_policy(policy)
"""
if not isinstance(policy, PostPolicy):
raise ValueError("policy must be PostPolicy type")
if not self._provider:
raise ValueError(
"anonymous access does not require presigned post form-data",
)
return policy.form_data(
self._provider.retrieve(),
self._get_region(policy.bucket_name, None),
)
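        # A hedged usage sketch for the returned form-data, posting a file with
        # the third-party `requests` package (endpoint, bucket and file name
        # are illustrative):
        #   with open("my-file.txt", "rb") as file:
        #       requests.post(
        #           "https://play.min.io/my-bucket",
        #           data=form_data,
        #           files={"file": ("my/object/prefix/my-file.txt", file)},
        #       )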
def delete_bucket_replication(self, bucket_name):
"""
Delete replication configuration of a bucket.
:param bucket_name: Name of the bucket.
Example::
client.delete_bucket_replication("my-bucket")
"""
check_bucket_name(bucket_name)
self._execute("DELETE", bucket_name, query_params={"replication": ""})
def get_bucket_replication(self, bucket_name):
"""
Get bucket replication configuration of a bucket.
:param bucket_name: Name of the bucket.
:return: :class:`ReplicationConfig <ReplicationConfig>` object.
Example::
config = client.get_bucket_replication("my-bucket")
"""
check_bucket_name(bucket_name)
try:
response = self._execute(
"GET", bucket_name, query_params={"replication": ""},
)
return unmarshal(ReplicationConfig, response.data.decode())
except S3Error as exc:
if exc.code != "ReplicationConfigurationNotFoundError":
raise
return None
def set_bucket_replication(self, bucket_name, config):
"""
Set bucket replication configuration to a bucket.
:param bucket_name: Name of the bucket.
:param config: :class:`ReplicationConfig <ReplicationConfig>` object.
Example::
config = ReplicationConfig(
"REPLACE-WITH-ACTUAL-ROLE",
[
Rule(
Destination(
"REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN",
),
ENABLED,
delete_marker_replication=DeleteMarkerReplication(
DISABLED,
),
rule_filter=Filter(
AndOperator(
"TaxDocs",
{"key1": "value1", "key2": "value2"},
),
),
rule_id="rule1",
priority=1,
),
],
)
client.set_bucket_replication("my-bucket", config)
"""
check_bucket_name(bucket_name)
if not isinstance(config, ReplicationConfig):
raise ValueError("config must be ReplicationConfig type")
body = marshal(config)
self._execute(
"PUT",
bucket_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params={"replication": ""},
)
def delete_bucket_lifecycle(self, bucket_name):
"""
        Delete lifecycle configuration of a bucket.
:param bucket_name: Name of the bucket.
Example::
client.delete_bucket_lifecycle("my-bucket")
"""
check_bucket_name(bucket_name)
self._execute("DELETE", bucket_name, query_params={"lifecycle": ""})
def get_bucket_lifecycle(self, bucket_name):
"""
Get bucket lifecycle configuration of a bucket.
:param bucket_name: Name of the bucket.
:return: :class:`LifecycleConfig <LifecycleConfig>` object.
Example::
config = client.get_bucket_lifecycle("my-bucket")
"""
check_bucket_name(bucket_name)
try:
response = self._execute(
"GET", bucket_name, query_params={"lifecycle": ""},
)
return unmarshal(LifecycleConfig, response.data.decode())
except S3Error as exc:
if exc.code != "NoSuchLifecycleConfiguration":
raise
return None
def set_bucket_lifecycle(self, bucket_name, config):
"""
Set bucket lifecycle configuration to a bucket.
:param bucket_name: Name of the bucket.
:param config: :class:`LifecycleConfig <LifecycleConfig>` object.
Example::
config = LifecycleConfig(
[
Rule(
ENABLED,
rule_filter=Filter(prefix="documents/"),
rule_id="rule1",
transition=Transition(
days=30, storage_class="GLACIER",
),
),
Rule(
ENABLED,
rule_filter=Filter(prefix="logs/"),
rule_id="rule2",
expiration=Expiration(days=365),
),
],
)
client.set_bucket_lifecycle("my-bucket", config)
"""
check_bucket_name(bucket_name)
if not isinstance(config, LifecycleConfig):
raise ValueError("config must be LifecycleConfig type")
body = marshal(config)
self._execute(
"PUT",
bucket_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params={"lifecycle": ""},
)
def delete_bucket_tags(self, bucket_name):
"""
Delete tags configuration of a bucket.
:param bucket_name: Name of the bucket.
Example::
client.delete_bucket_tags("my-bucket")
"""
check_bucket_name(bucket_name)
self._execute("DELETE", bucket_name, query_params={"tagging": ""})
def get_bucket_tags(self, bucket_name):
"""
Get tags configuration of a bucket.
:param bucket_name: Name of the bucket.
:return: :class:`Tags <Tags>` object.
Example::
tags = client.get_bucket_tags("my-bucket")
"""
check_bucket_name(bucket_name)
try:
response = self._execute(
"GET", bucket_name, query_params={"tagging": ""},
)
tagging = unmarshal(Tagging, response.data.decode())
return tagging.tags
except S3Error as exc:
if exc.code != "NoSuchTagSet":
raise
return None
def set_bucket_tags(self, bucket_name, tags):
"""
Set tags configuration to a bucket.
:param bucket_name: Name of the bucket.
:param tags: :class:`Tags <Tags>` object.
Example::
tags = Tags.new_bucket_tags()
tags["Project"] = "Project One"
tags["User"] = "jsmith"
client.set_bucket_tags("my-bucket", tags)
"""
check_bucket_name(bucket_name)
if not isinstance(tags, Tags):
raise ValueError("tags must be Tags type")
body = marshal(Tagging(tags))
self._execute(
"PUT",
bucket_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params={"tagging": ""},
)
def delete_object_tags(self, bucket_name, object_name, version_id=None):
"""
Delete tags configuration of an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param version_id: Version ID of the Object.
Example::
client.delete_object_tags("my-bucket", "my-object")
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
query_params = {"versionId": version_id} if version_id else {}
query_params["tagging"] = ""
self._execute(
"DELETE",
bucket_name,
object_name=object_name,
query_params=query_params,
)
def get_object_tags(self, bucket_name, object_name, version_id=None):
"""
        Get tags configuration of an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param version_id: Version ID of the Object.
:return: :class:`Tags <Tags>` object.
Example::
tags = client.get_object_tags("my-bucket", "my-object")
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
query_params = {"versionId": version_id} if version_id else {}
query_params["tagging"] = ""
try:
response = self._execute(
"GET",
bucket_name,
object_name=object_name,
query_params=query_params,
)
tagging = unmarshal(Tagging, response.data.decode())
return tagging.tags
except S3Error as exc:
if exc.code != "NoSuchTagSet":
raise
return None
def set_object_tags(self, bucket_name, object_name, tags, version_id=None):
"""
Set tags configuration to an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param version_id: Version ID of the Object.
:param tags: :class:`Tags <Tags>` object.
Example::
tags = Tags.new_object_tags()
tags["Project"] = "Project One"
tags["User"] = "jsmith"
client.set_object_tags("my-bucket", "my-object", tags)
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
if not isinstance(tags, Tags):
raise ValueError("tags must be Tags type")
body = marshal(Tagging(tags))
query_params = {"versionId": version_id} if version_id else {}
query_params["tagging"] = ""
self._execute(
"PUT",
bucket_name,
object_name=object_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params=query_params,
)
def enable_object_legal_hold(
self, bucket_name, object_name, version_id=None,
):
"""
Enable legal hold on an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param version_id: Version ID of the object.
Example::
client.enable_object_legal_hold("my-bucket", "my-object")
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
body = marshal(LegalHold(True))
        query_params = {"versionId": version_id} if version_id else {}
query_params["legal-hold"] = ""
self._execute(
"PUT",
bucket_name,
object_name=object_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params=query_params,
)
def disable_object_legal_hold(
self, bucket_name, object_name, version_id=None,
):
"""
Disable legal hold on an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param version_id: Version ID of the object.
Example::
client.disable_object_legal_hold("my-bucket", "my-object")
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
body = marshal(LegalHold(False))
        query_params = {"versionId": version_id} if version_id else {}
query_params["legal-hold"] = ""
self._execute(
"PUT",
bucket_name,
object_name=object_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params=query_params,
)
def is_object_legal_hold_enabled(
self, bucket_name, object_name, version_id=None,
):
"""
Returns true if legal hold is enabled on an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param version_id: Version ID of the object.
Example::
if client.is_object_legal_hold_enabled("my-bucket", "my-object"):
print("legal hold is enabled on my-object")
else:
print("legal hold is not enabled on my-object")
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
        query_params = {"versionId": version_id} if version_id else {}
query_params["legal-hold"] = ""
try:
response = self._execute(
"GET",
bucket_name,
object_name=object_name,
query_params=query_params,
)
legal_hold = unmarshal(LegalHold, response.data.decode())
return legal_hold.status
except S3Error as exc:
if exc.code != "NoSuchObjectLockConfiguration":
raise
return False
def delete_object_lock_config(self, bucket_name):
"""
Delete object-lock configuration of a bucket.
:param bucket_name: Name of the bucket.
Example::
client.delete_object_lock_config("my-bucket")
"""
self.set_object_lock_config(
bucket_name, ObjectLockConfig(None, None, None)
)
def get_object_lock_config(self, bucket_name):
"""
Get object-lock configuration of a bucket.
:param bucket_name: Name of the bucket.
:return: :class:`ObjectLockConfig <ObjectLockConfig>` object.
Example::
config = client.get_object_lock_config("my-bucket")
"""
check_bucket_name(bucket_name)
response = self._execute(
"GET", bucket_name, query_params={"object-lock": ""},
)
return unmarshal(ObjectLockConfig, response.data.decode())
def set_object_lock_config(self, bucket_name, config):
"""
Set object-lock configuration to a bucket.
:param bucket_name: Name of the bucket.
:param config: :class:`ObjectLockConfig <ObjectLockConfig>` object.
Example::
config = ObjectLockConfig(GOVERNANCE, 15, DAYS)
            client.set_object_lock_config("my-bucket", config)
"""
check_bucket_name(bucket_name)
if not isinstance(config, ObjectLockConfig):
raise ValueError("config must be ObjectLockConfig type")
body = marshal(config)
self._execute(
"PUT",
bucket_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params={"object-lock": ""},
)
def get_object_retention(
self, bucket_name, object_name, version_id=None,
):
"""
Get retention configuration of an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param version_id: Version ID of the object.
:return: :class:`Retention <Retention>` object.
Example::
config = client.get_object_retention("my-bucket", "my-object")
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
        query_params = {"versionId": version_id} if version_id else {}
query_params["retention"] = ""
try:
response = self._execute(
"GET",
bucket_name,
object_name=object_name,
query_params=query_params,
)
return unmarshal(Retention, response.data.decode())
except S3Error as exc:
if exc.code != "NoSuchObjectLockConfiguration":
raise
return None
def set_object_retention(
self, bucket_name, object_name, config, version_id=None,
):
"""
Set retention configuration on an object.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param version_id: Version ID of the object.
:param config: :class:`Retention <Retention>` object.
Example::
config = Retention(
GOVERNANCE, datetime.utcnow() + timedelta(days=10),
)
client.set_object_retention("my-bucket", "my-object", config)
"""
check_bucket_name(bucket_name)
check_non_empty_string(object_name)
if not isinstance(config, Retention):
raise ValueError("config must be Retention type")
body = marshal(config)
        query_params = {"versionId": version_id} if version_id else {}
query_params["retention"] = ""
self._execute(
"PUT",
bucket_name,
object_name=object_name,
body=body,
headers={"Content-MD5": md5sum_hash(body)},
query_params=query_params,
)
def _list_objects( # pylint: disable=too-many-arguments,too-many-branches
self,
bucket_name,
continuation_token=None, # listV2 only
delimiter=None, # all
encoding_type=None, # all
fetch_owner=None, # listV2 only
include_user_meta=None, # MinIO specific listV2.
max_keys=None, # all
prefix=None, # all
start_after=None, # all: v1:marker, versioned:key_marker
version_id_marker=None, # versioned
use_api_v1=False,
include_version=False,
):
"""
List objects optionally including versions.
        Note: It is required to send empty values for delimiter/prefix and 1000
        for max-keys when they are not provided, so that server-side bucket
        policy evaluation succeeds; otherwise an AccessDenied error will be
        returned for such policies.
"""
check_bucket_name(bucket_name)
if version_id_marker:
include_version = True
is_truncated = True
while is_truncated:
query = {}
if include_version:
query["versions"] = ""
elif not use_api_v1:
query["list-type"] = "2"
if not include_version and not use_api_v1:
if continuation_token:
query["continuation-token"] = continuation_token
if fetch_owner:
query["fetch-owner"] = "true"
if include_user_meta:
query["metadata"] = "true"
query["delimiter"] = delimiter or ""
if encoding_type:
query["encoding-type"] = encoding_type
query["max-keys"] = str(max_keys or 1000)
query["prefix"] = prefix or ""
if start_after:
if include_version:
query["key-marker"] = start_after
elif use_api_v1:
query["marker"] = start_after
else:
query["start-after"] = start_after
if version_id_marker:
query["version-id-marker"] = version_id_marker
response = self._execute("GET", bucket_name, query_params=query)
objects, is_truncated, start_after, version_id_marker = (
parse_list_objects(response, bucket_name)
)
if not include_version:
version_id_marker = None
if not use_api_v1:
continuation_token = start_after
for obj in objects:
yield obj
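        # For illustration, the first ListObjectsV2 page for prefix "my/prefix/"
        # with the defaults above sends roughly these query parameters
        # (a sketch; the exact dict depends on the arguments passed):
        #   {"list-type": "2", "delimiter": "", "max-keys": "1000",
        #    "prefix": "my/prefix/"}
        # subsequent pages add "continuation-token" until the listing is no
        # longer truncated.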
def _list_multipart_uploads(self, bucket_name, delimiter=None,
encoding_type=None, key_marker=None,
max_uploads=None, prefix=None,
upload_id_marker=None, extra_headers=None,
extra_query_params=None):
"""
Execute ListMultipartUploads S3 API.
:param bucket_name: Name of the bucket.
:param delimiter: (Optional) Delimiter on listing.
:param encoding_type: (Optional) Encoding type.
:param key_marker: (Optional) Key marker.
:param max_uploads: (Optional) Maximum upload information to fetch.
:param prefix: (Optional) Prefix on listing.
:param upload_id_marker: (Optional) Upload ID marker.
:param extra_headers: (Optional) Extra headers for advanced usage.
:param extra_query_params: (Optional) Extra query parameters for
advanced usage.
:return:
:class:`ListMultipartUploadsResult <ListMultipartUploadsResult>`
object
"""
query_params = extra_query_params or {}
query_params.update(
{
"uploads": "",
"delimiter": delimiter or "",
"max-uploads": str(max_uploads or 1000),
"prefix": prefix or "",
"encoding-type": "url",
},
)
if encoding_type:
query_params["encoding-type"] = encoding_type
if key_marker:
query_params["key-marker"] = key_marker
if upload_id_marker:
query_params["upload-id-marker"] = upload_id_marker
response = self._execute(
"GET",
bucket_name,
query_params=query_params,
headers=extra_headers,
)
return ListMultipartUploadsResult(response)
def _list_parts(self, bucket_name, object_name, upload_id,
max_parts=None, part_number_marker=None,
extra_headers=None, extra_query_params=None):
"""
Execute ListParts S3 API.
:param bucket_name: Name of the bucket.
:param object_name: Object name in the bucket.
:param upload_id: Upload ID.
:param max_parts: (Optional) Maximum parts information to fetch.
:param part_number_marker: (Optional) Part number marker.
:param extra_headers: (Optional) Extra headers for advanced usage.
:param extra_query_params: (Optional) Extra query parameters for
advanced usage.
:return: :class:`ListPartsResult <ListPartsResult>` object
"""
query_params = extra_query_params or {}
query_params.update(
{
"uploadId": upload_id,
"max-parts": str(max_parts or 1000),
},
)
if part_number_marker:
query_params["part-number-marker"] = part_number_marker
response = self._execute(
"GET",
bucket_name,
object_name=object_name,
query_params=query_params,
headers=extra_headers,
)
return ListPartsResult(response)
| 36.321379 | 79 | 0.55088 |
7d0f2eae4ff3614d4d3284ac51a58fb9dc37e3a1
| 125 |
py
|
Python
|
djangoserver/src/companies/admin.py
|
Higgins723/web-homework
|
f10d33fd6c5dface9350f95dab8ba2dcc7c9660f
|
[
"MIT"
] | null | null | null |
djangoserver/src/companies/admin.py
|
Higgins723/web-homework
|
f10d33fd6c5dface9350f95dab8ba2dcc7c9660f
|
[
"MIT"
] | null | null | null |
djangoserver/src/companies/admin.py
|
Higgins723/web-homework
|
f10d33fd6c5dface9350f95dab8ba2dcc7c9660f
|
[
"MIT"
] | 1 |
2022-01-19T06:55:41.000Z
|
2022-01-19T06:55:41.000Z
|
from django.contrib import admin
from .models import Companies
# Register your models here.
admin.site.register(Companies)
| 17.857143 | 32 | 0.808 |
d819a766df10675ab1a886a3bea0c2f0ac9ee236
| 571 |
py
|
Python
|
shadrus/article/migrations/0005_comments_comments_from.py
|
Timurdov/bionicprojectpython
|
4bec61e43b35733d2281928771088b8df420e4ea
|
[
"Apache-2.0"
] | null | null | null |
shadrus/article/migrations/0005_comments_comments_from.py
|
Timurdov/bionicprojectpython
|
4bec61e43b35733d2281928771088b8df420e4ea
|
[
"Apache-2.0"
] | null | null | null |
shadrus/article/migrations/0005_comments_comments_from.py
|
Timurdov/bionicprojectpython
|
4bec61e43b35733d2281928771088b8df420e4ea
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('article', '0004_auto_20150217_1539'),
]
operations = [
migrations.AddField(
model_name='comments',
name='comments_from',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| 24.826087 | 76 | 0.654991 |
afe03611890cb1ef091505d42b0ff8b51507f342
| 8,142 |
py
|
Python
|
hojehatransportes/hat/feeds.py
|
jpgneves/hojehatransportes
|
00913462d997d6c1aabfa3b8292072c9f928939a
|
[
"MIT"
] | null | null | null |
hojehatransportes/hat/feeds.py
|
jpgneves/hojehatransportes
|
00913462d997d6c1aabfa3b8292072c9f928939a
|
[
"MIT"
] | 1 |
2015-12-14T06:40:15.000Z
|
2015-12-14T06:40:15.000Z
|
hojehatransportes/hat/feeds.py
|
jpgneves/hojehatransportes
|
00913462d997d6c1aabfa3b8292072c9f928939a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import force_unicode, iri_to_uri, smart_unicode
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.conf import settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from django_cal.views import Events
from datetime import datetime, date, timedelta
import django_cal
import locale
import dateutil
from hojehatransportes.hat.models import Strike, Region
locale.setlocale(locale.LC_ALL, "pt_PT.UTF-8")
tzlx = dateutil.tz.gettz('Europe/Lisbon')
def strikeItems():
return Strike.objects.filter(start_date__gte=datetime.today().date()).order_by('start_date').exclude(approved=False)[:20]
# LolFeed: Because "lol, Django". Search for CHANGE
class LolFeed(Feed):
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
#print "ARGH! ",
#print Site._meta.installed,
#print " (" + str(Site.objects.get_current()) + "), )",
#print ", ",
#print RequestSite(request)
#if Site._meta.installed:
# current_site = Site.objects.get_current()
#else:
current_site = RequestSite(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link)
feed = self.feed_type(
title = self.__get_dynamic_attr('title', obj),
subtitle = self.__get_dynamic_attr('subtitle', obj),
link = link,
description = self.__get_dynamic_attr('description', obj),
language = settings.LANGUAGE_CODE.decode(),
feed_url = add_domain(current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path),
author_name = self.__get_dynamic_attr('author_name', obj),
author_link = self.__get_dynamic_attr('author_link', obj),
author_email = self.__get_dynamic_attr('author_email', obj),
categories = self.__get_dynamic_attr('categories', obj),
feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
feed_guid = self.__get_dynamic_attr('feed_guid', obj),
ttl = self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(current_site.domain, self.__get_dynamic_attr('item_link', item))
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url = smart_unicode(enc_url),
length = smart_unicode(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type = smart_unicode(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and not pubdate.tzinfo:
ltz = tzinfo.LocalTimezone(pubdate)
pubdate = pubdate.replace(tzinfo=ltz)
feed.add_item(
title = title,
link = link,
description = description,
unique_id = self.__get_dynamic_attr('item_guid', item, link),
enclosure = enc,
pubdate = pubdate,
author_name = author_name,
author_email = author_email,
author_link = author_link,
categories = self.__get_dynamic_attr('item_categories', item),
item_copyright = self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
def add_domain(domain, url):
if not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
# 'url' must already be ASCII and URL-quoted, so no need for encoding
# conversions here.
url = iri_to_uri(u'http://%s%s' % (domain, url))
return url
class RssFeed(Feed):
"""Generate an RSS of the strikes"""
title = u'Hoje há greve?'
link = 'http://hagreve.com'
description = u'Veja se consegue chegar ao trabalho. Lembre-se que as informações podem estar desactualizadas.'
author_name = 'hagreve.com'
author_link = 'http://hagreve.com'
author_email = '[email protected]'
copyright = 'hagreve.com, ' + str(datetime.now().year)
def items(self):
return strikeItems().reverse()
def item_title(self, strike):
return strike.company.name + ' - ' + strike.region.name
description_template = 'feeds/rss_description.html'
def item_summary(self, strike):
return strike.company.name + ' - ' + strike.region.name
def item_link(self, strike):
return strike.get_absolute_url().replace('example', 'hagreve')
def item_pubdate(self, strike):
return strike.start_date
class AtomFeed(RssFeed):
feed_type = Atom1Feed
subtitle = RssFeed.description
class IcsFeed(Events):
filename = 'greves.ics'
def cal_name(self):
return u'Hoje há greve?'
def cal_desc(self):
return u'Veja se consegue chegar ao trabalho. Lembre-se que as informações podem estar desactualizadas.'
def items(self):
return filter((lambda x: not x.canceled), strikeItems())
def item_summary(self, strike):
return 'Greve da ' + strike.company.name + ' - ' + strike.region.name
def item_title(self, strike):
return strike.company.name + ' - ' + strike.region.name
def item_start(self, strike):
if strike.start_date == strike.end_date or strike.all_day:
return strike.start_date.date()
return strike.start_date.replace(tzinfo=tzlx)
def item_end(self, strike):
if strike.all_day or strike.start_date == strike.end_date:
return strike.start_date.date() + timedelta(days=1)
return strike.end_date.replace(tzinfo=tzlx)
def item_description(self, strike):
return self.item_comment(strike)
def item_comment(self, strike): #TODO: Correct this for all-day events
if strike.all_day:
return 'Greve da ' + strike.company.name + '\n' + 'Todo o dia ' + str(strike.start_date.date()) + '\n' + strike.description
return 'Greve da ' + strike.company.name + '\n' + 'De ' + str(strike.start_date) + ' a ' + str(strike.end_date) + '\n' + strike.description
#def item_link(self, strike):
# return strike.get_absolute_url().replace('example', 'hagreve')
| 39.524272 | 147 | 0.633997 |
c4b236bb7a8d9b3bab0b1123b6f212e3fb0e7d10
| 5,118 |
py
|
Python
|
pytorch/main.py
|
dberga/deep_utils
|
3ad59cb225a58021e1abbdc8a5b867023e2bcd9d
|
[
"Apache-2.0"
] | null | null | null |
pytorch/main.py
|
dberga/deep_utils
|
3ad59cb225a58021e1abbdc8a5b867023e2bcd9d
|
[
"Apache-2.0"
] | null | null | null |
pytorch/main.py
|
dberga/deep_utils
|
3ad59cb225a58021e1abbdc8a5b867023e2bcd9d
|
[
"Apache-2.0"
] | null | null | null |
#wget https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz
#tar -xvf flower_data.tar.gz flowers
#code adapted from https://github.com/Muhammad-MujtabaSaeed/102-Flowers-Classification/blob/master/102_Flowers_classification.ipynb
import torch
from torch import nn, optim
from torch.optim import lr_scheduler
import torch.utils.data as data
import torchvision
from torchvision import datasets, models, transforms
import json
import urllib.request
import copy
import seaborn as sns
from PIL import Image
from collections import OrderedDict
#matplotlib inline
#config InlineBackend.figure_format = 'retina'
import os
CUDA_VERSION=10.0
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" #use nvidia-smi bus id order
os.environ["CUDA_VISIBLE_DEVICES"]=str(1) #using nvidia ID 1
os.environ['PATH'] += ':/usr/local/cuda%s/bin/'%str(CUDA_VERSION); #path to cuda10.0
os.environ['LD_LIBRARY_PATH']= ':/usr/local/cuda%s/lib64/'%str(CUDA_VERSION) + ':/usr/lib/x86_64-linux-gnu/'; #path to cuda/cudnn lib
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #remove useless logs
import visualize_utils
import training_utils
################################################## PREPARE DATASET ##################################################
data_dir = './flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomRotation(45),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]),
'valid': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
}
image_datasets = {
x: datasets.ImageFolder(root=data_dir + '/' + x, transform=data_transforms[x])
for x in list(data_transforms.keys())
}
dataloaders = {
x: data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=2)
for x in list(image_datasets.keys())
}
dataset_sizes = {
x: len(dataloaders[x].dataset)
for x in list(image_datasets.keys())
}
class_names = image_datasets['train'].classes
dataset_sizes
urllib.request.urlretrieve('https://raw.githubusercontent.com/Muhammad-MujtabaSaeed/102-Flowers-Classification/master/cat_to_name.json','cat_to_name.json')
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
for i in range(0,len(class_names)):
class_names[i] = cat_to_name.get(class_names[i])
inputs, classes = next(iter(dataloaders['train']))
out = torchvision.utils.make_grid(inputs)
################################################## VISUALIZE DATASET ##################################################
#visualize_utils.imshow(out, title=[class_names[x] for x in classes])
################################################## PREPARE MODEL ##################################################
model_ft = models.resnet18(pretrained=True) # loading a pre-trained(trained on image net) resnet18 model from torchvision models
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 102) # changing the last layer for this dataset by setting last layer neurons to 102 as this dataset has 102 categories
################################################## PREPARE OPTIMIZER ##################################################
criterion = nn.CrossEntropyLoss() # defining loss function
use_gpu = torch.cuda.is_available() # if gpu is available then use it
if use_gpu:
    model_ft = model_ft.cuda()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.0001, momentum=0.9) # defining optimizer with learning rate set to 0.0001
try:
    # resume from a saved checkpoint if one exists
    checkpoint = torch.load('point_resnet_best.pth')
    model_ft.load_state_dict(checkpoint['model'])
    optimizer_ft.load_state_dict(checkpoint['optim'])
except Exception:
    pass # no checkpoint found, start from the pre-trained ImageNet weights
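# For reference, a checkpoint compatible with the loading code above can be
# produced with (same file name as the one loaded above):
#torch.save({'model': model_ft.state_dict(), 'optim': optimizer_ft.state_dict()}, 'point_resnet_best.pth')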
################################################## TRAIN ##################################################
#model_ft = training_utils.train_model(model_ft, criterion, optimizer_ft,dataloaders=dataloaders,use_gpu=use_gpu,dataset_sizes=dataset_sizes,num_epochs=20)
################################################## VISUALIZE ##################################################
#visualize_utils.visualize_model(model=model_ft,dataloaders=dataloaders,use_gpu=use_gpu,class_names=class_names,num_images=8)
################################################## VISUALIZE ##################################################
top1 ,top5 = training_utils.calc_accuracy(model_ft, 'test',dataloaders,use_gpu,dataset_sizes)
print(float(top1.avg))
print(float(top5.avg))
| 35.541667 | 155 | 0.617624 |
e3f7e7e2e1279a68fb92616082db7c9ba04241b4
| 562 |
py
|
Python
|
lowfat/utils.py
|
elena-kolomeets/lowfat
|
f7647f5cd12519f722e41808157a96cc3e37b6ce
|
[
"BSD-3-Clause"
] | 6 |
2017-02-23T16:44:36.000Z
|
2019-03-18T11:39:03.000Z
|
lowfat/utils.py
|
elena-kolomeets/lowfat
|
f7647f5cd12519f722e41808157a96cc3e37b6ce
|
[
"BSD-3-Clause"
] | 286 |
2017-02-07T15:00:41.000Z
|
2022-03-08T12:56:09.000Z
|
lowfat/utils.py
|
elena-kolomeets/lowfat
|
f7647f5cd12519f722e41808157a96cc3e37b6ce
|
[
"BSD-3-Clause"
] | 2 |
2018-06-19T12:38:08.000Z
|
2020-11-23T12:15:08.000Z
|
"""
This module contains small utility classes and functions which do not clearly belong to one part of the project.
"""
import enum
class ChoicesEnum(enum.Enum):
"""
Abstract Enum class to represent values in a Django CharField choices.
"""
@classmethod
def choices(cls):
"""
Get the list of choices for this class.
The name of the enum field is used as the human readable name.
The value of the enum field is stored in the database.
"""
return tuple((tag.value, tag.name) for tag in cls)
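# A minimal usage sketch (hypothetical subclass and model field, not part of
# this module):
#
#     class GrantStatus(ChoicesEnum):
#         Approved = "A"
#         Rejected = "R"
#
#     status = models.CharField(
#         max_length=1,
#         choices=GrantStatus.choices(),
#         default=GrantStatus.Approved.value,
#     )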
| 26.761905 | 112 | 0.658363 |
fbf6855d0b32768ac43601a056347d6f67bce3aa
| 1,888 |
py
|
Python
|
ipyx/__init__.py
|
davidbrochart/ipyx
|
bb8a8dbdeb3443d9a76949b92fe03e98802ddcfe
|
[
"BSD-3-Clause"
] | 3 |
2021-05-23T05:16:44.000Z
|
2021-08-25T05:19:50.000Z
|
ipyx/__init__.py
|
davidbrochart/ipyx
|
bb8a8dbdeb3443d9a76949b92fe03e98802ddcfe
|
[
"BSD-3-Clause"
] | 6 |
2021-05-23T09:52:08.000Z
|
2021-12-19T20:18:18.000Z
|
ipyx/__init__.py
|
davidbrochart/ipyx
|
bb8a8dbdeb3443d9a76949b92fe03e98802ddcfe
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) David Brochart.
# Distributed under the terms of the Modified BSD License.
from .x import X # noqa
from .f import F # noqa
from ._version import __version__, version_info # noqa
def _jupyter_labextension_paths():
"""Called by Jupyter Lab Server to detect if it is a valid labextension and
to install the widget
Returns
=======
src: Source directory name to copy files from. Webpack outputs generated files
into this directory and Jupyter Lab copies from this directory during
widget installation
dest: Destination directory name to install widget files to. Jupyter Lab copies
from `src` directory into <jupyter path>/labextensions/<dest> directory
during widget installation
"""
return [
{
"src": "labextension",
"dest": "ipyx",
}
]
def _jupyter_nbextension_paths():
"""Called by Jupyter Notebook Server to detect if it is a valid nbextension and
to install the widget
Returns
=======
section: The section of the Jupyter Notebook Server to change.
Must be 'notebook' for widget extensions
src: Source directory name to copy files from. Webpack outputs generated files
into this directory and Jupyter Notebook copies from this directory during
widget installation
dest: Destination directory name to install widget files to. Jupyter Notebook copies
from `src` directory into <jupyter path>/nbextensions/<dest> directory
during widget installation
require: Path to importable AMD Javascript module inside the
<jupyter path>/nbextensions/<dest> directory
"""
return [
{
"section": "notebook",
"src": "nbextension",
"dest": "ipyx",
"require": "ipyx/extension",
}
]
| 33.714286 | 88 | 0.663665 |
a2476a62ce82b865eff5d0b8d8d1da02baaf8c93
| 1,997 |
py
|
Python
|
Internet-technology/project2/http_protocol/response.py
|
xuwhao/neu-cs-master
|
6c6f3290c56170bf66353c8501092dd6401676b8
|
[
"MIT"
] | 1 |
2021-11-21T08:42:02.000Z
|
2021-11-21T08:42:02.000Z
|
Internet-technology/project2/http_protocol/response.py
|
xuwhao/neu-cs-master
|
6c6f3290c56170bf66353c8501092dd6401676b8
|
[
"MIT"
] | null | null | null |
Internet-technology/project2/http_protocol/response.py
|
xuwhao/neu-cs-master
|
6c6f3290c56170bf66353c8501092dd6401676b8
|
[
"MIT"
] | null | null | null |
"""
HTTP response.
"""
import logging
from status_codes import HTTP_STATUS_CODES
Log = logging.getLogger('StaticHttpServer.response')
class HttpResponse(object):
def __init__(self, protocol, status_code, range=None):
assert status_code in HTTP_STATUS_CODES, 'Unknown status code.'
self.protocol = protocol
self.status_code = status_code
self.headers = {}
self.range = range
self.content = ''
self.file = None
def __str__(self):
        return 'HttpResponse (protocol=%s, status_code=%s)' % \
               (self.protocol, self.status_code)
def write_to(self, output):
if self.file:
self.headers['Content-type'] = self.file.mime_type
self.headers['Content-Length'] = self.file.file_size
self.headers['Accept-Ranges'] = 'bytes'
if self.range:
range_start, range_end = self.file.calculate_range(self.range)
self.headers['Content-Range'] = 'bytes %s-%s/%s' % (range_start, range_end,
self.file.file_size)
self.headers['Content-Length'] = range_end - range_start + 1
response_msg = render_http_response(self)
output.sendall(response_msg)
Log.debug('Response:\n%s', response_msg)
if self.file:
self.file.stream_to(output, range=self.file.calculate_range(self.range))
def render_http_response(response):
ret_val = []
response_line = '%s %s %s' % (response.protocol, response.status_code,
HTTP_STATUS_CODES[response.status_code][0])
ret_val.append(response_line)
for key, value in response.headers.iteritems():
header_line = '%s: %s' % (key, value)
ret_val.append(header_line)
ret_val.append('')
if response.content:
ret_val.append(response.content)
else:
ret_val.append('')
return '\n'.join(ret_val)
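# For illustration, a rendered response for a small HTML file might look like
# the following (header values are hypothetical); the file body itself is
# streamed separately by HttpResponse.write_to:
#   HTTP/1.1 200 OK
#   Content-type: text/html
#   Content-Length: 1024
#   Accept-Ranges: bytes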
| 28.528571 | 91 | 0.601903 |
3d90cf6b9f4896e58e73893137623034b81204d6
| 4,421 |
py
|
Python
|
scripts/twitter_streaming.py
|
aws-samples/finding-missing-persons-using-social-media-and-amazon-rekognition
|
1225a51ddf7e2eb8c4316a9fca3d5ec24e9c61a0
|
[
"MIT-0"
] | 52 |
2017-11-27T18:51:25.000Z
|
2022-01-02T13:49:21.000Z
|
scripts/twitter_streaming.py
|
aws-samples/finding-missing-persons-using-social-media-and-amazon-rekognition
|
1225a51ddf7e2eb8c4316a9fca3d5ec24e9c61a0
|
[
"MIT-0"
] | 1 |
2020-11-30T20:03:36.000Z
|
2020-11-30T20:03:36.000Z
|
scripts/twitter_streaming.py
|
aws-samples/finding-missing-persons-using-social-media-and-amazon-rekognition
|
1225a51ddf7e2eb8c4316a9fca3d5ec24e9c61a0
|
[
"MIT-0"
] | 31 |
2017-11-27T19:28:39.000Z
|
2021-12-12T12:18:39.000Z
|
import json
import boto3
import requests
from textblob import TextBlob
from ConfigParser import SafeConfigParser
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
# Read the Config File to get the twitter keys and tokens
config = SafeConfigParser()
config.read('twitter-rekognition.config')
# Create an S3 client
s3 = boto3.client('s3')
bucket = config.get('s3', 'twitter_bucket')
# Firehose delivery stream to stream tweets
fh = boto3.client('firehose')
deliverystream_name = config.get('firehose', 'deliverystream_name')
# Twitter Configuration keys
consumer_secret = config.get('keys', 'consumer_secret')
consumer_key = config.get('keys', 'consumer_key')
access_token = config.get('keys', 'access_token')
access_token_secret = config.get('keys', 'access_token_secret')
# Twitter user
user = "awsgrant"
if __name__ == '__main__':
try:
oauth = OAuth(access_token, access_token_secret, consumer_key, consumer_secret)
# Connect to Twitter Streaming API
#twitter_stream = TwitterStream(auth = oauth)
# UNCOMMENT when ready to test
twitter_stream = TwitterStream(auth = oauth, secure = True)
# Get an iterator on the public data following through Twitter
#tweet_iterator = twitter_stream.statuses.filter(locations='-180,-90,180,90')
#print(json.loads(twitter_stream))
# UNCOMMENT when ready to test
tweets = twitter_stream.statuses.filter(track=user)
for tweet in tweets:
#print json.dumps(tweet, indent=2, sort_keys=True)
#entities = tweet.get("entities")
entities = tweet.get("extended_entities")
print json.dumps(entities, indent=2, sort_keys=True)
if (entities):
print json.dumps(entities, indent=2, sort_keys=True)
media_list = entities.get("media")
if (media_list):
for media in media_list:
if (media.get("type", None) == "photo"):
#print json.dumps(media, indent=2, sort_keys=True)
twitter_data = {}
description = tweet.get("user").get("description")
loc = tweet.get("user").get("location")
text = tweet.get("text")
coords = tweet.get("coordinates")
geo = tweet.get("geo")
name = tweet.get("user").get("screen_name")
user_created = tweet.get("user").get("created_at")
followers = tweet.get("user").get("followers_count")
id_str = tweet.get("id_str")
created = tweet.get("created_at")
retweets = tweet.get("retweet_count")
bg_color = tweet.get("user").get("profile_background_color")
blob = TextBlob(text)
sent = blob.sentiment
image_url = media.get("media_url")
twitter_data['description'] = description
twitter_data['loc'] = loc
twitter_data['text'] = text
twitter_data['coords'] = coords
twitter_data['geo'] = geo
twitter_data['name'] = name
twitter_data['user_created'] = user_created
twitter_data['followers'] = followers
twitter_data['id_str'] = id_str
twitter_data['created'] = created
twitter_data['retweets'] = retweets
twitter_data['bg_color'] = bg_color
twitter_data['sent'] = sent
twitter_data['image_url'] = image_url
# Stream the content via Kinesis Firehose Deliver to S3
print("Sending to Kinesis")
response = fh.put_record(
DeliveryStreamName=deliverystream_name,
Record = {'Data': json.dumps(twitter_data, indent = 4)}
)
except Exception as e:
print (e)
| 44.21 | 88 | 0.533816 |
958ff706fc6e966657c2efe597083e032af3528d
| 1,275 |
py
|
Python
|
cohesity_management_sdk/models/tenant_id_data.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18 |
2019-09-24T17:35:53.000Z
|
2022-03-25T08:08:47.000Z
|
cohesity_management_sdk/models/tenant_id_data.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18 |
2019-03-29T19:32:29.000Z
|
2022-01-03T23:16:45.000Z
|
cohesity_management_sdk/models/tenant_id_data.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 16 |
2019-02-27T06:54:12.000Z
|
2021-11-16T18:10:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class TenantIdData(object):
"""Implementation of the 'TenantIdData' model.
Specifies id of a tenant.
Attributes:
tenant_id (string): Specifies the unique id of the tenant.
"""
# Create a mapping from Model property names to API property names
_names = {
"tenant_id":'tenantId'
}
def __init__(self,
tenant_id=None):
"""Constructor for the TenantIdData class"""
# Initialize members of the class
self.tenant_id = tenant_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
tenant_id = dictionary.get('tenantId')
# Return an object of this model
return cls(tenant_id)
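# --- Illustrative usage (added; not part of the generated SDK) ---
# Shows the round trip from the API property name 'tenantId' to the model
# attribute defined above.
if __name__ == '__main__':
    tenant = TenantIdData.from_dictionary({'tenantId': 'tenant-001'})
    print(tenant.tenant_id)  # -> tenant-001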
| 24.056604 | 81 | 0.61098 |
020b186e6509773da609d05764751a4a7af166bb
| 707 |
py
|
Python
|
MoinMoin/datastruct/__init__.py
|
RealTimeWeb/wikisite
|
66a22c68c172f0ebb3c88a9885ccd33e2d59c3c5
|
[
"Apache-2.0"
] | 1 |
2016-04-01T04:02:28.000Z
|
2016-04-01T04:02:28.000Z
|
Documentation/ManualSource/wikicmd/MoinMoin/datastruct/__init__.py
|
sleyzerzon/soar
|
74a6f32ba1be3a7b3ed4eac0b44b0f4b2e981f71
|
[
"Unlicense"
] | 3 |
2020-06-26T21:21:32.000Z
|
2020-06-26T21:21:36.000Z
|
Documentation/ManualSource/wikicmd/MoinMoin/datastruct/__init__.py
|
sleyzerzon/soar
|
74a6f32ba1be3a7b3ed4eac0b44b0f4b2e981f71
|
[
"Unlicense"
] | 2 |
2017-01-25T20:06:44.000Z
|
2021-03-25T18:39:55.000Z
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - datastruct (groups and dicts) support.
@copyright: 2009 MoinMoin:DmitrijsMilajevs
@license: GPL, see COPYING for details
"""
from MoinMoin.datastruct.backends.wiki_dicts import WikiDicts
from MoinMoin.datastruct.backends.config_dicts import ConfigDicts
from MoinMoin.datastruct.backends.composite_dicts import CompositeDicts
from MoinMoin.datastruct.backends.wiki_groups import WikiGroups
from MoinMoin.datastruct.backends.config_groups import ConfigGroups
from MoinMoin.datastruct.backends.composite_groups import CompositeGroups
from MoinMoin.datastruct.backends import GroupDoesNotExistError
from MoinMoin.datastruct.backends import DictDoesNotExistError
| 35.35 | 73 | 0.847242 |
b69437b04f6cd8dbb7c24ddfab215e1a60689ed0
| 6,772 |
py
|
Python
|
python/craftassist/voxel_models/detection-transformer/datasets/coco.py
|
boldsort/craftassist
|
8058d115a250e30deb60d969b7b1a5fefd6e974c
|
[
"MIT"
] | 626 |
2019-07-18T18:40:44.000Z
|
2022-03-29T17:34:43.000Z
|
python/craftassist/voxel_models/detection-transformer/datasets/coco.py
|
boldsort/craftassist
|
8058d115a250e30deb60d969b7b1a5fefd6e974c
|
[
"MIT"
] | 42 |
2019-07-27T11:04:15.000Z
|
2021-02-23T03:15:14.000Z
|
python/craftassist/voxel_models/detection-transformer/datasets/coco.py
|
boldsort/craftassist
|
8058d115a250e30deb60d969b7b1a5fefd6e974c
|
[
"MIT"
] | 89 |
2019-07-19T15:07:39.000Z
|
2022-02-15T18:44:24.000Z
|
import copy
import os
import torch
import torch.utils.data
import torchvision
import transforms as T
from pycocotools import mask as coco_mask
class FilterAndRemapCocoCategories(object):
def __init__(self, categories, remap=True):
self.categories = categories
self.remap = remap
def __call__(self, image, target):
anno = target["annotations"]
anno = [obj for obj in anno if obj["category_id"] in self.categories]
if not self.remap:
target["annotations"] = anno
return image, target
anno = copy.deepcopy(anno)
for obj in anno:
obj["category_id"] = self.categories.index(obj["category_id"])
target["annotations"] = anno
return image, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __init__(self, return_masks=False):
self.return_masks = return_masks
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
if self.return_masks:
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
if self.return_masks:
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
if self.return_masks:
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
target["area"] = area[keep]
target["iscrowd"] = iscrowd[keep]
target["orig_size"] = torch.as_tensor([int(h), int(w)])
target["size"] = torch.as_tensor([int(h), int(w)])
return image, target
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks)
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def get_in_coco_format(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def make_coco_transforms(image_set):
normalize = T.Compose(
[T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
)
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
transform_train = T.Compose(
[
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize(scales, max_size=1333),
T.Compose(
[
T.RandomResize([400, 500, 600]),
T.RandomSizeCrop(384, 600),
T.RandomResize(scales, max_size=1333),
]
),
),
normalize,
]
)
transform_val = T.Compose([T.RandomResize([800], max_size=1333), normalize])
transforms = {
"train": transform_train,
"trainval": transform_train,
"val": transform_val,
"test": transform_val,
}
return transforms[image_set]
def build(image_set, args):
root = "/datasets01/COCO/022719"
if args.crowdfree:
# has cleaned up training set, val set is unchanged
root_ann = "/checkpoint/szagoruyko/detection_transformer_shared/coco_instances_crowdfree"
else:
root_ann = root
mode = "instances"
anno_file_template = "{}_{}2017.json"
PATHS = {
"train": (
"train2017",
os.path.join("annotations", anno_file_template.format(mode, "train")),
),
"val": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))),
# this is a hack, change in the future
"trainval": (
"train2017",
os.path.join("annotations", anno_file_template.format(mode, "train")),
),
"test": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))),
}
img_folder, ann_file = PATHS[image_set]
img_folder = os.path.join(root, img_folder)
ann_file = os.path.join(root_ann, ann_file)
dataset = CocoDetection(
img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks
)
return dataset
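# --- Illustrative sketch (added; not part of the original file) ---
# Demonstrates what ConvertCocoPolysToMask produces for one synthetic
# annotation. PIL is assumed available (it already backs torchvision's
# CocoDetection); running this file directly also requires the repo's local
# `transforms` module imported above as T.
if __name__ == "__main__":
    from PIL import Image

    img = Image.new("RGB", (100, 80))
    target = {
        "image_id": 1,
        "annotations": [
            {"bbox": [10, 10, 30, 20], "category_id": 1, "area": 600, "iscrowd": 0}
        ],
    }
    _, out = ConvertCocoPolysToMask()(img, target)
    # One box converted from xywh to xyxy ([10., 10., 40., 30.]) and one label.
    print(out["boxes"], out["labels"])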
| 32.873786 | 97 | 0.597165 |
ebbf1e7c40b3e1ddd280f6c2040d853a8a38a786
| 1,023 |
py
|
Python
|
state_machines/lasers/laser_states.py
|
drewc747/room-density-tracker
|
29afc7291276abeb3dcbd25410e3cf69a7ef3f96
|
[
"MIT"
] | null | null | null |
state_machines/lasers/laser_states.py
|
drewc747/room-density-tracker
|
29afc7291276abeb3dcbd25410e3cf69a7ef3f96
|
[
"MIT"
] | null | null | null |
state_machines/lasers/laser_states.py
|
drewc747/room-density-tracker
|
29afc7291276abeb3dcbd25410e3cf69a7ef3f96
|
[
"MIT"
] | null | null | null |
# laser_states.py
import sys
sys.path.insert(1, '../')
from state import State
# Start of laser states
'''
The state which indicates that a laser package is in dual laser mode
'''
class DualState(State):
'''
    Function to return the next state based on an input event
'''
def on_event(self, event):
if event == 'single':
return SingleState()
return self
'''
The state which indicates that a laser package is in single laser mode
'''
class SingleState(State):
'''
    Function to return the next state based on an input event
'''
def on_event(self, event):
if event == 'off':
return OffState()
elif event == 'dual':
return DualState()
return self
'''
The state which indicates that a laser package is off
'''
class OffState(State):
'''
    Function to return the next state based on an input event
'''
def on_event(self, event):
if event == 'single':
return SingleState()
return self
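# --- Illustrative usage sketch (added; not part of the original module) ---
# Walks a laser package through dual -> single -> off -> single. Assumes the
# base State class in state.py can be instantiated with no arguments.
if __name__ == '__main__':
    state = DualState()
    for event in ('single', 'off', 'single'):
        state = state.on_event(event)
        print(event, '->', state.__class__.__name__)
    # single -> SingleState, off -> OffState, single -> SingleState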
| 20.46 | 70 | 0.59042 |
4cb24cadffc9b78656273d003057453de758f210
| 1,478 |
py
|
Python
|
marble/components/monitor.py
|
mcgibbon/marble
|
801abdf65e112203d2b3c8983b0f73b0a4c821da
|
[
"BSD-3-Clause"
] | 3 |
2019-07-08T16:33:44.000Z
|
2019-09-03T18:34:25.000Z
|
marble/components/monitor.py
|
mcgibbon/marble
|
801abdf65e112203d2b3c8983b0f73b0a4c821da
|
[
"BSD-3-Clause"
] | null | null | null |
marble/components/monitor.py
|
mcgibbon/marble
|
801abdf65e112203d2b3c8983b0f73b0a4c821da
|
[
"BSD-3-Clause"
] | null | null | null |
import sympl as sp
import numpy as np
from marble.state import AliasDict
class NotAColumnException(Exception):
pass
class ColumnStore(sp.Monitor):
"""
Stores single-column values as numpy arrays to later retrieve a timeseries.
"""
def __init__(self, *args, **kwargs):
super(ColumnStore, self).__init__(*args, **kwargs)
self._column_lists = AliasDict()
def store(self, state):
"""
Store a given column state.
Units and dimensions are assumed to be the same each time the state is
stored. All arrays must be 0 or 1-dimensional.
Args:
state (dict): a state dictionary.
"""
for name, array in state.items():
if name == 'time':
pass
elif len(array.shape) > 1:
raise NotAColumnException(
'array for {} is not a column, has shape {}, dims {}'.format(
name, array.shape, array.dims)
)
elif len(array.shape) == 1:
self._column_lists[name] = self._column_lists.get(name, [])
self._column_lists[name].append(array.values[None, :])
elif len(array.shape) == 0:
self._column_lists[name] = self._column_lists.get(name, [])
self._column_lists[name].append(array.values[None])
def __getitem__(self, item):
return np.concatenate(self._column_lists[item], axis=0)
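# --- Illustrative usage sketch (added; not part of the original module) ---
# Stores two single-column states and reads back a (time, level) array.
# Assumes sympl.DataArray accepts xarray-style arguments (data, dims, attrs).
if __name__ == '__main__':
    store = ColumnStore()
    for step in range(2):
        state = {
            'air_temperature': sp.DataArray(
                np.full((10,), 250.0 + step),
                dims=['mid_levels'],
                attrs={'units': 'degK'},
            ),
        }
        store.store(state)
    print(store['air_temperature'].shape)  # -> (2, 10)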
| 32.130435 | 81 | 0.577131 |
1a4b8c9b5179e3a976a417c6a77ba392025b46d9
| 11,645 |
py
|
Python
|
train_fr.py
|
cutz-j/SAROD
|
0da4497bee80cf84d2173e5386f8feaecf3900e8
|
[
"MIT"
] | 1 |
2021-10-14T23:40:11.000Z
|
2021-10-14T23:40:11.000Z
|
train_fr.py
|
cutz-j/SAROD
|
0da4497bee80cf84d2173e5386f8feaecf3900e8
|
[
"MIT"
] | 1 |
2021-01-09T08:00:30.000Z
|
2021-01-09T08:00:30.000Z
|
train_fr.py
|
cutz-j/SAROD
|
0da4497bee80cf84d2173e5386f8feaecf3900e8
|
[
"MIT"
] | 3 |
2021-01-07T11:27:46.000Z
|
2021-01-31T04:03:07.000Z
|
# import easydict
from multiprocessing import Process
import yaml
from pathlib import Path
import argparse
import torch
import tqdm
import numpy as np
import copy
# torch
import torchvision
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models import mobilenet_v2
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision import transforms
# from yolov5.train_dt import yolov5
from EfficientObjectDetection.train_new_reward import EfficientOD
# import fr_utils
import munch
import os
import utils
from utils import load_filenames, load_dataset, load_dataloader, compute_map, convert_yolo2coco, label2idx, label_matching, reduce_dict, make_results
opt = {'epochs':100,
'batch_size':12,
'device':1,
'test_epoch':10,
'eval_epoch':2,
'step_batch_size':100,
'save_path':'save',
'save_freq': 5,
'rl_weight':None,
'print_freq': 50,
'h_detector_weight':'',
'l_detector_weight':'',
'fine_tr':'config/fine_tr.yaml',
'fine_eval':'config/fine_eval.yaml',
'coarse_tr':'config/coarse_tr.yaml',
'coarse_eval':'config/coarse_eval.yaml',
'EfficientOD':'config/EfficientOD.yaml',
'split': 4}
opt = munch.AutoMunch(opt)
# GPU Device
gpu_id = opt.device
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
use_cuda = torch.cuda.is_available()
print("GPU device " , use_cuda)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# training option load from yaml files
with open(opt.fine_tr) as f:
fine_tr = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.fine_eval) as f:
fine_eval = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.coarse_tr) as f:
coarse_tr = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.coarse_eval) as f:
coarse_eval = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.EfficientOD) as f:
efficient_config = yaml.load(f, Loader=yaml.FullLoader)
efficient_config['load'] = None # bug fix
epochs = opt.epochs
bs = opt.batch_size
# fine_detector = yolov5(fine_tr, fine_eval, epochs, bs)
# coarse_detector = yolov5(coarse_tr, coarse_eval, epochs, bs)
rl_agent = EfficientOD(efficient_config)
split_train_path = '/home/SSDD/ICIP21_dataset/800_HRSID/split_data_4_0/rl_ver/train/images'
split_val_path = '/home/SSDD/ICIP21_dataset/800_HRSID/split_data_4_0/rl_ver/val/images'
split_test_path = '/home/SSDD/ICIP21_dataset/800_HRSID/split_data_4_0/rl_ver/test/images'
split = 4
original_img_path = '/home/SSDD/ICIP21_dataset/800_HRSID/origin_data/rl_ver/'
original_img_path_train = original_img_path + 'train/images'
original_img_path_val = original_img_path + 'val/images'
original_img_path_test = original_img_path + 'test/images'
assert bs % split == 0, 'batch size should be divided with image split patch size'
num_classes = 2
fine_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, num_classes=num_classes, pretrained_backbone=False)
coarse_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, num_classes=num_classes, pretrained_backbone=False)
# # # # replace the classifier with a new one, that has
# # # # num_classes which is user-defined
# # # get number of input features for the classifier
fine_in_features = fine_model.roi_heads.box_predictor.cls_score.in_features
coarse_in_features = coarse_model.roi_heads.box_predictor.cls_score.in_features
# # # replace the pre-trained head with a new one
fine_model.roi_heads.box_predictor = FastRCNNPredictor(fine_in_features, num_classes)
coarse_model.roi_heads.box_predictor = FastRCNNPredictor(coarse_in_features, num_classes)
for fine_p, coarse_p in zip(fine_model.parameters(), coarse_model.parameters()):
fine_p.requires_grad = True
coarse_p.requires_grad = True
fine_model.to(device)
coarse_model.to(device)
# Optimizer
fine_params = [p for p in fine_model.parameters() if p.requires_grad]
coarse_params = [p for p in coarse_model.parameters() if p.requires_grad]
fine_optim = torch.optim.SGD(fine_params, lr=0.005, momentum=0.9, weight_decay=0.0005)
coarse_optim = torch.optim.SGD(coarse_params, lr=0.005, momentum=0.9, weight_decay=0.0005)
fine_lr_scheduler = torch.optim.lr_scheduler.StepLR(fine_optim, step_size=50)
coarse_lr_scheduler = torch.optim.lr_scheduler.StepLR(coarse_optim, step_size=50)
for e in range(epochs):
    # create images for the loader even when labels are missing
train_imgs = load_filenames(split_train_path, split, bs).files_array()
fine_train_dataset = load_dataset(train_imgs, fine_tr, bs)
coarse_train_dataset = load_dataset(train_imgs, fine_tr, bs)
fine_train_loader = load_dataloader(bs, fine_train_dataset)
coarse_train_loader = load_dataloader(bs, coarse_train_dataset)
fine_train_nb = len(fine_train_loader)
coarse_train_nb = len(coarse_train_loader)
assert fine_train_nb == coarse_train_nb, 'fine & coarse train batch number is not matched'
nb = fine_train_nb
# Logger
fine_metric_logger = utils.MetricLogger(delimiter=" ")
fine_metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
coarse_metric_logger = utils.MetricLogger(delimiter=" ")
coarse_metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
fine_header = 'Fine Epoch: [{}]'.format(e)
coarse_header = 'Coarse Epoch: [{}]'.format(e)
# # warmup
fine_lr_scheduler = None
    coarse_lr_scheduler = None
if e == 0:
warmup_factor = 1. / 1000
warmup_iters = min(1000, fine_train_nb-1)
fine_lr_scheduler = utils.warmup_lr_scheduler(fine_optim, warmup_iters, warmup_factor)
coarse_lr_scheduler = utils.warmup_lr_scheduler(coarse_optim, warmup_iters, warmup_factor)
for i, (fine_train, coarse_train) in enumerate(zip(fine_train_loader, coarse_train_loader)):
# train
fine_model.train()
coarse_model.train()
#### fine train ###
        # Label matching
fine_imgs, fine_labels = label_matching(fine_train, device)
fine_imgs = fine_imgs.to(device) / 255.
        ## train: images are only scaled to [0, 1] here; a full normalize transform caused a zero-division error
fine_loss_dict = fine_model(fine_imgs, copy.deepcopy(fine_labels))
fine_losses = sum(loss for loss in fine_loss_dict.values())
fine_loss_dict_reduced = reduce_dict(fine_loss_dict)
fine_loss_reduced = sum(loss for loss in fine_loss_dict_reduced.values())
fine_loss_val = fine_loss_reduced.item()
# optimizer
fine_optim.zero_grad()
fine_losses.backward()
fine_optim.step()
if fine_lr_scheduler is not None:
fine_lr_scheduler.step()
fine_metric_logger.update(loss=fine_loss_reduced, **fine_loss_dict_reduced)
fine_metric_logger.update(lr=fine_optim.param_groups[0]["lr"])
if i % opt.print_freq ==0:
space_fmt = ':' + str(len(str(fine_train_nb))) + 'd'
log_msg = fine_metric_logger.delimiter.join([fine_header, '[{0' + space_fmt + '}/{1}]', '{meters}'])
print(log_msg.format(i, fine_train_nb, meters=str(fine_metric_logger)))
### coarse train ###
        # Label matching
coarse_imgs, coarse_labels = label_matching(coarse_train, device)
coarse_imgs = coarse_imgs.to(device) / 255.
        ## train: images are only scaled to [0, 1] here; a full normalize transform caused a zero-division error
coarse_loss_dict = coarse_model(coarse_imgs, copy.deepcopy(coarse_labels))
coarse_losses = sum(loss for loss in coarse_loss_dict.values())
# utils
coarse_loss_dict_reduced = reduce_dict(coarse_loss_dict)
coarse_loss_reduced = sum(loss for loss in coarse_loss_dict_reduced.values())
coarse_loss_val = coarse_loss_reduced.item()
# optimizer
coarse_optim.zero_grad()
coarse_losses.backward()
coarse_optim.step()
if coarse_lr_scheduler is not None:
coarse_lr_scheduler.step()
coarse_metric_logger.update(loss=coarse_loss_reduced, **coarse_loss_dict_reduced)
        coarse_metric_logger.update(lr=coarse_optim.param_groups[0]["lr"])
if i % opt.print_freq ==0:
space_fmt = ':' + str(len(str(fine_train_nb))) + 'd'
log_msg = coarse_metric_logger.delimiter.join([coarse_header, '[{0' + space_fmt + '}/{1}]', '{meters}'])
print(log_msg.format(i, fine_train_nb, meters=str(coarse_metric_logger)))
## train eval
# result = (source_path, paths[si], mp, mr, map50, nl, stats)
# file_name, od_file_dir, mp=0(skip), ma=0(skip), map50(will be soon), objnum, stat
# stat = 4
# make_results(model, dataset, device)
fine_results = make_results(fine_model, fine_train, device)
coarse_results = make_results(coarse_model, coarse_train, device)
# conf_thresh=0.001 / iou_thres=0.6
rl_agent.train(e, i, nb, fine_results, coarse_results, original_data_path=original_img_path_train)
## Validation
if e % 1 == 0:
fine_dataset, coarse_dataset, policies = rl_agent.eval(split_val_path, original_img_path_val)
print(len(fine_dataset.tolist()))
print(len(coarse_dataset.tolist()))
fine_results, coarse_results = [], []
if len(fine_dataset.tolist()) > 0:
fine_val_dataset = load_dataset(fine_dataset, fine_tr, bs)
fine_val_loader = load_dataloader(bs, fine_val_dataset)
fine_nb = len(fine_val_loader)
for i, fine_val in tqdm.tqdm(enumerate(fine_val_loader), total=fine_nb):
fine_results += make_results(fine_model, fine_val, device)
if len(coarse_dataset.tolist()) > 0:
coarse_val_dataset = load_dataset(coarse_dataset, fine_tr, bs)
coarse_val_loader = load_dataloader(bs, coarse_val_dataset)
            coarse_nb = len(coarse_val_loader)
for i, coarse_val in tqdm.tqdm(enumerate(coarse_val_loader), total=coarse_nb):
coarse_results += make_results(coarse_model, coarse_val, device)
map50 = compute_map(fine_results, coarse_results)
print('Validation MAP: \n', map50)
# save
if e % opt.save_freq == 0:
        torch.save(fine_model, os.path.join(opt.save_path, 'fine_model'))
        torch.save(coarse_model, os.path.join(opt.save_path, 'coarse_model'))
# Testing
fine_dataset, coarse_dataset, policies = rl_agent.eval(split_test_path, original_img_path_test)
fine_results, coarse_results = [], []
if len(fine_dataset.tolist()) > 0:
fine_test_dataset = load_dataset(fine_dataset, fine_tr, bs)
fine_test_loader = load_dataloader(bs, fine_test_dataset)
fine_nb = len(fine_test_loader)
for i, fine_test in tqdm.tqdm(enumerate(fine_test_loader), total=fine_nb):
fine_results += make_results(fine_model, fine_test, device)
if len(coarse_dataset.tolist()) > 0:
coarse_test_dataset = load_dataset(coarse_dataset, fine_tr, bs)
coarse_test_loader = load_dataloader(bs, coarse_test_dataset)
coarse_nb = len(coarse_test_loader)
for i, coarse_test in tqdm.tqdm(enumerate(coarse_test_loader), total=coarse_nb):
coarse_results += make_results(coarse_model, coarse_test, device)
map50 = compute_map(fine_results, coarse_results)
print('MAP: \n', map50)
with open('test_result.txt', 'a') as f:
f.write(str(map50))
with open('test_policies.txt', 'a') as f:
f.write(str(policies))
| 41.294326 | 149 | 0.708974 |
5e03728270dbe7ab8f2e4c2cc4f127986f8b9302
| 76,854 |
py
|
Python
|
thonny/plugins/micropython/__init__.py
|
aroberge/thonny
|
919769139c9cbfdfa2b78f6a6f0a3d9ecee56e28
|
[
"MIT"
] | null | null | null |
thonny/plugins/micropython/__init__.py
|
aroberge/thonny
|
919769139c9cbfdfa2b78f6a6f0a3d9ecee56e28
|
[
"MIT"
] | null | null | null |
thonny/plugins/micropython/__init__.py
|
aroberge/thonny
|
919769139c9cbfdfa2b78f6a6f0a3d9ecee56e28
|
[
"MIT"
] | null | null | null |
import ast
import logging
import io
import os
import platform
import queue
import re
import subprocess
import sys
import textwrap
import threading
import time
import tokenize
import traceback
import webbrowser
from queue import Queue
from textwrap import dedent
from time import sleep
from tkinter import ttk
from thonny.ui_utils import askopenfilename, create_url_label
from typing import Optional
import jedi
import serial.tools.list_ports
from serial import SerialException
from thonny import common, get_runner, get_shell, get_workbench
from thonny.common import (
BackendEvent,
InlineResponse,
MessageFromBackend,
ToplevelCommand,
ToplevelResponse,
)
from thonny.config_ui import ConfigurationPage
from thonny.misc_utils import find_volumes_by_name
from thonny.plugins.backend_config_page import BackendDetailsConfigPage
from thonny.running import BackendProxy
from thonny.ui_utils import SubprocessDialog, create_string_var, show_dialog
EOT = b"\x04"
NORMAL_PROMPT = b">>> "
# first prompt when switching to raw mode (or after soft reboot in raw mode)
FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\n>"
RAW_PROMPT = b">"
TIMEOUT = 0.1
EOT_WITH_RAW_PROMPT = "\x04>"
THONNY_START = "<ForThonny>"
THONNY_END = "</ForThonny>"
THONNY_MSG_START = b"\x02"
NEWLINE = "\n"
DEFAULT_WEBREPL_URL = "ws://192.168.4.1:8266/"
# TODO: Current code has some preparations in place to make automatic initial interrupt optional
# It's not so easy, though (initial interrupt also fetches some required info etc)
_AUTOMATIC_INTERRUPT = True
class MicroPythonProxy(BackendProxy):
def __init__(self, clean):
super().__init__(clean)
self._non_serial_msg_queue = Queue()
self._last_toplevel_command = None
self._has_been_idle = False
self._ctrl_c_notice_has_been_removed = False
self._baudrate = 115200
self._reading_cancelled = False
self._welcome_text = ""
self._discarded_bytes = bytearray()
self._builtins_info = self._get_builtins_info()
# TODO: provide default builtins for script completion
self._builtin_modules = []
self.__idle = False
self._connection = self._create_connection()
if self._connection is not None and (clean or _AUTOMATIC_INTERRUPT):
try:
self._interrupt_to_prompt(clean)
self._builtin_modules = self._fetch_builtin_modules()
except TimeoutError:
read_bytes = bytes(
self._discarded_bytes + self._connection._read_buffer
)
self._show_error_connect_again(
"Could not connect to REPL.\n"
+ "Make sure your device has suitable firmware and is not in bootloader mode!\n"
+ "Bytes read: "
+ str(read_bytes)
+ "\nDisconnecting."
)
self.disconnect()
except:
self.disconnect()
raise
self._start_time = time.time()
def send_command(self, cmd):
if isinstance(cmd, ToplevelCommand):
self._last_toplevel_command = cmd
if cmd.name in ["editor_autocomplete", "cd", "dump_api_info", "lsdevice"]:
# Works even without connection to the board
return super().send_command(cmd)
elif self._connection is None:
return "discard"
elif self.idle:
try:
if not self._connection.buffers_are_empty():
discarded = self._connection.read_all()
self._send_error_to_shell(
"Warning: when issuing %r,\nincoming was not emtpy: %r"
% (cmd, discarded)
)
return super().send_command(cmd)
except SerialException as e:
self._handle_serial_exception(e)
return "discard"
else:
return "postpone"
def send_program_input(self, data: str) -> None:
# TODO: what if there is a previous unused data waiting
assert self._connection.outgoing_is_empty()
assert data.endswith("\n")
if not data.endswith("\r\n"):
            data = data[:-1] + "\r\n"
        data = data.encode("utf-8")
try:
self._connection.write(data)
# Try to consume the echo
try:
echo = self._connection.read(len(data))
except queue.Empty:
# leave it.
logging.warning("Timeout when reading echo")
return
if echo != data:
# because of autoreload? timing problems? interruption?
# Leave it.
logging.warning("Unexpected echo. Expected %s, got %s" % (data, echo))
self._connection.unread(echo)
except SerialException as e:
self._handle_serial_exception(e)
def fetch_next_message(self):
if not self._non_serial_msg_queue.empty():
msg = self._non_serial_msg_queue.get_nowait()
elif self._connection is not None:
# Provide guidance for Ctrl-C
if time.time() - self._start_time > 0.5:
if not self._has_been_idle:
"""TODO: get_shell().set_notice("Use Ctrl-C to interrupt the program and/or enter the REPL")"""
else:
if not self._ctrl_c_notice_has_been_removed:
"""TODO: get_shell().set_notice(None)"""
self._ctrl_c_notice_has_been_removed = True
# TODO: fetch required info if automatic interrupt is disabled
# get the message
try:
msg = self._read_next_serial_message()
# if msg:
# print("GOT", msg)
except SerialException as e:
self._handle_serial_exception(e)
return None
else:
msg = None
return self.transform_message(msg)
def interrupt(self):
if self._connection is None:
return
try:
self.idle = False
self._connection.reset_output_buffer()
self._connection.write(b"\x03")
# Wait a bit to avoid the situation where part of the prompt will
# be treated as output and whole prompt is not detected.
# (Happened with Calliope)
sleep(0.1)
except SerialException as e:
self._handle_serial_exception(e)
def destroy(self):
self.disconnect()
def disconnect(self):
if self._connection is not None:
try:
self._connection.close()
self._send_text_to_shell(
"\n\nConnection closed.\nSelect Run → Stop/Restart or press Ctrl+F2 to connect again.",
"stdout",
)
except Exception as e:
logging.exception("Problem when closing serial")
self._send_error_to_shell(
"Problem when closing serial connection: " + str(e)
)
self._connection = None
def is_connected(self):
return self._connection is not None
def is_functional(self):
return self.is_connected()
def _create_connection(self):
port = get_workbench().get_option(self.backend_name + ".port")
if port == "webrepl":
return self._create_webrepl_connection()
else:
return self._create_serial_connection(port)
def _create_serial_connection(self, port):
if port is None or port == "None":
self._send_text_to_shell(
'Not connected. Choose "Tools → Options → Backend" to change.', "stdout"
)
return None
if port == "auto":
potential = self._detect_potential_ports()
if len(potential) == 1:
port = potential[0][0]
else:
message = dedent(
"""\
Couldn't find the device automatically.
Check the connection (making sure the device is not in bootloader mode)
or choose "Tools → Options → Backend" to select the port manually."""
)
if len(potential) > 1:
_, descriptions = zip(*potential)
message += "\n\nLikely candidates are:\n * " + "\n * ".join(
descriptions
)
self._show_error_connect_again(message)
return None
try:
return SerialConnection(port, baudrate=self._baudrate)
except SerialException as error:
traceback.print_exc()
message = "Unable to connect to " + port + "\n" + "Error: " + str(error)
# TODO: check if these error codes also apply to Linux and Mac
if error.errno == 13 and platform.system() == "Linux":
# TODO: check if user already has this group
message += "\n\n" + dedent(
"""\
Try adding yourself to the 'dialout' group:
> sudo usermod -a -G dialout <username>
(NB! This needs to be followed by reboot or logging out and logging in again!)"""
)
elif "PermissionError" in message:
message += "\n\n" + dedent(
"""\
If you have serial connection to the device from another program,
then disconnect it there."""
)
elif error.errno == 16:
message += "\n\n" + "Try restarting the device."
self._show_error_connect_again(message)
return None
def _create_webrepl_connection(self):
url = get_workbench().get_option(self.backend_name + ".webrepl_url")
password = get_workbench().get_option(self.backend_name + ".webrepl_password")
print("URL", url)
try:
conn = WebReplConnection(url, password)
except:
e_type, e_value, _ = sys.exc_info()
self._send_error_to_shell(
"Could not connect to "
+ url
+ "\nError: "
+ "\n".join(traceback.format_exception_only(e_type, e_value))
)
return None
conn.read_until([b"WebREPL connected\r\n"])
return conn
def _show_error_connect_again(self, msg):
self._send_error_to_shell(
msg
+ "\n\nCheck the configuration, select Run → Stop/Restart or press Ctrl+F2 to try again."
+ "\n(On some occasions it helps to wait before trying again.)"
)
def _detect_potential_ports(self):
all_ports = list_serial_ports()
"""
for p in all_ports:
print(p.description,
p.device,
None if p.vid is None else hex(p.vid),
None if p.pid is None else hex(p.pid),
)
"""
return [
(p.device, p.description)
for p in all_ports
if (p.vid, p.pid) in self.known_usb_vids_pids
or p.description in self.known_port_descriptions
or ("USB" in p.description and "serial" in p.description.lower())
or "UART" in p.description
or "DAPLink" in p.description
]
@property
def idle(self):
return self.__idle
@idle.setter
def idle(self, value):
if self.__idle != value:
logging.debug("Serial idle %s => %s", self.__idle, value)
self.__idle = value
if value:
self._has_been_idle = True
def _fetch_builtin_modules(self):
assert self.idle
out, err = self._execute_and_get_response("help('modules')")
assert err == b"", "Error was: %r" % err
modules_str = (
out.decode("utf-8")
.replace("Plus any modules on the filesystem", "")
.replace("/__init__", "")
.replace("/", ".")
)
return modules_str.split()
def _fetch_uname(self):
assert self.idle
res = self._execute_and_parse_value(
"import os as __os_; print(repr(tuple(__os_.uname()))); del __os_"
)
return {
"sysname": res[0],
"nodename": res[1],
"release": res[2],
"version": res[3],
"machine": res[4],
}
def _interrupt_to_prompt(self, clean, timeout=8):
assert self._connection is not None
timer = TimeHelper(timeout)
# NB! Sometimes disconnecting and reconnecting (on macOS?)
# too quickly causes anomalies. See CalliopeMiniProxy for more details
for delay in [0.05, 0.5, 2.0, 3.0]:
# Interrupt several times, because with some drivers first interrupts seem to vanish
self._connection.reset_output_buffer()
self._connection.write(b"\x03") # interrupt
self._connection.write(b"\x01") # raw mode
sleep(delay)
self._discarded_bytes += self._connection.read_all()
if self._discarded_bytes.endswith(
FIRST_RAW_PROMPT
) or self._discarded_bytes.endswith(b"\r\n>"):
break
else:
raise TimeoutError("Can't get to raw prompt")
self._welcome_text = self._get_welcome_text_in_raw_mode(timer.time_left)
if clean:
self._clean_environment_during_startup(timer.time_left)
self._finalize_repl()
# report ready
self._non_serial_msg_queue.put(
ToplevelResponse(welcome_text=self._welcome_text.strip())
)
self.idle = True
def _clean_environment_during_startup(self, time_left):
# In MP Ctrl+D doesn't run user code, in CP it does
self._connection.write(b"\x04")
self._discarded_bytes = self._connection.read_until(
[FIRST_RAW_PROMPT, RAW_PROMPT], time_left
)
def _get_welcome_text_in_raw_mode(self, timeout):
timer = TimeHelper(timeout)
# get welcome text with Ctrl+B
self._connection.write(b"\x02")
welcome_text = (
self._connection.read_until(NORMAL_PROMPT, timer.time_left)
.strip(b"\r\n >")
.decode("utf-8", "replace")
)
if os.name != "nt":
welcome_text = welcome_text.replace("\r\n", "\n")
# Go back to raw prompt
self._connection.write(b"\x01")
self._connection.read_until((FIRST_RAW_PROMPT, b"\x04>"), timer.time_left)
return welcome_text + " [backend=" + self.get_backend_name() + "]"
def _finalize_repl(self):
pass
def _soft_reboot_and_run_main(self):
if self._connection is None:
return
if not self.idle:
# TODO: ignore??
# self._connection.write(b"\r\x03")
self.interrupt()
get_runner()._set_state("running")
self.idle = False
# Need to go to normal mode. MP doesn't run user code in raw mode
# (CP does, but it doesn't hurt to do it there as well)
self._connection.write(b"\x02")
self._connection.read_until(NORMAL_PROMPT)
self._connection.write(b"\x04")
# Returning to the raw prompt will be handled by
# _read_next_serial_message
def _clear_environment(self):
assert self.idle
# TODO: Ctrl+D in raw repl is perfect for MicroPython
# but on CircuitPython it runs main.py
# TODO: which is better:
# self._execute_async(dedent("""
# for name in globals():
# if not name.startswith("__"):
# del globals()[name]
# """).strip())
# or
out, err = self._execute_and_get_response(
dedent(
"""
globals().clear()
__name__ == '__main__'
"""
).strip()
)
assert out == b""
assert err == b""
def _handle_serial_exception(self, e):
logging.exception("MicroPython serial error")
self._show_error_connect_again("\nLost connection to the device (%s)." % e)
self.idle = False
try:
self._connection.close()
except Exception:
logging.exception("Closing serial")
finally:
self._connection = None
def _execute_async(self, script):
"""Executes given MicroPython script on the device"""
assert self._connection.buffers_are_empty()
# print("----\n",script,"\n---")
command_bytes = script.encode("utf-8")
self._connection.write(command_bytes + b"\x04")
self.idle = False
# fetch confirmation
ok = self._connection.read(2)
assert ok == b"OK", "Expected OK, got %s, followed by %s" % (
ok,
self._connection.read_all(),
)
def _execute_and_get_response(self, script):
self._execute_async(script)
terminator = b"\x04>"
output = self._connection.read_until(terminator)[: -len(terminator)]
self.idle = True
return output.split(b"\x04")
def _execute_and_parse_value(self, script):
out, err = self._execute_and_get_response(script)
if err:
# display script on error
self._send_text_to_shell(script, "stderr")
# TODO: report the error to stderr
assert len(err) == 0, "Error was " + repr(err)
return ast.literal_eval(out.strip().decode("utf-8"))
def _execute_and_expect_empty_response(self, script):
out, err = self._execute_and_get_response(script)
if out or err:
# display script on error
self._send_text_to_shell(script, "stderr")
assert len(out) == 0, "Output was " + repr(out)
assert len(err) == 0, "Error was " + repr(err)
def _cmd_cd(self, cmd):
assert len(cmd.args) == 1
path = cmd.args[0]
if os.path.exists(path):
self._non_serial_msg_queue.put(ToplevelResponse(cwd=path))
else:
self._non_serial_msg_queue.put(
ToplevelResponse(error="Path doesn't exist: %s" % path)
)
def _cmd_Run(self, cmd):
self._clear_environment()
if not hasattr(cmd, "source"):
assert len(cmd.args) == 1
filename = cmd.args[0]
if os.path.isabs(filename):
full_filename = filename
else:
full_filename = os.path.join(get_workbench().get_cwd(), filename)
cmd.script_path = full_filename
with tokenize.open(full_filename) as fp:
source = fp.read()
else:
source = cmd.source
self._execute_async(source)
def _cmd_execute_source(self, cmd):
try:
# Try to parse as expression
ast.parse(cmd.source, mode="eval")
# If it didn't fail then source is an expression
msg_template = """{'message_class':'ToplevelResponse', 'value_info':(id(v), repr(v))}"""
self._execute_async(
"print('\\x04\\x02', [%s for v in [%s]][0])"
% (msg_template, cmd.source.strip())
)
except SyntaxError:
# source is a statement (or invalid syntax)
self._execute_async(cmd.source)
def _cmd_get_globals(self, cmd):
if not get_runner().is_waiting_toplevel_command():
return "postpone"
try:
if cmd.module_name == "__main__":
self._execute_async(
"print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name':'get_globals', 'module_name' : '__main__', 'globals':{x:repr(globals()[x]) for x in globals() if not x.startswith('__')}})"
)
else:
self._execute_async(
dedent(
"""
try:
import %(mod_name)s as __modForGlobs
print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name':'get_globals', 'module_name' : '%(mod_name)s', 'globals':{name : repr(getattr(__modForGlobs, name)) for name in dir(__modForGlobs) if not name.startswith('__')}})
del __modForGlobs
except Exception as e:
print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name':'get_globals', 'module_name' : '%(mod_name)s', 'globals':{}, 'error' : 'Error querying globals:\\n' + str(e)})
"""
% {"mod_name": cmd.module_name}
)
)
except Exception:
self._non_serial_msg_queue.put(
InlineResponse(
command_name="get_globals",
module_name=cmd.module_name,
globals={},
error="Error requesting globals:\\n" + traceback.format_exc(),
)
)
return None
def _cmd_get_dirs_child_data(self, cmd):
if not self._welcome_text:
return "postpone"
if "micro:bit" in self._welcome_text.lower():
return self._cmd_get_dirs_child_data_microbit(cmd)
else:
return self._cmd_get_dirs_child_data_generic(cmd)
def _cmd_get_dirs_child_data_microbit(self, cmd):
"""let it be here so micro:bit works with generic proxy as well"""
assert cmd["paths"] == {""}
try:
self._execute_async(
dedent(
"""
try:
import os as __temp_os
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name': 'get_dirs_child_data',
'node_id' : '%(node_id)s',
'dir_separator' : '',
'data': {'' : {name : __temp_os.size(name) for name in __temp_os.listdir()}}
})
del __temp_os
except Exception as e:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name':'get_dirs_child_data',
'node_id' : '%(node_id)s',
'dir_separator' : '',
'data':{},
'error' : 'Error getting file data: ' + str(e)
})
"""
% {"paths": cmd.paths, "node_id": cmd.node_id}
)
)
except Exception:
self._non_serial_msg_queue.put(
InlineResponse(
command_name="get_dirs_child_data",
error="Error requesting file data:\\n" + traceback.format_exc(),
)
)
return None
def _cmd_get_dirs_child_data_generic(self, cmd):
try:
self._execute_async(
dedent(
"""
try:
import os as __temp_os
# Init all vars, so that they can be deleted
# even if loop makes no iterations
__temp_result = {}
__temp_path = None
__temp_st = None
__temp_children = None
__temp_name = None
__temp_real_path = None
__temp_full = None
for __temp_path in %(paths)r:
__temp_real_path = __temp_path or '/'
__temp_children = {}
for __temp_name in __temp_os.listdir(__temp_real_path):
__temp_full = (__temp_real_path + '/' + __temp_name).replace("//", "/")
# print("processing", __temp_full)
__temp_st = __temp_os.stat(__temp_full)
if __temp_st[0] & 0o170000 == 0o040000:
# directory
__temp_children[__temp_name] = None
else:
__temp_children[__temp_name] = __temp_st[6]
__temp_result[__temp_path] = __temp_children
del __temp_os
del __temp_st
del __temp_children
del __temp_name
del __temp_path
del __temp_full
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name': 'get_dirs_child_data',
'node_id' : '%(node_id)s',
'dir_separator' : '/',
'data': __temp_result
})
del __temp_result
except Exception as e:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name':'get_dirs_child_data',
'dir_separator' : '/',
'node_id' : '%(node_id)s',
'data':{},
'error' : 'Error getting file data: ' + str(e)
})
"""
% {"paths": cmd.paths, "node_id": cmd.node_id}
)
)
except Exception:
self._non_serial_msg_queue.put(
InlineResponse(
command_name="get_dirs_child_data",
error="Error requesting file data:\\n" + traceback.format_exc(),
)
)
return None
def _cmd_editor_autocomplete(self, cmd):
# template for the response
msg = InlineResponse(
command_name="editor_autocomplete",
source=cmd.source,
row=cmd.row,
column=cmd.column,
error=None,
)
try:
script = jedi.Script(
cmd.source, cmd.row, cmd.column, sys_path=[self._get_api_stubs_path()]
)
completions = script.completions()
except Exception:
msg["error"] = "Autocomplete error"
self._non_serial_msg_queue.put(msg)
return
msg["completions"] = self.filter_completions(completions)
self._non_serial_msg_queue.put(msg)
def filter_completions(self, completions):
# filter out completions not applicable to MicroPython
result = []
for completion in completions:
if completion.name.startswith("__"):
continue
parent_name = completion.parent().name
name = completion.name
root = completion.full_name.split(".")[0]
# jedi proposes names from CPython builtins
if root in self._builtins_info and name not in self._builtins_info[root]:
continue
if parent_name == "builtins" and name not in self._builtins_info:
continue
result.append({"name": name, "complete": completion.complete})
return result
def _cmd_shell_autocomplete(self, cmd):
source = cmd.source
# TODO: combine dynamic results and jedi results
if source.strip().startswith("import ") or source.strip().startswith("from "):
# this needs the power of jedi
msg = InlineResponse(
command_name="shell_autocomplete", source=cmd.source, error=None
)
try:
# at the moment I'm assuming source is the code before cursor, not whole input
lines = source.split("\n")
script = jedi.Script(
source,
len(lines),
len(lines[-1]),
sys_path=[self._get_api_stubs_path()],
)
completions = script.completions()
msg["completions"] = self.filter_completions(completions)
except Exception:
msg["error"] = "Autocomplete error"
self._non_serial_msg_queue.put(msg)
else:
# use live data
regex = re.search(
r"(\w+\.)*(\w+)?$", source
) # https://github.com/takluyver/ubit_kernel/blob/master/ubit_kernel/kernel.py
if regex:
n = regex.group()
# the response format is not the same as expected by the gui
# but it will be transformed later
if "." in n:
obj, n = n.rsplit(".", 1)
self._execute_async(
"print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name': 'shell_autocomplete', 'match':"
+ repr(n)
+ ", 'source':"
+ repr(source)
+ ", 'names':dir("
+ obj
+ ")})"
)
else:
self._execute_async(
"print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name': 'shell_autocomplete', 'match':"
+ repr(n)
+ ", 'source':"
+ repr(source)
+ ", 'names':dir()})"
)
def _cmd_dump_api_info(self, cmd):
"For use during development of the plug-in"
try:
self._execute_and_expect_empty_response(
dedent(
"""
def __print_object_atts(obj):
import gc
result = []
errors = []
for name in dir(obj):
try:
val = getattr(obj, name)
result.append((name, repr(val), repr(type(val))))
except BaseException as e:
errors.append("Couldn't get attr '%s' from object '%r', Err: %r" % (name, obj, e))
print((result, errors))
gc.collect()
"""
)
)
for module_name in sorted(self._fetch_builtin_modules()):
if (
not module_name.startswith("_")
# and not module_name.startswith("ada")
# and not module_name == "builtins"
):
# self._send_text_to_shell("Dumping " + module_name + "\n", "stdout")
file_name = os.path.join(
self._get_api_stubs_path(),
module_name.replace(".", "/") + ".py",
)
self._dump_module_stubs(module_name, file_name)
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
def _dump_module_stubs(self, module_name, file_name):
_, err = self._execute_and_get_response("import {0}".format(module_name))
if err:
print("FAILED IMPORTING MODULE:", module_name, "\nErr: " + repr(err))
return
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with io.open(file_name, "w", encoding="utf-8", newline="\n") as fp:
if module_name not in [
"webrepl",
"_webrepl",
"gc",
"http_client",
"http_client_ssl",
"http_server",
"framebuf",
"example_pub_button",
"flashbdev",
]:
self._dump_object_stubs(fp, module_name, "")
_, err = self._execute_and_get_response("del {0}".format(module_name))
def _dump_object_stubs(self, fp, object_expr, indent):
if object_expr in [
"docs.conf",
"pulseio.PWMOut",
"adafruit_hid",
"upysh",
# "webrepl",
# "gc",
# "http_client",
# "http_server",
]:
print("SKIPPING problematic name:", object_expr)
return
print("DUMPING", indent, object_expr)
items, errors = self._execute_and_parse_value(
"__print_object_atts({0})".format(object_expr)
)
if errors:
print("ERRORS", errors)
for name, rep, typ in sorted(items, key=lambda x: x[0]):
if name.startswith("__"):
continue
print("DUMPING", indent, object_expr, name)
self._send_text_to_shell(" * " + name + " : " + typ, "stdout")
if typ in ["<class 'function'>", "<class 'bound_method'>"]:
fp.write(indent + "def " + name + "():\n")
fp.write(indent + " pass\n\n")
elif typ in ["<class 'str'>", "<class 'int'>", "<class 'float'>"]:
fp.write(indent + name + " = " + rep + "\n")
elif typ == "<class 'type'>" and indent == "":
# full expansion only on toplevel
fp.write("\n")
fp.write(indent + "class " + name + ":\n") # What about superclass?
fp.write(indent + " ''\n")
self._dump_object_stubs(
fp, "{0}.{1}".format(object_expr, name), indent + " "
)
else:
# keep only the name
fp.write(indent + name + " = None\n")
def _cmd_cat(self, cmd):
if len(cmd.args) != 1:
self._send_error_to_shell("Command requires one argument")
return
source = cmd.args[0]
mount = self._get_fs_mount()
if mount is None:
self._cat_via_serial(source)
else:
source = os.path.join(mount, source.strip("/"))
self._cat_via_mount(source)
def _cmd_lsdevice(self, cmd):
try:
items = self._list_files()
out = "\n".join(items) + "\n"
self._send_text_to_shell(out, "stdout")
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
def _cmd_upload(self, cmd):
# Target is interpreted relative to the root
if len(cmd.args) == 1:
source = cmd.args[0]
# target is at root
target = os.path.basename(source)
elif len(cmd.args) == 2:
source = cmd.args[0]
target = cmd.args[1]
else:
# TODO: test this case
raise RuntimeError("Command requires 1 or 2 arguments")
if not os.path.isabs(source):
source = os.path.join(get_workbench().get_cwd(), source)
if not os.path.isfile(source):
raise IOError("No such file: %s" % source)
target = target.replace("\\", "/")
# Only prepend slash if it is known that device supports directories
# (it's probably safe to omit slash anyway)
if self._supports_directories() and not target.startswith("/"):
target = "/" + target
try:
self._check_and_upload(source, target)
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
# TODO: Output confirmation ? (together with file size)
# Or should the confirmation be given in terms of mount path?
def _cmd_write_file(self, cmd):
BUFFER_SIZE = 32
data = cmd["content_bytes"]
self._execute_and_expect_empty_response(
dedent(
"""
__temp_path = '{path}'
__temp_f = open(__temp_path, 'wb')
__temp_written = 0
"""
).format(path=cmd["path"])
)
size = len(data)
for i in range(0, size, BUFFER_SIZE):
chunk_size = min(BUFFER_SIZE, size - i)
chunk = data[i : i + chunk_size]
self._execute_and_expect_empty_response(
"__temp_written += __temp_f.write({chunk!r})".format(chunk=chunk)
)
self._execute_async(
dedent(
"""
try:
__temp_f.close()
del __temp_f
if __temp_written != <<size>>:
raise RuntimeError("Wrote %d bytes out of %d" % (__temp_written, <<size>>))
del __temp_written
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name': 'write_file',
'path' : __temp_path
})
except Exception as e:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name':'write_file',
'path' : __temp_path,
'error' : 'Error saving file content: ' + str(e)
})
del __temp_path
"""
).replace("<<size>>", str(size))
)
def _cmd_read_file(self, cmd):
print("READING", cmd)
try:
self._execute_async(
dedent(
"""
try:
__temp_path = '%(path)s'
with open(__temp_path, 'rb') as __temp_fp:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name': 'read_file',
'path' : __temp_path,
'content_bytes': __temp_fp.read()
})
del __temp_fp
del __temp_path
except Exception as e:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name':'read_file',
'path' : __temp_path,
'content_bytes': b'',
'error' : 'Error getting file content: ' + str(e)
})
"""
)
% cmd
)
except Exception:
self._non_serial_msg_queue.put(
InlineResponse(
command_name="read_file",
path=cmd.path,
content_bytes=b"",
error="Error requesting file content:\\n" + traceback.format_exc(),
)
)
def _check_and_upload(self, source, target):
# if target is a py file,
# then give warning if source has syntax errors
# Note that it's incomplete solution --
# if current Python version is later than 3.5, then it may
# accept source which gives syntax errors on MP.
if target.endswith(".py"):
with tokenize.open(source) as fp:
src = fp.read()
try:
ast.parse(src, source)
except SyntaxError as e:
self._send_error_to_shell(
"%s has syntax errors:\n%s\n\nFile will not be uploaded."
% (source, e)
)
return
try:
self._upload(source, target)
except Exception:
self._send_error_to_shell(traceback.format_exc())
def _upload(self, source, target):
mount = self._get_fs_mount()
if mount is None:
self._upload_via_serial(source, target)
else:
virtual_path = os.path.join(mount, target.strip("/"))
self._upload_via_mount(source, virtual_path)
def _upload_via_serial(self, source, target):
assert self.idle
with open(source, "rb") as local:
content = local.read()
self._execute_and_expect_empty_response("__upf = open(%r, 'wb')" % target)
BLOCK_SIZE = 64
for i in range(0, len(content), BLOCK_SIZE):
self._execute_and_expect_empty_response(
"__upf.write(%r)" % content[i : i + BLOCK_SIZE]
)
self._execute_and_expect_empty_response("__upf.close()")
self._execute_and_expect_empty_response("del __upf")
def _upload_via_mount(self, source, target):
with open(source, "rb") as fp:
content = fp.read()
try:
with open(target, "wb") as fp:
fp.write(content)
# Force writes to the device to avoid data corruption
# when user resets or plugs out the device
os.fsync(fp)
except OSError as e:
self._report_upload_via_mount_error(source, target, e)
return
def _report_upload_via_mount_error(self, source, target, error):
self._send_error_to_shell(
"Couldn't write to %s\nOriginal error: %s\n\nIf the target directory exists then it may be corrupted."
% (target, error)
)
def _cat_via_serial(self, source):
try:
out, err = self._execute_and_get_response(
dedent(
"""
with open(%r, "r") as fp:
print(fp.read())
"""
% source
).strip()
)
if out:
self._send_text_to_shell(
out.decode("utf-8", errors="replace"), "stdout"
)
if err:
self._send_text_to_shell(
err.decode("utf-8", errors="replace"), "stderr"
)
except Exception:
self._send_error_to_shell(traceback.format_exc())
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
def _cat_via_mount(self, source):
try:
with open(source, "r", encoding="UTF-8", errors="replace") as fp:
self._send_text_to_shell(fp.read(), "stdout")
except Exception:
self._send_error_to_shell(traceback.format_exc())
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
def _list_files(self):
mount = self._get_fs_mount()
if mount is None:
return self._execute_and_parse_value(
"import os as __os_; print(__os_.listdir()); del __os_"
)
else:
return os.listdir(mount)
def _supports_directories(self):
if "micro:bit" in self._welcome_text.lower():
return False
else:
return True
def _get_fs_mount_name(self):
return None
def _get_bootloader_mount_name(self):
return None
def _get_fs_mount(self):
if self._get_fs_mount_name() is None:
return None
else:
candidates = find_volumes_by_name(self._get_fs_mount_name())
if len(candidates) == 0:
raise RuntimeError("Could not find volume " + self._get_fs_mount_name())
elif len(candidates) > 1:
raise RuntimeError(
"Found several possible mount points: %s" % candidates
)
else:
return candidates[0]
def _read_next_serial_message(self) -> Optional[MessageFromBackend]:
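        # The incoming stream interleaves program output with protocol
        # markers: EOT separates response blocks, NORMAL_PROMPT and
        # FIRST_RAW_PROMPT mark REPL prompts, and THONNY_MSG_START introduces
        # a serialized Thonny message. Each call consumes at most one such
        # unit and unreads whatever it cannot handle yet.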
new_bytes = self._connection.read_all()
if len(new_bytes) == 0:
return None
# TODO: Handle the case where part of the prompt was already published in previous message
# Look for the first marker (EOT anywhere or NORMAL_PROMPT in the end of the seq)
match = re.search(
b"("
+ EOT
+ b"|"
+ NORMAL_PROMPT
+ b"$" # Consider prompts only if they're at the end of output
+ b"|"
+ FIRST_RAW_PROMPT
+ b"$"
+ b")",
new_bytes,
)
if match is None:
# normal output (possibly partial)
return self._read_output_message(new_bytes, False)
elif match.start() > 0:
# starts with block of normal output
self._connection.unread(new_bytes[match.start() :])
return self._read_output_message(new_bytes[: match.start()], True)
elif match.group() == FIRST_RAW_PROMPT:
assert new_bytes == FIRST_RAW_PROMPT
self.idle = True
return ToplevelResponse()
elif match.group() == NORMAL_PROMPT:
# Go to raw prompt
assert new_bytes == NORMAL_PROMPT, "Got %s" % new_bytes
return self._enter_raw_repl(True)
else:
assert match.group() == EOT
assert match.start() == 0
if len(new_bytes) == 1:
# can't decide anything yet
self._connection.unread(new_bytes)
return None
elif new_bytes[1:2] == RAW_PROMPT:
# must be end of the response to a non-Thonny command
# Only treat as raw prompt if it ends the output
if new_bytes[1:] == RAW_PROMPT:
assert (
self._connection.incoming_is_empty()
                    ) # TODO: what about Ctrl-? ?
self.idle = True
return ToplevelResponse()
else:
# Looks like the prompt was discarded by a soft reboot (or some other reason?)
# hide it and forget it
self._connection.unread(new_bytes[2:])
return None
elif new_bytes[1:2] == THONNY_MSG_START:
# must be followed by empty error block and raw prompt
# read the message, following exception block and next prompt
terminator = b"\r\n" + EOT + EOT + RAW_PROMPT
term_loc = new_bytes.find(terminator)
if term_loc == -1:
# not complete yet
self._connection.unread(new_bytes)
return None
elif term_loc == len(new_bytes) - len(terminator):
# This terminator ends the bytes
# The normal, completed case
assert self._connection.incoming_is_empty()
msg_bytes = new_bytes[2 : -len(terminator)]
self.idle = True
return self._parse_message(msg_bytes)
else:
# There is something following the terminator
# I guess this can be caused by interrupt
# This means the message is stale
logging.info(
"disregarding out of date Thonny message: %r", new_bytes
)
# Unread following stuff
self._connection.unread(new_bytes[term_loc + len(terminator) :])
else:
# exception block
# this is followed by EOT and can/should be read in one piece
next_eot_loc = new_bytes.find(EOT, 1)
if next_eot_loc == -1:
# the block isn't complete yet
self._connection.unread(new_bytes)
return None
else:
# block is complete
block_bytes = new_bytes[1:next_eot_loc]
leftover_bytes = new_bytes[next_eot_loc:] # should be EOT + >
self._connection.unread(leftover_bytes)
if len(block_bytes) > 0:
# non-empty exception block
return BackendEvent(
event_type="ProgramOutput",
stream_name="stderr",
data=self.transform_output(
block_bytes.decode("utf-8", "replace"), "stderr"
),
)
else:
return None
return None
def _parse_message(self, msg_bytes):
try:
msg_str = msg_bytes.decode("utf-8").strip()
except:
traceback.print_exc()
msg_str = msg_bytes.decode("utf-8", "replace").strip()
try:
msg = ast.literal_eval(msg_str)
except:
logging.getLogger("thonny").error("Could not eval %r", msg_str)
raise
assert isinstance(msg, dict)
class_name = msg["message_class"]
del msg["message_class"]
assert class_name in globals()
class_ = globals()[class_name]
return class_(**msg)
def _read_output_message(self, out_bytes, complete) -> Optional[BackendEvent]:
if complete:
out_str = out_bytes.decode("utf-8", "replace")
else:
# out_bytes may end with a partial utf-8 char
while True:
try:
out_str = out_bytes.decode("utf-8", "replace")
break
except UnicodeDecodeError:
# unread last byte and try again
self._connection.unread(out_bytes[-1:])
out_bytes = out_bytes[:-1]
if len(out_str) == 0:
return None
else:
transformed = self.transform_output(out_str, "stdout")
return BackendEvent(
event_type="ProgramOutput", stream_name="stdout", data=transformed
)
def transform_output(self, s, stream_name):
if os.name != "nt":
            # convert CRLF line endings from the device to LF (Windows output is left as-is)
s = s.replace("\r\n", "\n")
# replace "<stdin>" in error messages with script name
if (
stream_name == "stderr"
and self._last_toplevel_command
and self._last_toplevel_command.name in ["Run", "run"]
and hasattr(self._last_toplevel_command, "script_path")
):
s = s.replace('"<stdin>"', '"%s"' % self._last_toplevel_command.script_path)
# TODO: get rid of raw prompts (may occur after soft reboot)
        # TODO: move it to CircuitPython subclass
return s.replace(
"Press any key to enter the REPL. Use CTRL-D to reload.",
"Press CTRL-C to enter the REPL. Use CTRL-D to reload.",
)
def _get_path_prefix(self):
if not self._supports_directories():
return ""
elif "LoBo" in self._welcome_text or "WiPy with ESP32" in self._welcome_text:
return "/flash/"
else:
return "/"
def get_default_directory(self):
prefix = self._get_path_prefix()
if prefix.endswith("/") and prefix != "/":
return prefix[:-1]
else:
return prefix
def _get_main_script_path(self):
return self._get_path_prefix() + "main.py"
def _get_boot_script_path(self):
return self._get_path_prefix() + "boot.py"
def _get_script_path(self):
local_path = (
get_workbench().get_editor_notebook().get_current_editor().save_file(False)
)
assert os.path.isfile(local_path), "File not found: %s" % local_path
return self._get_path_prefix() + os.path.basename(local_path)
def transform_message(self, msg):
if msg is None:
return None
if isinstance(msg.get("value_info", None), tuple):
msg["value_info"] = common.ValueInfo(*msg["value_info"])
if (
getattr(msg, "command_name", None) == "shell_autocomplete"
and "completions" not in msg
):
names = msg["names"]
match = msg["match"]
del msg["names"]
matches = [
{"name": n, "complete": n[len(match) :]}
for n in names
if n.startswith(match) and not n.startswith("__")
]
msg["completions"] = matches
return msg
else:
return msg
def _enter_raw_repl(self, strict):
if strict:
assert self._connection.buffers_are_empty()
discarded_data = b""
for delay in [0.01, 0.05, 0.1, 0.5]:
self._connection.write(b"\x03")
sleep(delay / 3)
self._connection.write(b"\x01")
sleep(delay)
# Consume the raw repl introduction + prompt
discarded_data += self._connection.read_all()
if discarded_data.endswith(b"\r\n>"):
self.idle = True
return ToplevelResponse()
self._send_error_to_shell(
"Couldn't connect to the raw REPL. Serial output: " + str(discarded_data)
)
self.idle = False
return None
def _send_error_to_shell(self, message_text):
self._send_text_to_shell(message_text, "stderr")
def _send_text_to_shell(self, message_text, stream_name):
if not message_text.endswith("\n"):
message_text += "\n"
self._non_serial_msg_queue.put(
BackendEvent(
event_type="ProgramOutput", stream_name=stream_name, data=message_text
)
)
def _get_builtins_info(self):
"""
for p in self._get_api_stubs_path():
builtins_file = os.path.join(p, "__builtins__.py")
if os.path.exists(builtins_file):
return parse_api_information(builtins_file)
"""
path = os.path.join(self._get_api_stubs_path(), "builtins.py")
if os.path.exists(path):
return parse_api_information(path)
else:
return {}
def _get_api_stubs_path(self):
import inspect
return os.path.join(
os.path.dirname(inspect.getfile(self.__class__)), "api_stubs"
)
@property
def firmware_filetypes(self):
return [("all files", ".*")]
@property
def micropython_upload_enabled(self):
return self._connection is not None
def select_and_upload_micropython(self):
firmware_path = askopenfilename(
filetypes=self.firmware_filetypes,
initialdir=get_workbench().get_option("run.working_directory"),
)
if firmware_path:
self.upload_micropython(firmware_path)
def upload_micropython(self, firmware_path):
cmd = self.construct_firmware_upload_command(firmware_path)
self.disconnect()
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
dlg = SubprocessDialog(
get_workbench(),
proc,
"Uploading firmware",
autoclose=False,
conclusion="Done.\nNB! If opening REPL fails on first trial\nthen wait a second and try again.",
)
show_dialog(dlg)
def construct_firmware_upload_command(self, firmware_path):
raise NotImplementedError()
@property
def known_usb_vids_pids(self):
"""Return set of pairs of USB device VID, PID"""
return set()
@property
def known_port_descriptions(self):
return set()
def get_node_label(self):
if "CircuitPython" in self._welcome_text:
return "CircuitPython device"
elif "micro:bit" in self._welcome_text.lower():
return "micro:bit"
else:
return "MicroPython device"
def has_separate_files(self):
return self._connection is not None
def can_do_file_operations(self):
return self.idle
class MicroPythonConfigPage(BackendDetailsConfigPage):
backend_name = None # Will be overwritten on Workbench.add_backend
def __init__(self, master):
super().__init__(master)
intro_text = (
"Connect your device to the computer and select corresponding port below (look for your device name, \n"
+ '"USB Serial" or "UART"). If you can\'t find it, you may need to install proper USB driver first.'
)
if self.allow_webrepl:
intro_text = (
"Connecting via USB cable:\n"
+ intro_text
+ "\n\n"
+ "Connecting via WebREPL protocol:\n"
+ "If your device supports WebREPL, first connect via serial, make sure WebREPL is enabled\n"
+ "(import webrepl_setup), connect your computer and device to same network and select < WebREPL > below"
)
intro_label = ttk.Label(self, text=intro_text)
intro_label.grid(row=0, column=0, sticky="nw")
driver_url = self._get_usb_driver_url()
if driver_url:
driver_url_label = create_url_label(self, driver_url)
driver_url_label.grid(row=1, column=0, sticky="nw")
port_label = ttk.Label(
self, text="Port or WebREPL" if self.allow_webrepl else "Port"
)
port_label.grid(row=3, column=0, sticky="nw", pady=(10, 0))
self._ports_by_desc = {
p.description
if p.device in p.description
else p.description + " (" + p.device + ")": p.device
for p in list_serial_ports()
}
self._ports_by_desc["< Try to detect port automatically >"] = "auto"
self._ports_by_desc["< None / don't connect at all >"] = None
self._WEBREPL_OPTION_DESC = "< WebREPL >"
if self.allow_webrepl:
self._ports_by_desc[self._WEBREPL_OPTION_DESC] = "webrepl"
def port_order(p):
_, name = p
if name is None:
return ""
elif name.startswith("COM") and len(name) == 4:
# Make one-digit COM ports go before COM10
return name.replace("COM", "COM0")
else:
return name
# order by port, auto first
port_descriptions = [
key for key, _ in sorted(self._ports_by_desc.items(), key=port_order)
]
self._port_desc_variable = create_string_var(
self.get_current_port_desc(), self._on_change_port
)
self._port_combo = ttk.Combobox(
self,
exportselection=False,
textvariable=self._port_desc_variable,
values=port_descriptions,
)
self._port_combo.state(["!disabled", "readonly"])
self._port_combo.grid(row=4, column=0, sticky="new")
self.columnconfigure(0, weight=1)
if self.allow_webrepl:
self._init_webrepl_frame()
self._on_change_port()
def _init_webrepl_frame(self):
self._webrepl_frame = ttk.Frame(self)
self._webrepl_url_var = create_string_var(DEFAULT_WEBREPL_URL)
url_label = ttk.Label(
self._webrepl_frame, text="URL (eg. %s)" % DEFAULT_WEBREPL_URL
)
url_label.grid(row=0, column=0, sticky="nw", pady=(10, 0))
url_entry = ttk.Entry(
self._webrepl_frame, textvariable=self._webrepl_url_var, width=24
)
url_entry.grid(row=1, column=0, sticky="nw")
self._webrepl_password_var = create_string_var(
get_workbench().get_option(self.backend_name + ".webrepl_password")
)
pw_label = ttk.Label(
self._webrepl_frame,
text="Password (the one specified with `import webrepl_setup`)",
)
pw_label.grid(row=2, column=0, sticky="nw", pady=(10, 0))
pw_entry = ttk.Entry(
self._webrepl_frame, textvariable=self._webrepl_password_var, width=9
)
pw_entry.grid(row=3, column=0, sticky="nw")
def get_current_port_desc(self):
name = get_workbench().get_option(self.backend_name + ".port")
for desc in self._ports_by_desc:
if self._ports_by_desc[desc] == name:
return desc
return ""
def is_modified(self):
return (
self._port_desc_variable.modified # pylint: disable=no-member
or self.allow_webrepl
and self._webrepl_password_var.modified # pylint: disable=no-member
or self.allow_webrepl
and self._webrepl_url_var.modified
) # pylint: disable=no-member
def should_restart(self):
return self.is_modified()
def apply(self):
if not self.is_modified():
return
else:
port_desc = self._port_desc_variable.get()
port_name = self._ports_by_desc[port_desc]
get_workbench().set_option(self.backend_name + ".port", port_name)
get_workbench().set_option(
self.backend_name + ".webrepl_url", self._webrepl_url_var.get()
)
get_workbench().set_option(
self.backend_name + ".webrepl_password",
self._webrepl_password_var.get(),
)
def _on_change_port(self, *args):
if self._port_desc_variable.get() == self._WEBREPL_OPTION_DESC:
self._webrepl_frame.grid(row=6, column=0, sticky="nwe")
elif self.allow_webrepl and self._webrepl_frame.winfo_ismapped():
self._webrepl_frame.grid_forget()
def _get_usb_driver_url(self):
return None
@property
def allow_webrepl(self):
return False
class GenericMicroPythonProxy(MicroPythonProxy):
@property
def known_usb_vids_pids(self):
"""Return set of pairs of USB device (VID, PID)"""
return {
# Generic MicroPython Board, see http://pid.codes/org/MicroPython/
(0x1209, 0xADDA)
}
class GenericMicroPythonConfigPage(MicroPythonConfigPage):
@property
def allow_webrepl(self):
return True
class Connection:
"""Utility class for using Serial or WebSocket connection
Uses background thread to read from the source as soon as possible
to avoid loss of data (because buffer overflow or the device discarding
unread data).
Allows writing with delays after each n bytes.
Allows unreading data.
"""
def __init__(self):
self._read_queue = Queue() # populated by reader thread
self._read_buffer = bytearray() # used for unreading and postponing bytes
self.num_bytes_received = 0
self._error = None
def read(self, size, timeout=1):
if timeout == 0:
raise TimeoutError()
timer = TimeHelper(timeout)
while len(self._read_buffer) < size:
self._check_for_error()
try:
self._read_buffer.extend(self._read_queue.get(True, timer.time_left))
except queue.Empty:
raise TimeoutError(
"Reaction timeout. Bytes read: %s" % self._read_buffer
)
try:
data = self._read_buffer[:size]
return data
finally:
del self._read_buffer[:size]
def read_until(self, terminators, timeout=2):
if timeout == 0:
raise TimeoutError()
timer = TimeHelper(timeout)
if not isinstance(terminators, (set, list, tuple)):
terminators = [terminators]
terminator = None
while True:
self._check_for_error()
found = False
for terminator in terminators:
if terminator in self._read_buffer:
found = True
break
if found:
break
try:
data = self._read_queue.get(True, timer.time_left)
assert len(data) > 0
self._read_buffer.extend(data)
except queue.Empty:
raise TimeoutError(
"Reaction timeout. Bytes read: %s" % self._read_buffer
)
assert terminator is not None
size = self._read_buffer.index(terminator) + len(terminator)
try:
data = self._read_buffer[:size]
return data
finally:
del self._read_buffer[:size]
def read_all(self):
while not self._read_queue.empty():
self._read_buffer.extend(self._read_queue.get(True))
if len(self._read_buffer) == 0:
self._check_for_error()
try:
return self._read_buffer
finally:
self._read_buffer = bytearray()
def _check_for_error(self):
if self._error:
raise SerialException("EOF")
def unread(self, data):
self._read_buffer = data + self._read_buffer
def write(self, data, block_size=32, delay=0.01):
raise NotImplementedError()
def _log_data(self, data):
print(
data.decode("Latin-1")
.replace("\r\n", "\n")
.replace("\x01", "①")
.replace("\x02", "②")
.replace("\x03", "③")
.replace("\x04", "④"),
end="",
)
def incoming_is_empty(self):
return self._read_queue.empty() and len(self._read_buffer) == 0
def outgoing_is_empty(self):
return True
def buffers_are_empty(self):
return self.incoming_is_empty() and self.outgoing_is_empty()
def reset_input_buffer(self):
return self.read_all()
def reset_output_buffer(self):
pass
def close(self):
raise NotImplementedError()
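# --- Illustrative sketch (hypothetical helpers, never invoked by the plugin).
# The toy subclass below only demonstrates Connection's buffering helpers
# (read_until / unread / read_all); it loops written bytes straight back into
# its own read queue instead of talking to real hardware.
class _EchoConnection(Connection):
    def write(self, data, block_size=32, delay=0.01):
        self._read_queue.put(data)
        return len(data)
    def close(self):
        pass
def _connection_usage_example():
    """Hedged usage sketch; returns the echoed bytes."""
    conn = _EchoConnection()
    conn.write(b"hello>")
    chunk = conn.read_until(b">")  # -> bytearray(b"hello>")
    conn.unread(chunk)             # push it back for a later reader
    return bytes(conn.read_all())  # -> b"hello>"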
class SerialConnection(Connection):
def __init__(self, port, baudrate):
super().__init__()
self._serial = serial.Serial(port, baudrate=baudrate, timeout=None)
self._reading_thread = threading.Thread(target=self._listen_serial, daemon=True)
self._reading_thread.start()
def write(self, data, block_size=32, delay=0.01):
for i in range(0, len(data), block_size):
block = data[i : i + block_size]
# self._log_data(b"[" + block + b"]")
size = self._serial.write(block)
assert size == len(block)
time.sleep(delay)
return len(data)
def _listen_serial(self):
"NB! works in background thread"
try:
while True:
b = self._serial.read(1) # To avoid busy loop
if len(b) == 0:
self._error = "EOF"
# print("LISTEN EOFFFFFFFFFF")
break
data = b + self._serial.read_all()
self.num_bytes_received += len(data)
self._read_queue.put(data)
# self._log_data(data)
except SerialException as e:
logging.exception("Error while reading from serial")
            self._error = "Serial reading error: %s" % e
def incoming_is_empty(self):
return self._serial.in_waiting == 0 and super().incoming_is_empty()
def outgoing_is_empty(self):
return self._serial.out_waiting == 0
def reset_output_buffer(self):
self._serial.reset_output_buffer()
def close(self):
if self._serial is not None:
try:
self._serial.cancel_read()
self._reading_thread.join()
finally:
try:
self._serial.close()
self._serial = None
except Exception:
logging.exception("Couldn't close serial")
class WebReplConnection(Connection):
def __init__(self, url, password):
super().__init__()
self._url = url
self._password = password
        # Some tricks are needed to use an async library from a sync program:
        # thread-safe queues are used to communicate with the async world
        # running in another thread.
self._write_queue = Queue()
self._connection_result = Queue()
self._ws_thread = threading.Thread(target=self._wrap_ws_main, daemon=True)
self._ws_thread.start()
# Wait until connection was made
res = self._connection_result.get()
if res != "OK":
raise res
def _wrap_ws_main(self):
import asyncio
loop = asyncio.new_event_loop()
loop.set_debug(True)
loop.run_until_complete(self._ws_main())
async def _ws_main(self):
import asyncio
try:
await self._ws_connect()
except Exception as e:
self._connection_result.put_nowait(e)
return
self._connection_result.put_nowait("OK")
await asyncio.gather(self._ws_keep_reading(), self._ws_keep_writing())
async def _ws_connect(self):
import asyncio
import websockets
self._ws = await asyncio.wait_for(
websockets.connect(self._url, ping_interval=None), 3
)
print("GOT WS", self._ws)
# read password prompt and send password
read_chars = ""
while read_chars != "Password: ":
print("prelude", read_chars)
ch = await self._ws.recv()
print("GOT", ch)
read_chars += ch
print("sending password")
await self._ws.send(self._password + "\n")
print("sent password")
async def _ws_keep_reading(self):
while True:
data = (await self._ws.recv()).encode("UTF-8")
print("Read:", repr(data))
if len(data) == 0:
self._error = "EOF"
break
self.num_bytes_received += len(data)
self._read_queue.put(data, block=False)
async def _ws_keep_writing(self):
import asyncio
while True:
while not self._write_queue.empty():
data = self._write_queue.get(block=False).decode("UTF-8")
print("Wrote:", repr(data))
await self._ws.send(data)
# Allow reading loop to progress
await asyncio.sleep(0.01)
def write(self, data, block_size=32, delay=0.01):
self._write_queue.put_nowait(data)
async def _async_close(self):
await self._ws.close()
def close(self):
"""
import asyncio
asyncio.get_event_loop().run_until_complete(self.async_close())
"""
class TimeHelper:
def __init__(self, time_allowed):
self.start_time = time.time()
self.time_allowed = time_allowed
@property
def time_spent(self):
return time.time() - self.start_time
@property
def time_left(self):
return max(self.time_allowed - self.time_spent, 0)
def parse_api_information(file_path):
with tokenize.open(file_path) as fp:
source = fp.read()
tree = ast.parse(source)
defs = {}
# TODO: read also docstrings ?
for toplevel_item in tree.body:
if isinstance(toplevel_item, ast.ClassDef):
class_name = toplevel_item.name
member_names = []
for item in toplevel_item.body:
if isinstance(item, ast.FunctionDef):
member_names.append(item.name)
elif isinstance(item, ast.Assign):
# TODO: check Python 3.4
"TODO: item.targets[0].id"
defs[class_name] = member_names
return defs
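# --- Illustrative sketch (hypothetical helper, never invoked by the plugin) ---
def _parse_api_information_example():
    """Hedged example: parse a tiny stub written to a temporary file.
    For a stub declaring ``class Pin`` with methods ``on`` and ``off`` the
    expected result is ``{"Pin": ["on", "off"]}``.
    """
    import tempfile
    import textwrap
    stub = textwrap.dedent(
        """
        class Pin:
            def on(self):
                pass
            def off(self):
                pass
        """
    )
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as fp:
        fp.write(stub)
        stub_path = fp.name
    return parse_api_information(stub_path)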
def list_serial_ports():
# serial.tools.list_ports.comports() can be too slow
# because os.path.islink can be too slow (https://github.com/pyserial/pyserial/pull/303)
    # Workaround: temporarily patch os.path.islink
try:
old_islink = os.path.islink
if platform.system() == "Windows":
os.path.islink = lambda _: False
return list(serial.tools.list_ports.comports())
finally:
os.path.islink = old_islink
def add_micropython_backend(name, proxy_class, description, config_page):
get_workbench().set_default(name + ".port", "auto")
get_workbench().set_default(name + ".webrepl_url", DEFAULT_WEBREPL_URL)
get_workbench().set_default(name + ".webrepl_password", "")
get_workbench().add_backend(name, proxy_class, description, config_page)
def load_plugin():
add_micropython_backend(
"GenericMicroPython",
GenericMicroPythonProxy,
"MicroPython on a generic device",
GenericMicroPythonConfigPage,
)
def _upload_as(target_provider_method):
source_path = (
get_workbench().get_editor_notebook().get_current_editor().save_file(False)
)
if source_path is None:
return
proxy = get_runner().get_backend_proxy()
assert isinstance(proxy, MicroPythonProxy)
if os.path.isabs(source_path):
source_path = os.path.relpath(source_path, get_workbench().get_cwd())
target = getattr(proxy, target_provider_method)()
get_shell().submit_magic_command(["%upload", source_path, target])
def _cat(source_provider_method):
proxy = get_runner().get_backend_proxy()
assert isinstance(proxy, MicroPythonProxy)
source = getattr(proxy, source_provider_method)()
get_shell().submit_magic_command(["%cat", source])
def _upload_as_main_script():
_upload_as("_get_main_script_path")
def _upload_as_boot_script():
_upload_as("_get_boot_script_path")
def _upload_script():
_upload_as("_get_script_path")
def _cat_main_script():
_cat("_get_main_script_path")
def _cat_boot_script():
_cat("_get_boot_script_path")
def soft_reboot():
proxy = get_runner().get_backend_proxy()
if hasattr(proxy, "_soft_reboot_and_run_main"):
return proxy._soft_reboot_and_run_main()
return None
def soft_reboot_enabled():
proxy = get_runner().get_backend_proxy()
return (
proxy
and proxy.is_functional()
and hasattr(proxy, "_soft_reboot_and_run_main")
)
def disconnect():
proxy = get_runner().get_backend_proxy()
assert hasattr(proxy, "disconnect")
proxy.disconnect()
def disconnect_enabled():
proxy = get_runner().get_backend_proxy()
return hasattr(proxy, "disconnect")
def file_commands_enabled():
proxy = get_runner().get_backend_proxy()
return (
isinstance(proxy, MicroPythonProxy)
and get_workbench().get_editor_notebook().get_current_editor() is not None
and get_runner().is_waiting_toplevel_command()
)
def select_device():
get_workbench().show_options("Interpreter")
get_workbench().add_command(
"selectdevice", "device", "Select device", select_device, group=1
)
get_workbench().add_command(
"softreboot",
"device",
"Soft reboot",
soft_reboot,
soft_reboot_enabled,
group=100,
default_sequence="<Control-d>",
extra_sequences=["<<CtrlDInText>>"],
)
get_workbench().add_command(
"uploadmainscript",
"device",
"Upload current script as main script",
_upload_as_main_script,
tester=file_commands_enabled,
default_sequence="<Control-u>",
group=20,
)
get_workbench().add_command(
"uploadbootscript",
"device",
"Upload current script as boot script",
_upload_as_boot_script,
tester=file_commands_enabled,
group=20,
)
get_workbench().add_command(
"uploadscript",
"device",
"Upload current script with current name",
_upload_script,
tester=file_commands_enabled,
group=20,
)
get_workbench().add_command(
"catmainscript",
"device",
"Show device's main script",
_cat_main_script,
tester=file_commands_enabled,
group=20,
)
get_workbench().add_command(
"catbootscript",
"device",
"Show device's boot script",
_cat_boot_script,
tester=file_commands_enabled,
group=20,
)
get_workbench().add_command(
"disconnectserial",
"device",
"Close serial connection",
disconnect,
disconnect_enabled,
group=100,
)
| 34.325145 | 258 | 0.540622 |
420f40513344156574542f0b4d74a0cd09db5092
| 5,513 |
py
|
Python
|
qiskit/tools/parallel.py
|
filemaster/qiskit-terra
|
8672c407a5a0e34405315f82d5ad5847916e857e
|
[
"Apache-2.0"
] | null | null | null |
qiskit/tools/parallel.py
|
filemaster/qiskit-terra
|
8672c407a5a0e34405315f82d5ad5847916e857e
|
[
"Apache-2.0"
] | null | null | null |
qiskit/tools/parallel.py
|
filemaster/qiskit-terra
|
8672c407a5a0e34405315f82d5ad5847916e857e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
Routines for running Python functions in parallel using process pools
from the multiprocessing library.
"""
import os
import platform
from multiprocessing import Pool
from qiskit.qiskiterror import QiskitError
from qiskit._util import local_hardware_info
from qiskit.tools.events._pubsub import Publisher
# Set parallel flag
os.environ['QISKIT_IN_PARALLEL'] = 'FALSE'
# Number of local physical cpus
CPU_COUNT = local_hardware_info()['cpus']
def parallel_map(task, values, task_args=tuple(), task_kwargs={}, # pylint: disable=W0102
num_processes=CPU_COUNT):
"""
Parallel execution of a mapping of `values` to the function `task`. This
is functionally equivalent to::
result = [task(value, *task_args, **task_kwargs) for value in values]
    On Windows this function defaults to a serial implementation to avoid the
    overhead of spawning processes.
Args:
        task (func): Function that is to be called for each value in ``values``.
values (array_like): List or array of values for which the ``task``
function is to be evaluated.
task_args (list): Optional additional arguments to the ``task`` function.
        task_kwargs (dict): Optional additional keyword arguments for the ``task`` function.
num_processes (int): Number of processes to spawn.
Returns:
result: The result list contains the value of
``task(value, *task_args, **task_kwargs)`` for
each value in ``values``.
Raises:
QiskitError: If user interrupts via keyboard.
Events:
        terra.parallel.start: The collection of parallel tasks is about to start.
        terra.parallel.done: One of the parallel tasks has finished.
        terra.parallel.finish: All the parallel tasks have finished.
"""
if len(values) == 1:
return [task(values[0], *task_args, **task_kwargs)]
Publisher().publish("terra.parallel.start", len(values))
nfinished = [0]
def _callback(_):
nfinished[0] += 1
Publisher().publish("terra.parallel.done", nfinished[0])
# Run in parallel if not Win and not in parallel already
if platform.system() != 'Windows' and num_processes > 1 \
and os.getenv('QISKIT_IN_PARALLEL') == 'FALSE':
os.environ['QISKIT_IN_PARALLEL'] = 'TRUE'
try:
pool = Pool(processes=num_processes)
async_res = [pool.apply_async(task, (value,) + task_args, task_kwargs,
_callback) for value in values]
while not all([item.ready() for item in async_res]):
for item in async_res:
item.wait(timeout=0.1)
pool.terminate()
pool.join()
except KeyboardInterrupt:
pool.terminate()
pool.join()
Publisher().publish("terra.parallel.finish")
raise QiskitError('Keyboard interrupt in parallel_map.')
Publisher().publish("terra.parallel.finish")
os.environ['QISKIT_IN_PARALLEL'] = 'FALSE'
return [ar.get() for ar in async_res]
    # Cannot run in parallel on Windows, if another parallel_map is already
    # running in parallel, or if len(values) == 1.
results = []
for _, value in enumerate(values):
result = task(value, *task_args, **task_kwargs)
results.append(result)
_callback(0)
Publisher().publish("terra.parallel.finish")
return results
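# --- Illustrative usage (hedged sketch; hypothetical helpers, never invoked).
# The task given to parallel_map must be picklable when worker processes are
# actually spawned, hence the module-level helper below.
def _example_square(x):
    """Toy task used only by _parallel_map_example()."""
    return x * x
def _parallel_map_example():
    """Minimal sketch: equivalent to [_example_square(v) for v in range(4)],
    but distributed over worker processes where supported."""
    return parallel_map(_example_square, list(range(4)))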
| 39.949275 | 90 | 0.667876 |
2353340f1c6d63bf5580b78b2ed612614636e559
| 10,962 |
py
|
Python
|
vmware-ose-osis-verifier/openapi_client/models/osis_user.py
|
csgtree/object-storage-extension-samples
|
397f3033ddd4aa1bb1e2079a9e77309a78cc0b0d
|
[
"Apache-2.0"
] | 6 |
2020-09-11T02:52:42.000Z
|
2021-04-19T11:20:42.000Z
|
vmware-ose-osis-verifier/openapi_client/models/osis_user.py
|
csgtree/object-storage-extension-samples
|
397f3033ddd4aa1bb1e2079a9e77309a78cc0b0d
|
[
"Apache-2.0"
] | 5 |
2020-12-21T20:14:59.000Z
|
2022-03-21T14:35:43.000Z
|
vmware-ose-osis-verifier/openapi_client/models/osis_user.py
|
csgtree/object-storage-extension-samples
|
397f3033ddd4aa1bb1e2079a9e77309a78cc0b0d
|
[
"Apache-2.0"
] | 4 |
2021-07-20T09:07:52.000Z
|
2022-03-21T14:33:44.000Z
|
# coding: utf-8
"""
Object Storage Interoperability Services API
This is VMware Cloud Director Object Storage Interoperability Services API. Once storage platform vendor implements REST APIs complying with this specification, Object Storage Extension can integrate with the platform without coding effort. # noqa: E501
The version of the OpenAPI document: 1.0.0-oas3
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class OsisUser(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'user_id': 'str',
'canonical_user_id': 'str',
'tenant_id': 'str',
'active': 'bool',
'username': 'str',
'email': 'str',
'role': 'str',
'cd_user_id': 'str',
'cd_tenant_id': 'str'
}
attribute_map = {
'user_id': 'user_id',
'canonical_user_id': 'canonical_user_id',
'tenant_id': 'tenant_id',
'active': 'active',
'username': 'username',
'email': 'email',
'role': 'role',
'cd_user_id': 'cd_user_id',
'cd_tenant_id': 'cd_tenant_id'
}
def __init__(self, user_id=None, canonical_user_id=None, tenant_id=None, active=None, username=None, email=None, role=None, cd_user_id=None, cd_tenant_id=None, local_vars_configuration=None): # noqa: E501
"""OsisUser - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._user_id = None
self._canonical_user_id = None
self._tenant_id = None
self._active = None
self._username = None
self._email = None
self._role = None
self._cd_user_id = None
self._cd_tenant_id = None
self.discriminator = None
self.user_id = user_id
self.canonical_user_id = canonical_user_id
self.tenant_id = tenant_id
self.active = active
if username is not None:
self.username = username
if email is not None:
self.email = email
if role is not None:
self.role = role
self.cd_user_id = cd_user_id
self.cd_tenant_id = cd_tenant_id
@property
def user_id(self):
"""Gets the user_id of this OsisUser. # noqa: E501
user id # noqa: E501
:return: The user_id of this OsisUser. # noqa: E501
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this OsisUser.
user id # noqa: E501
:param user_id: The user_id of this OsisUser. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and user_id is None: # noqa: E501
raise ValueError("Invalid value for `user_id`, must not be `None`") # noqa: E501
self._user_id = user_id
@property
def canonical_user_id(self):
"""Gets the canonical_user_id of this OsisUser. # noqa: E501
canonical user id # noqa: E501
:return: The canonical_user_id of this OsisUser. # noqa: E501
:rtype: str
"""
return self._canonical_user_id
@canonical_user_id.setter
def canonical_user_id(self, canonical_user_id):
"""Sets the canonical_user_id of this OsisUser.
canonical user id # noqa: E501
:param canonical_user_id: The canonical_user_id of this OsisUser. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and canonical_user_id is None: # noqa: E501
raise ValueError("Invalid value for `canonical_user_id`, must not be `None`") # noqa: E501
self._canonical_user_id = canonical_user_id
@property
def tenant_id(self):
"""Gets the tenant_id of this OsisUser. # noqa: E501
id of the tenant which the user belongs to # noqa: E501
:return: The tenant_id of this OsisUser. # noqa: E501
:rtype: str
"""
return self._tenant_id
@tenant_id.setter
def tenant_id(self, tenant_id):
"""Sets the tenant_id of this OsisUser.
id of the tenant which the user belongs to # noqa: E501
:param tenant_id: The tenant_id of this OsisUser. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and tenant_id is None: # noqa: E501
raise ValueError("Invalid value for `tenant_id`, must not be `None`") # noqa: E501
self._tenant_id = tenant_id
@property
def active(self):
"""Gets the active of this OsisUser. # noqa: E501
user status # noqa: E501
:return: The active of this OsisUser. # noqa: E501
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""Sets the active of this OsisUser.
user status # noqa: E501
:param active: The active of this OsisUser. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and active is None: # noqa: E501
raise ValueError("Invalid value for `active`, must not be `None`") # noqa: E501
self._active = active
@property
def username(self):
"""Gets the username of this OsisUser. # noqa: E501
username mapped with Cloud Director username # noqa: E501
:return: The username of this OsisUser. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this OsisUser.
username mapped with Cloud Director username # noqa: E501
:param username: The username of this OsisUser. # noqa: E501
:type: str
"""
self._username = username
@property
def email(self):
"""Gets the email of this OsisUser. # noqa: E501
user email # noqa: E501
:return: The email of this OsisUser. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this OsisUser.
user email # noqa: E501
:param email: The email of this OsisUser. # noqa: E501
:type: str
"""
self._email = email
@property
def role(self):
"""Gets the role of this OsisUser. # noqa: E501
user role # noqa: E501
:return: The role of this OsisUser. # noqa: E501
:rtype: str
"""
return self._role
@role.setter
def role(self, role):
"""Sets the role of this OsisUser.
user role # noqa: E501
:param role: The role of this OsisUser. # noqa: E501
:type: str
"""
allowed_values = ["PROVIDER_ADMIN", "TENANT_ADMIN", "TENANT_USER", "ANONYMOUS", "UNKNOWN"] # noqa: E501
if self.local_vars_configuration.client_side_validation and role not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `role` ({0}), must be one of {1}" # noqa: E501
.format(role, allowed_values)
)
self._role = role
@property
def cd_user_id(self):
"""Gets the cd_user_id of this OsisUser. # noqa: E501
Cloud Director user id # noqa: E501
:return: The cd_user_id of this OsisUser. # noqa: E501
:rtype: str
"""
return self._cd_user_id
@cd_user_id.setter
def cd_user_id(self, cd_user_id):
"""Sets the cd_user_id of this OsisUser.
Cloud Director user id # noqa: E501
:param cd_user_id: The cd_user_id of this OsisUser. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and cd_user_id is None: # noqa: E501
raise ValueError("Invalid value for `cd_user_id`, must not be `None`") # noqa: E501
self._cd_user_id = cd_user_id
@property
def cd_tenant_id(self):
"""Gets the cd_tenant_id of this OsisUser. # noqa: E501
id of Cloud Director tenant which the user belongs to # noqa: E501
:return: The cd_tenant_id of this OsisUser. # noqa: E501
:rtype: str
"""
return self._cd_tenant_id
@cd_tenant_id.setter
def cd_tenant_id(self, cd_tenant_id):
"""Sets the cd_tenant_id of this OsisUser.
id of Cloud Director tenant which the user belongs to # noqa: E501
:param cd_tenant_id: The cd_tenant_id of this OsisUser. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and cd_tenant_id is None: # noqa: E501
raise ValueError("Invalid value for `cd_tenant_id`, must not be `None`") # noqa: E501
self._cd_tenant_id = cd_tenant_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OsisUser):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OsisUser):
return True
return self.to_dict() != other.to_dict()
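# --- Illustrative sketch (hypothetical helper; not part of the generated
# model and never invoked). The field values below are made up.
def _osis_user_example():
    """Hedged example: build an OsisUser with its required fields and
    serialise it with to_dict()."""
    user = OsisUser(
        user_id="u-001",
        canonical_user_id="c-001",
        tenant_id="t-001",
        active=True,
        role="TENANT_USER",
        cd_user_id="cd-u-001",
        cd_tenant_id="cd-t-001",
    )
    return user.to_dict()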
| 30.45 | 258 | 0.597154 |
49a568226422513d93febf59c2b07aa6fcd06178
| 10,788 |
py
|
Python
|
test/models/test_utils.py
|
talesa/botorch
|
ab04dd39a2d4c7734e41c5f26eb2dbba5b0e1771
|
[
"MIT"
] | null | null | null |
test/models/test_utils.py
|
talesa/botorch
|
ab04dd39a2d4c7734e41c5f26eb2dbba5b0e1771
|
[
"MIT"
] | null | null | null |
test/models/test_utils.py
|
talesa/botorch
|
ab04dd39a2d4c7734e41c5f26eb2dbba5b0e1771
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import torch
from botorch import settings
from botorch.exceptions import InputDataError, InputDataWarning
from botorch.models.utils import (
add_output_dim,
check_min_max_scaling,
check_no_nans,
check_standardization,
fantasize,
gpt_posterior_settings,
multioutput_to_batch_mode_transform,
validate_input_scaling,
)
from botorch.utils.testing import BotorchTestCase
from gpytorch import settings as gpt_settings
class TestMultiOutputToBatchModeTransform(BotorchTestCase):
def test_multioutput_to_batch_mode_transform(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
n = 3
num_outputs = 2
train_X = torch.rand(n, 1, **tkwargs)
train_Y = torch.rand(n, num_outputs, **tkwargs)
train_Yvar = torch.rand(n, num_outputs, **tkwargs)
X_out, Y_out, Yvar_out = multioutput_to_batch_mode_transform(
train_X=train_X,
train_Y=train_Y,
num_outputs=num_outputs,
train_Yvar=train_Yvar,
)
expected_X_out = train_X.unsqueeze(0).expand(num_outputs, -1, 1)
self.assertTrue(torch.equal(X_out, expected_X_out))
self.assertTrue(torch.equal(Y_out, train_Y.transpose(0, 1)))
self.assertTrue(torch.equal(Yvar_out, train_Yvar.transpose(0, 1)))
class TestAddOutputDim(BotorchTestCase):
def test_add_output_dim(self):
for dtype in (torch.float, torch.double):
tkwargs = {"device": self.device, "dtype": dtype}
original_batch_shape = torch.Size([2])
# check exception is raised when trailing batch dims do not line up
X = torch.rand(2, 3, 2, 1, **tkwargs)
with self.assertRaises(RuntimeError):
add_output_dim(X=X, original_batch_shape=original_batch_shape)
# test no new batch dims
X = torch.rand(2, 2, 1, **tkwargs)
X_out, output_dim_idx = add_output_dim(
X=X, original_batch_shape=original_batch_shape
)
self.assertTrue(torch.equal(X_out, X.unsqueeze(1)))
self.assertEqual(output_dim_idx, 1)
# test new batch dims
X = torch.rand(3, 2, 2, 1, **tkwargs)
X_out, output_dim_idx = add_output_dim(
X=X, original_batch_shape=original_batch_shape
)
self.assertTrue(torch.equal(X_out, X.unsqueeze(2)))
self.assertEqual(output_dim_idx, 2)
class TestInputDataChecks(BotorchTestCase):
def test_check_no_nans(self):
check_no_nans(torch.tensor([1.0, 2.0]))
with self.assertRaises(InputDataError):
check_no_nans(torch.tensor([1.0, float("nan")]))
def test_check_min_max_scaling(self):
with settings.debug(True):
# check unscaled input in unit cube
X = 0.1 + 0.8 * torch.rand(4, 2, 3)
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=X)
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
check_min_max_scaling(X=X, raise_on_fail=True)
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=X, strict=True)
self.assertTrue(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
self.assertTrue(any("not scaled" in str(w.message) for w in ws))
with self.assertRaises(InputDataError):
check_min_max_scaling(X=X, strict=True, raise_on_fail=True)
# check proper input
Xmin, Xmax = X.min(dim=-1, keepdim=True)[0], X.max(dim=-1, keepdim=True)[0]
Xstd = (X - Xmin) / (Xmax - Xmin)
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=Xstd)
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
check_min_max_scaling(X=Xstd, raise_on_fail=True)
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=Xstd, strict=True)
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
check_min_max_scaling(X=Xstd, strict=True, raise_on_fail=True)
# check violation
X[0, 0, 0] = 2
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=X)
self.assertTrue(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
self.assertTrue(any("not contained" in str(w.message) for w in ws))
with self.assertRaises(InputDataError):
check_min_max_scaling(X=X, raise_on_fail=True)
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=X, strict=True)
self.assertTrue(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
self.assertTrue(any("not contained" in str(w.message) for w in ws))
with self.assertRaises(InputDataError):
check_min_max_scaling(X=X, strict=True, raise_on_fail=True)
# check ignore_dims
with warnings.catch_warnings(record=True) as ws:
check_min_max_scaling(X=X, ignore_dims=[0])
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
def test_check_standardization(self):
Y = torch.randn(3, 4, 2)
# check standardized input
Yst = (Y - Y.mean(dim=-2, keepdim=True)) / Y.std(dim=-2, keepdim=True)
with settings.debug(True):
with warnings.catch_warnings(record=True) as ws:
check_standardization(Y=Yst)
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
check_standardization(Y=Yst, raise_on_fail=True)
# check nonzero mean
with warnings.catch_warnings(record=True) as ws:
check_standardization(Y=Yst + 1)
self.assertTrue(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
self.assertTrue(any("not standardized" in str(w.message) for w in ws))
with self.assertRaises(InputDataError):
check_standardization(Y=Yst + 1, raise_on_fail=True)
# check non-unit variance
with warnings.catch_warnings(record=True) as ws:
check_standardization(Y=Yst * 2)
self.assertTrue(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
self.assertTrue(any("not standardized" in str(w.message) for w in ws))
with self.assertRaises(InputDataError):
check_standardization(Y=Yst * 2, raise_on_fail=True)
def test_validate_input_scaling(self):
train_X = 2 + torch.rand(3, 4, 3)
train_Y = torch.randn(3, 4, 2)
# check that nothing is being checked
with settings.validate_input_scaling(False), settings.debug(True):
with warnings.catch_warnings(record=True) as ws:
validate_input_scaling(train_X=train_X, train_Y=train_Y)
self.assertFalse(
any(issubclass(w.category, InputDataWarning) for w in ws)
)
# check that warnings are being issued
with settings.debug(True), warnings.catch_warnings(record=True) as ws:
validate_input_scaling(train_X=train_X, train_Y=train_Y)
self.assertTrue(any(issubclass(w.category, InputDataWarning) for w in ws))
# check that errors are raised when requested
with settings.debug(True):
with self.assertRaises(InputDataError):
validate_input_scaling(
train_X=train_X, train_Y=train_Y, raise_on_fail=True
)
# check that no errors are being raised if everything is standardized
train_X_min = train_X.min(dim=-1, keepdim=True)[0]
train_X_max = train_X.max(dim=-1, keepdim=True)[0]
train_X_std = (train_X - train_X_min) / (train_X_max - train_X_min)
train_Y_std = (train_Y - train_Y.mean(dim=-2, keepdim=True)) / train_Y.std(
dim=-2, keepdim=True
)
with settings.debug(True), warnings.catch_warnings(record=True) as ws:
validate_input_scaling(train_X=train_X_std, train_Y=train_Y_std)
self.assertFalse(any(issubclass(w.category, InputDataWarning) for w in ws))
# test that negative variances raise an error
train_Yvar = torch.rand_like(train_Y_std)
train_Yvar[0, 0, 1] = -0.5
with settings.debug(True):
with self.assertRaises(InputDataError):
validate_input_scaling(
train_X=train_X_std, train_Y=train_Y_std, train_Yvar=train_Yvar
)
# check that NaNs raise errors
train_X_std[0, 0, 0] = float("nan")
with settings.debug(True):
with self.assertRaises(InputDataError):
validate_input_scaling(train_X=train_X_std, train_Y=train_Y_std)
class TestGPTPosteriorSettings(BotorchTestCase):
def test_gpt_posterior_settings(self):
for propagate_grads in (False, True):
with settings.propagate_grads(propagate_grads):
with gpt_posterior_settings():
self.assertTrue(gpt_settings.debug.off())
self.assertTrue(gpt_settings.fast_pred_var.on())
if settings.propagate_grads.off():
self.assertTrue(gpt_settings.detach_test_caches.on())
else:
self.assertTrue(gpt_settings.detach_test_caches.off())
class TestFantasize(BotorchTestCase):
def test_fantasize(self):
self.assertFalse(fantasize.on())
self.assertTrue(fantasize.off())
with fantasize():
self.assertTrue(fantasize.on())
self.assertFalse(fantasize.off())
with fantasize(False):
self.assertFalse(fantasize.on())
self.assertTrue(fantasize.off())
| 46.102564 | 87 | 0.607898 |
6f050ee781da363ad4233d5ce5091e7cc797f9c6
| 49 |
py
|
Python
|
src/chained/__init__.py
|
Anubhav722/Users-Online-Status-Channels-
|
68857d5cb6ab3514502183fe1058ffe7a8479472
|
[
"MIT"
] | null | null | null |
src/chained/__init__.py
|
Anubhav722/Users-Online-Status-Channels-
|
68857d5cb6ab3514502183fe1058ffe7a8479472
|
[
"MIT"
] | null | null | null |
src/chained/__init__.py
|
Anubhav722/Users-Online-Status-Channels-
|
68857d5cb6ab3514502183fe1058ffe7a8479472
|
[
"MIT"
] | null | null | null |
default_app_config = 'chained.apps.ChainedConfig'
| 49 | 49 | 0.857143 |
860c040642523dbb5d725c8d36977027b0ffeedc
| 9,094 |
py
|
Python
|
query/inference/country_info_template_fd.py
|
amitmaharana/bilingual-qald
|
8ea35425f02a8f7f48df3ec89d4d2ee3903d16c3
|
[
"Apache-2.0"
] | null | null | null |
query/inference/country_info_template_fd.py
|
amitmaharana/bilingual-qald
|
8ea35425f02a8f7f48df3ec89d4d2ee3903d16c3
|
[
"Apache-2.0"
] | null | null | null |
query/inference/country_info_template_fd.py
|
amitmaharana/bilingual-qald
|
8ea35425f02a8f7f48df3ec89d4d2ee3903d16c3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
rule based query generation
------------
country
"""
from refo import Star, Any
from inference.basic_inference import W, Rule, KeywordRule
from inference.basic_inference import SPARQL_PREFIX, SPARQL_ASK_TEM, SPARQL_COUNT_TEM, SPARQL_SELECT_TEM,SPARQL_SELECT_TEM_FD
from inference.basic_inference import pos_person, pos_book_or_movie, pos_number, person_entity, book_or_movie_entity, number_entity, pos_country, country_entity
from inference.basic_inference import CountryPropertyValueSet
import re
"""
country information
"""
population = (W('population')|W('人口') )
areatotal = (W('size')|W('how')+W('large')|W('多大')|W('面积')|W('国土面积'))
foundingdate = (W('foundingdate')|W('建国日'))
longname = (W('longname')|W('全称'))
summary = (W('introduction') | W('abstract') |W('介绍') | W('简介'))
currency =(W('currency')|W('货币'))
language = (W('language')|W('语言'))
capital = (W('capital')|W('首都') | W('行政中心'))
leader = (W('leader')|W('leaders')|W('领导人'))
largestcity = (W('最大城市'))
largest = W('largest')
city = W('city')
country_info = (foundingdate | population | areatotal | longname | summary | currency)
"""
SPARQL Template
"""
class QuestionSet:
def __init__(self):
pass
@staticmethod
def has_country_info(word_objects):
"""
The basic information about a country
:param word_objects:
:return:
"""
keyword = None
for r in basic_country_info_fd:
keyword = r.apply(word_objects)
if keyword is not None:
keyword_split = re.split("( )+", keyword)
keyword_db = keyword_split.pop()
keyword_split.pop()
keyword_douban = keyword_split.pop()
break
select = u"?x"
sparql = None
for w in word_objects:
if w.pos == pos_country:
e_douban = u"?c rdfs:label '{country}'@en." \
u"?c {keyword} ?x.".format(country=w.token, keyword=keyword_douban)
e_db = u"?c rdfs:label '{country}'@en.\n" \
u"?c {keyword} ?x".format(country=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token), keyword=keyword_db)
sparql = SPARQL_SELECT_TEM_FD.format(prefix=SPARQL_PREFIX,
select=select,
expression_douban=e_douban,
expression_db=e_db)
break
return sparql
@staticmethod
def has_largestcity(word_objects):
"""
largestcity of a country
:param word_objects:
:return:
"""
select = u"?x"
sparql = None
for w in word_objects:
if w.pos == pos_country:
e_douban = u"?c rdfs:label '{country}'@en.\n" \
u"?c dbo:largestCity ?l.\n" \
u"?l foaf:name ?x".format(country=w.token)
e_db = u"?c rdfs:label '{country}'@en.\n" \
u"?c dbo:largestCity ?l.\n" \
u"?l foaf:name ?x".format(country=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token))
sparql = SPARQL_SELECT_TEM_FD.format(prefix=SPARQL_PREFIX,
select=select,
expression_douban=e_douban,
expression_db=e_db)
break
return sparql
@staticmethod
def has_capital(word_objects):
"""
        capital of a country
:param word_objects:
:return:
"""
select = u"?x"
sparql = None
for w in word_objects:
if w.pos == pos_country:
e_douban = u"?c rdfs:label '{country}'@en.\n" \
u"?c dbo:capital ?l.\n" \
u"?l foaf:name ?x".format(country=w.token)
e_db = u"?c rdfs:label '{country}'@en.\n" \
u"?c dbo:capital ?l.\n" \
u"?l foaf:name ?x".format(country=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token))
sparql = SPARQL_SELECT_TEM_FD.format(prefix=SPARQL_PREFIX,
select=select,
expression_douban=e_douban,
expression_db=e_db)
break
return sparql
@staticmethod
def has_language(word_objects):
"""
        language of a country
:param word_objects:
:return:
"""
select = u"?x"
sparql = None
for w in word_objects:
if w.pos == pos_country:
e_douban = u"?c rdfs:label '{country}'@en.\n" \
u"?c dbo:language ?l.\n" \
u"?l foaf:name ?x".format(country=w.token)
e_db = u"?c rdfs:label '{country}'@en.\n" \
u"?c dbo:language ?l.\n" \
u"?l foaf:name ?x".format(country=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token))
sparql = SPARQL_SELECT_TEM_FD.format(prefix=SPARQL_PREFIX,
select=select,
expression_douban=e_douban,
expression_db=e_db)
break
return sparql
@staticmethod
def has_leader(word_objects):
"""
        leader of a country
:param word_objects:
:return:
"""
select = u"?x"
sparql = None
for w in word_objects:
if w.pos == pos_country:
e_douban = u"?c rdfs:label '{country}'@en.\n" \
u"?c dbo:leader ?l.\n" \
u"?l foaf:name ?x".format(country=w.token)
e_db = u"?c rdfs:label '{country}'@en.\n" \
u"?c dbo:leader ?l.\n" \
u"?l foaf:name ?x".format(country=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token))
sparql = SPARQL_SELECT_TEM_FD.format(prefix=SPARQL_PREFIX,
select=select,
expression_douban=e_douban,
expression_db=e_db)
break
return sparql
country_info_rules_fd = [
Rule(condition_num=12,condition=(country_entity + Star(Any(), greedy=False) + leader + Star(Any(), greedy=False))|(leader + Star(Any(), greedy=False) + country_entity + Star(Any(), greedy=False)), action=QuestionSet.has_leader),
Rule(condition_num=12,condition=(country_entity + Star(Any(), greedy=False) + capital + Star(Any(), greedy=False))|(capital + Star(Any(), greedy=False) + country_entity + Star(Any(), greedy=False)), action=QuestionSet.has_capital),
Rule(condition_num=12,condition=(country_entity + Star(Any(), greedy=False) + language + Star(Any(), greedy=False))|(language + Star(Any(), greedy=False) + country_entity + Star(Any(), greedy=False)), action=QuestionSet.has_language),
Rule(condition_num=12,condition=(country_entity + Star(Any(), greedy=False) + country_info + Star(Any(), greedy=False))|(country_info + Star(Any(), greedy=False) + country_entity + Star(Any(), greedy=False)), action=QuestionSet.has_country_info),
Rule(condition_num=12,condition=(country_entity + Star(Any(), greedy=False) + largestcity + Star(Any(), greedy=False))|(largestcity + Star(Any(), greedy=False) + country_entity + Star(Any(), greedy=False))|(largest+ Star(Any(), greedy=False)+city + Star(Any(), greedy=False) + country_entity + Star(Any(), greedy=False)) |(country_entity + Star(Any(), greedy=False)+largest+ Star(Any(), greedy=False)+city + Star(Any(), greedy=False)), action=QuestionSet.has_largestcity),
]
basic_country_info_fd = [
KeywordRule(condition=country_entity + Star(Any(), greedy=False) + foundingdate + Star(Any(), greedy=False), action=CountryPropertyValueSet.return_country_info_foundingdate_FD),
KeywordRule(condition=country_entity + Star(Any(), greedy=False) + longname + Star(Any(), greedy=False), action=CountryPropertyValueSet.return_country_info_longname_FD),
KeywordRule(condition=country_entity + Star(Any(), greedy=False) + summary + Star(Any(), greedy=False), action=CountryPropertyValueSet.return_country_info_summary_FD),
KeywordRule(condition=country_entity + Star(Any(), greedy=False) + currency + Star(Any(), greedy=False), action=CountryPropertyValueSet.return_country_info_currency_FD),
KeywordRule(condition=country_entity + Star(Any(), greedy=False) + population + Star(Any(), greedy=False), action=CountryPropertyValueSet.return_country_info_population_FD),
KeywordRule(condition=country_entity + Star(Any(), greedy=False) + areatotal + Star(Any(), greedy=False), action=CountryPropertyValueSet.return_country_info_areatotal_FD),
]
| 46.397959 | 476 | 0.553002 |
d960770f87cbb724ea1dff425b1536e10e26d203
| 12,265 |
py
|
Python
|
research/cv/arcface/infer/sdk/main_sdk_jpg.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/arcface/infer/sdk/main_sdk_jpg.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/arcface/infer/sdk/main_sdk_jpg.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# coding=utf-8
"""
Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import json
import os
import argparse
import pickle
import numpy as np
import cv2
import sklearn
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from scipy import interpolate
import MxpiDataType_pb2 as MxpiDataType
from StreamManagerApi import StreamManagerApi, StringVector, MxDataInput
class LFold:
'''
LFold
'''
def __init__(self, n_splits=2, shuffle=False):
self.n_splits = n_splits
if self.n_splits > 1:
self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)
def split(self, indices):
if self.n_splits > 1:
return self.k_fold.split(indices)
return [(indices, indices)]
def calculate_roc(thresholds,
embeddings1,
embeddings2,
actual_issame,
nrof_folds=10,
pca=0):
'''
calculate_roc
'''
assert embeddings1.shape[0] == embeddings2.shape[0]
assert embeddings1.shape[1] == embeddings2.shape[1]
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = LFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
if pca == 0:
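        # Without PCA the embeddings are fixed, so squared L2 distances can be computed once for all folds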
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if pca > 0:
print('doing pca on', fold_idx)
embed1_train = embeddings1[train_set]
embed2_train = embeddings2[train_set]
_embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
pca_model = PCA(n_components=pca)
pca_model.fit(_embed_train)
embed1 = pca_model.transform(embeddings1)
embed2 = pca_model.transform(embeddings2)
embed1 = sklearn.preprocessing.normalize(embed1)
embed2 = sklearn.preprocessing.normalize(embed2)
diff = np.subtract(embed1, embed2)
dist = np.sum(np.square(diff), 1)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(
threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(
threshold, dist[test_set],
actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(
thresholds[best_threshold_index], dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
'''calculate_acc
'''
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(
np.logical_and(np.logical_not(predict_issame),
np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
return tpr, fpr, acc
def calculate_val(thresholds,
embeddings1,
embeddings2,
actual_issame,
far_target,
nrof_folds=10):
'''
calculate_val
'''
assert embeddings1.shape[0] == embeddings2.shape[0]
assert embeddings1.shape[1] == embeddings2.shape[1]
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = LFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(
threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(
threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
'''
calculate_val_far
'''
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(
np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
'''evaluate
'''
# Calculate evaluation metrics
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy = calculate_roc(thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
nrof_folds=nrof_folds,
pca=pca)
thresholds = np.arange(0, 4, 0.001)
val, val_std, far = calculate_val(thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
1e-3,
nrof_folds=nrof_folds)
return tpr, fpr, accuracy, val, val_std, far
def inference(dir_name, res_dir_name, PL_PATH):
'''inference
'''
_, issame_list = pickle.load(open(data_path + '.bin', 'rb'), encoding='bytes')
stream_manager_api = StreamManagerApi()
ret = stream_manager_api.InitManager()
if ret != 0:
print("Failed to init Stream manager, ret=%s" % str(ret))
exit()
# create streams by pipeline config file
with open(PL_PATH, 'rb') as f:
pipelineStr = f.read()
ret = stream_manager_api.CreateMultipleStreams(pipelineStr)
if ret != 0:
print("Failed to create Stream, ret=%s" % str(ret))
exit()
# Construct the input of the stream
data_input = MxDataInput()
file_list = os.listdir(dir_name)
if not os.path.exists(res_dir_name):
os.makedirs(res_dir_name)
cnt = 0
leng = int(len(file_list) / 2)
embeding = np.zeros(shape=[leng, 512], dtype=np.float32)
embeding_f = np.zeros(shape=[leng, 512], dtype=np.float32)
for file_name in file_list:
stream_name = b'im_arcface'
in_plugin_id = 0
file_path = os.path.join(dir_name, file_name)
img_decode = cv2.imread(file_path)
img_decode = np.transpose(img_decode, axes=(2, 0, 1))
img_decode = img_decode.reshape([112, 112, 3])
_, encoded_image = cv2.imencode(".jpg", img_decode)
img_bytes = encoded_image.tobytes()
data_input.data = img_bytes
unique_id = stream_manager_api.SendData(stream_name, in_plugin_id, data_input)
if unique_id < 0:
print("Failed to send data to stream.")
exit()
keys = [b"mxpi_tensorinfer0"]
keyVec = StringVector()
for key in keys:
keyVec.push_back(key)
start_time = datetime.datetime.now()
infer_result = stream_manager_api.GetProtobuf(stream_name, in_plugin_id, keyVec)
end_time = datetime.datetime.now()
if infer_result.size() == 0:
print("infer_result is null")
exit()
if infer_result[0].errorCode != 0:
print("GetProtobuf error. errorCode=%d, errorMsg=%s" % (
infer_result[0].errorCode, infer_result[0].data.decode()))
exit()
resultList = MxpiDataType.MxpiTensorPackageList()
resultList.ParseFromString(infer_result[0].messageBuf)
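        # The inference result buffer holds the 512-d embedding as raw little-endian float32 values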
output = np.frombuffer(resultList.tensorPackageVec[0].tensorVec[0].dataStr, dtype='<f4')
if file_name.startswith('f'):
tmp_list = file_name.split('_')
tmp_num = int(tmp_list[1])
embeding_f[tmp_num] = output
save_path = os.path.join(res_dir_name, f"f_{tmp_num}.json")
else:
tmp_list = file_name.split('_')
tmp_num = int(tmp_list[0])
embeding[tmp_num] = output
save_path = os.path.join(res_dir_name, f"{tmp_num}.json")
cnt += 1
with open(save_path, "w") as fp:
fp.write(json.dumps(output.tolist()))
if cnt % 1000 == 0:
print('sdk run time: {}'.format((end_time - start_time).microseconds))
print(
                f"End-to-end inference, file_name: {save_path}, {cnt + 1}/{len(file_list)}, elapsed_time: {end_time - start_time}.\n"

)
# destroy streams
stream_manager_api.DestroyAllStreams()
embeddings = embeding
embeddings = sklearn.preprocessing.normalize(embeddings)
_, _, acc, _, _, _ = evaluate(embeddings, issame_list)
acc1 = np.mean(acc)
std1 = np.std(acc)
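    # Combine embeddings of the original and (presumably flipped) 'f_' images before re-evaluating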
embeddings = embeding + embeding_f
embeddings = sklearn.preprocessing.normalize(embeddings)
print(embeddings.shape)
_, _, accuracy, _, _, _ = evaluate(
embeddings, issame_list)
acc2, std2 = np.mean(accuracy), np.std(accuracy)
print('[%s]Accuracy: %1.5f+-%1.5f' % (dir_name.split('/')[-1], acc1, std1))
print('[%s]Accuracy-Flip: %1.5f+-%1.5f' %
(dir_name.split('/')[-1], acc2, std2))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Datasets
parser.add_argument('--eval_url', default='../data/input/data/', type=str,
help='data path')
parser.add_argument('--PL_PATH', default='../data/config/arcface.pipeline', type=str,
help='output path')
parser.add_argument('--result_url', default='../data/sdk_out/', type=str)
parser.add_argument('--target',
default='lfw',
help='test targets.')
# lfw,cfp_fp,agedb_30,calfw,cplfw
args = parser.parse_args()
ver_list = []
ver_name_list = []
for name in args.target.split(','):
data_path = os.path.join(args.eval_url, name)
output_path = os.path.join(args.result_url, name)
if os.path.exists(data_path):
if not os.path.exists(output_path):
os.mkdir(output_path)
inference(data_path, output_path, args.PL_PATH)
| 37.393293 | 118 | 0.619976 |
244429d49fb5dd1707b5b43919de98efd5261127
| 1,475 |
py
|
Python
|
task_set/optimizers/zero.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
task_set/optimizers/zero.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
task_set/optimizers/zero.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An optimizer that simply zeros out weights.
This is a sanity check optimizer. All other optimizer should obtain higher loss
than this. Additionally values produced by this optimizer are used for
normalization.
"""
from typing import List
from task_set import registry
from task_set.optimizers import base
import tensorflow.compat.v1 as tf
@registry.optimizers_registry.register_fixed("zero")
class ZeroOptimizer(base.BaseOptimizer):
r"""Zero out all weights each step.
This optimizer is only to be used as a sanity check as it should only work
well when parameters are initialized extremely poorly.
"""
def minimize(self, loss, global_step,
var_list):
"""Create op that zeros out all weights."""
assign_ops = [v.assign(v * 0.) for v in var_list]
return tf.group(global_step.assign_add(1), *assign_ops, name="minimize")
| 33.522727 | 79 | 0.754576 |
3dc9240d93e780290fe5315380713daf197d45b3
| 1,501 |
py
|
Python
|
ballerinaByExample/vendor/pygments/pygments/styles/xcode.py
|
ayeshLK/ballerina-release
|
d3edd2f60f315412068f4cf4280317814b1b83e4
|
[
"Apache-2.0"
] | 182 |
2017-03-05T07:43:13.000Z
|
2022-03-15T13:09:07.000Z
|
ballerinaByExample/vendor/pygments/pygments/styles/xcode.py
|
ayeshLK/ballerina-release
|
d3edd2f60f315412068f4cf4280317814b1b83e4
|
[
"Apache-2.0"
] | 342 |
2015-01-09T10:58:37.000Z
|
2022-03-31T22:20:14.000Z
|
ballerinaByExample/vendor/pygments/pygments/styles/xcode.py
|
ayeshLK/ballerina-release
|
d3edd2f60f315412068f4cf4280317814b1b83e4
|
[
"Apache-2.0"
] | 113 |
2018-03-07T02:07:58.000Z
|
2019-09-10T13:50:04.000Z
|
# -*- coding: utf-8 -*-
"""
pygments.styles.xcode
~~~~~~~~~~~~~~~~~~~~~
Style similar to the `Xcode` default theme.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Literal
class XcodeStyle(Style):
"""
Style similar to the Xcode default colouring theme.
"""
default_style = ''
styles = {
Comment: '#177500',
Comment.Preproc: '#633820',
String: '#C41A16',
String.Char: '#2300CE',
Operator: '#000000',
Keyword: '#A90D91',
Name: '#000000',
Name.Attribute: '#836C28',
Name.Class: '#3F6E75',
Name.Function: '#000000',
Name.Builtin: '#A90D91',
# In Obj-C code this token is used to colour Cocoa types
Name.Builtin.Pseudo: '#5B269A',
Name.Variable: '#000000',
Name.Tag: '#000000',
Name.Decorator: '#000000',
        # Workaround for a BUG here: lexer treats multiline method signatures as labels
Name.Label: '#000000',
Literal: '#1C01CE',
Number: '#1C01CE',
Error: '#000000',
}
| 28.865385 | 86 | 0.484344 |
116ffea4a28e09678f3c1cd0790173560182906e
| 346 |
py
|
Python
|
utils/prefix.py
|
FiireWiinter/Invite-Management
|
da81a0145fe4db93c988eb3b01260023e8a69e11
|
[
"MIT"
] | 1 |
2020-12-23T04:49:17.000Z
|
2020-12-23T04:49:17.000Z
|
utils/prefix.py
|
FiireWiinter/Invite-Managment
|
da81a0145fe4db93c988eb3b01260023e8a69e11
|
[
"MIT"
] | null | null | null |
utils/prefix.py
|
FiireWiinter/Invite-Managment
|
da81a0145fe4db93c988eb3b01260023e8a69e11
|
[
"MIT"
] | null | null | null |
from discord import DMChannel
async def prefix(ctx):
if isinstance(ctx.channel, DMChannel):
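        # Direct messages have no guild settings row, so fall back to the default prefix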
return 'im!'
else:
async with ctx.bot.pool.acquire() as db:
prefix_ = await db.fetchval(
"SELECT prefix FROM guilds WHERE ID=$1",
ctx.guild.id
)
return prefix_
| 24.714286 | 56 | 0.552023 |
4a997d07e759e47d012fece1697a3721829fc758
| 1,542 |
py
|
Python
|
custom_api/helpers.py
|
aitor-garcia-p/NCRFpp
|
3e83fc6c462941eff65b8b42bfe6630277b92d6b
|
[
"Apache-2.0"
] | null | null | null |
custom_api/helpers.py
|
aitor-garcia-p/NCRFpp
|
3e83fc6c462941eff65b8b42bfe6630277b92d6b
|
[
"Apache-2.0"
] | null | null | null |
custom_api/helpers.py
|
aitor-garcia-p/NCRFpp
|
3e83fc6c462941eff65b8b42bfe6630277b92d6b
|
[
"Apache-2.0"
] | null | null | null |
from typing import List, Tuple, Dict
from custom_api.feature_generators import FeatureGenerator
FAKE_LABEL = 'O'
def compute_features(tokens: List[str], feature_generators: List[FeatureGenerator], separator: str = '\t', labels: List[str] = None,
lowercase_tokens=True) \
-> List[str]:
"""
Receives a list of tokens, and computes the features for each of them (where are those features specified?)
The result is a list of tuples, containing each token and a dictionary of features.
Labels are the provided gold labels (same number as tokens). If missing (for inference) a fake 'O' label is added.
:param feature_generators:
:param labels:
:param separator:
:param tokens:
:return:
"""
all_features = []
for feature_generator in feature_generators:
features = feature_generator.generate_feature(tokens)
all_features.append(features)
# At this point the features have been generated, we can proceed to normalize the tokens if necessary
# e.g. lowercase them (or remove some diacritic marks, etc.)
featurized_output = []
for i, token in enumerate(tokens):
line = token.lower() if lowercase_tokens else token
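        # Build one output line per token: the token, each feature as [name]value with the separator, then the gold or fake label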
for feature in all_features:
line += '{}[{}]{}'.format(separator, feature[0], feature[1][i])
if labels:
line += separator + labels[i]
else:
line += separator + FAKE_LABEL
featurized_output.append(line.strip() + '\n')
return featurized_output
| 37.609756 | 132 | 0.669261 |
16602945119f5e4da1de4ca3fcf137259dc279d7
| 1,673 |
py
|
Python
|
neodroidagent/utilities/exploration/sampling/random_process/self_avoiding.py
|
gitter-badger/agent
|
3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11
|
[
"Apache-2.0"
] | 8 |
2017-09-13T08:28:44.000Z
|
2022-01-21T15:59:19.000Z
|
neodroidagent/utilities/exploration/sampling/random_process/self_avoiding.py
|
gitter-badger/agent
|
3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11
|
[
"Apache-2.0"
] | 4 |
2019-03-22T13:49:16.000Z
|
2019-03-25T13:49:39.000Z
|
neodroidagent/utilities/exploration/sampling/random_process/self_avoiding.py
|
gitter-badger/agent
|
3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11
|
[
"Apache-2.0"
] | 3 |
2017-09-13T08:31:38.000Z
|
2021-11-09T11:22:27.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .random_process import RandomProcess
__author__ = "Christian Heider Nielsen"
import random
import numpy
__all__ = ["SelfAvoiding"]
class SelfAvoiding(RandomProcess):
def __init__(self, num_of_options=4, n=10):
self.num_of_options = num_of_options
self.n = n
self.reset()
def sample(self, steps=1):
while (
(self.x > 0)
and (self.x < self.n - 1)
and (self.y > 0)
and (self.y < self.n - 1)
):
self.a[self.x][self.y] = 1
if (
self.a[self.x - 1][self.y]
and self.a[self.x + 1][self.y]
and self.a[self.x][self.y - 1]
and self.a[self.x][self.y + 1]
):
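                # All four neighbours have already been visited: the walk is stuck at a dead end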
self.deadEnds += 1
return self.a[self.x - 1][self.y]
r = random.randrange(1, self.num_of_options + 1)
if (r == 1) and (not self.a[self.x + 1][self.y]):
self.x += 1
elif (r == 2) and (not self.a[self.x - 1][self.y]):
self.x -= 1
elif (r == 3) and (not self.a[self.x][self.y + 1]):
self.y += 1
elif (r == 4) and (not self.a[self.x][self.y - 1]):
self.y -= 1
return self.a[self.x - 1][self.y]
def reset(self):
self.deadEnds = 0
self.a = numpy.zeros((self.n, self.n))
self.x = self.n // 2
self.y = self.n // 2
if __name__ == "__main__":
def main(n=5, trials=3):
r = SelfAvoiding()
for t in range(trials):
print(r.sample())
main()
| 24.970149 | 63 | 0.468619 |
c535300a68c8b92a93393b2de81fea8a4efa76e7
| 8,322 |
py
|
Python
|
torchvision/models/quantization/googlenet.py
|
abhi-glitchhg/vision
|
12bb88738a5b6b96767c4165282f644495780a80
|
[
"BSD-3-Clause"
] | 1 |
2022-02-14T09:16:02.000Z
|
2022-02-14T09:16:02.000Z
|
torchvision/models/quantization/googlenet.py
|
abhi-glitchhg/vision
|
12bb88738a5b6b96767c4165282f644495780a80
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/models/quantization/googlenet.py
|
abhi-glitchhg/vision
|
12bb88738a5b6b96767c4165282f644495780a80
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
from functools import partial
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import functional as F
from ...transforms._presets import ImageClassification
from .._api import WeightsEnum, Weights
from .._meta import _IMAGENET_CATEGORIES
from .._utils import handle_legacy_interface, _ovewrite_named_param
from ..googlenet import GoogLeNetOutputs, BasicConv2d, Inception, InceptionAux, GoogLeNet, GoogLeNet_Weights
from .utils import _fuse_modules, _replace_relu, quantize_model
__all__ = [
"QuantizableGoogLeNet",
"GoogLeNet_QuantizedWeights",
"googlenet",
]
class QuantizableBasicConv2d(BasicConv2d):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.relu = nn.ReLU()
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
_fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)
class QuantizableInception(Inception):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
self.cat = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return self.cat.cat(outputs, 1)
class QuantizableInceptionAux(InceptionAux):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]
self.relu = nn.ReLU()
def forward(self, x: Tensor) -> Tensor:
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = F.adaptive_avg_pool2d(x, (4, 4))
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
# N x 2048
x = self.relu(self.fc1(x))
# N x 1024
x = self.dropout(x)
# N x 1024
x = self.fc2(x)
# N x 1000 (num_classes)
return x
class QuantizableGoogLeNet(GoogLeNet):
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__( # type: ignore[misc]
blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], *args, **kwargs
)
self.quant = torch.ao.quantization.QuantStub()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x: Tensor) -> GoogLeNetOutputs:
x = self._transform_input(x)
x = self.quant(x)
x, aux1, aux2 = self._forward(x)
x = self.dequant(x)
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
if not aux_defined:
warnings.warn("Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple")
return GoogLeNetOutputs(x, aux2, aux1)
else:
return self.eager_outputs(x, aux2, aux1)
def fuse_model(self, is_qat: Optional[bool] = None) -> None:
r"""Fuse conv/bn/relu modules in googlenet model
Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
Model is modified in place. Note that this operation does not change numerics
and the model after modification is in floating point
"""
for m in self.modules():
if type(m) is QuantizableBasicConv2d:
m.fuse_model(is_qat)
class GoogLeNet_QuantizedWeights(WeightsEnum):
IMAGENET1K_FBGEMM_V1 = Weights(
url="https://download.pytorch.org/models/quantized/googlenet_fbgemm-c00238cf.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 6624904,
"min_size": (15, 15),
"categories": _IMAGENET_CATEGORIES,
"backend": "fbgemm",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
"unquantized": GoogLeNet_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 69.826,
"acc@5": 89.404,
}
},
"_docs": """
These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
weights listed below.
""",
},
)
DEFAULT = IMAGENET1K_FBGEMM_V1
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1
if kwargs.get("quantize", False)
else GoogLeNet_Weights.IMAGENET1K_V1,
)
)
def googlenet(
*,
weights: Optional[Union[GoogLeNet_QuantizedWeights, GoogLeNet_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableGoogLeNet:
"""GoogLeNet (Inception v1) model architecture from `Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`__.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
quantize (bool, optional): If True, return a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableGoogLeNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/googlenet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.GoogLeNet_QuantizedWeights
:members:
.. autoclass:: torchvision.models.GoogLeNet_Weights
:members:
:noindex:
"""
weights = (GoogLeNet_QuantizedWeights if quantize else GoogLeNet_Weights).verify(weights)
original_aux_logits = kwargs.get("aux_logits", False)
if weights is not None:
if "transform_input" not in kwargs:
_ovewrite_named_param(kwargs, "transform_input", True)
_ovewrite_named_param(kwargs, "aux_logits", True)
_ovewrite_named_param(kwargs, "init_weights", False)
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
if "backend" in weights.meta:
_ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
backend = kwargs.pop("backend", "fbgemm")
model = QuantizableGoogLeNet(**kwargs)
_replace_relu(model)
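    # quantize_model is expected to fuse and convert the float modules for the chosen backend (e.g. fbgemm)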
if quantize:
quantize_model(model, backend)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
if not original_aux_logits:
model.aux_logits = False
model.aux1 = None # type: ignore[assignment]
model.aux2 = None # type: ignore[assignment]
else:
warnings.warn(
"auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
)
return model
# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..googlenet import model_urls # noqa: F401
quant_model_urls = _ModelURLs(
{
# fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch
"googlenet_fbgemm": GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1.url,
}
)
| 37.656109 | 151 | 0.653329 |
5cc2d143591133ecedfee29745674ad8cbd4eb88
| 410 |
py
|
Python
|
src/wrf/projutils.py
|
khallock/wrf-python
|
9c5825c101722e7eddece2ca13cc8e9d9f96a21e
|
[
"Apache-2.0"
] | 1 |
2018-10-30T18:06:26.000Z
|
2018-10-30T18:06:26.000Z
|
src/wrf/projutils.py
|
mostamndi/wrf-python
|
3806bcdd01b31fa67da980eafefa0d1245faf6a6
|
[
"Apache-2.0"
] | null | null | null |
src/wrf/projutils.py
|
mostamndi/wrf-python
|
3806bcdd01b31fa67da980eafefa0d1245faf6a6
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
from .py3compat import viewitems
def dict_keys_to_upper(d):
"""Return a dictionary with the keys changed to uppercase.
Args:
d (:obj:`dict`): A dictionary.
Returns:
:obj:`dict`: A dictionary with uppercase keys.
"""
return {key.upper() : val for key, val in viewitems(d)}
| 22.777778 | 66 | 0.62439 |
27eb8bb22a0998ad1d96e8dd520af2a025cb26ef
| 2,059 |
py
|
Python
|
src/argument.py
|
kibernetika-ai/Image-Denoising-with-Deep-CNNs
|
c081b85fee0fb72e74b0bf4beae90f0bb8bb0a6a
|
[
"MIT"
] | null | null | null |
src/argument.py
|
kibernetika-ai/Image-Denoising-with-Deep-CNNs
|
c081b85fee0fb72e74b0bf4beae90f0bb8bb0a6a
|
[
"MIT"
] | null | null | null |
src/argument.py
|
kibernetika-ai/Image-Denoising-with-Deep-CNNs
|
c081b85fee0fb72e74b0bf4beae90f0bb8bb0a6a
|
[
"MIT"
] | null | null | null |
import argparse
def parse():
'''
Add arguments.
'''
parser = argparse.ArgumentParser(
        description='Image-Denoising-with-Deep-CNNs')
parser.add_argument('--root_dir', type=str,
default='../dataset/BSDS300/images', help='root directory of dataset')
parser.add_argument('--output_dir', type=str,
default='../checkpoints/', help='directory of saved checkpoints')
parser.add_argument('--num_epochs', type=int,
default=200, help='number of epochs')
parser.add_argument('--D', type=int,
default=6, help='number of dilated convolutional layer')
parser.add_argument('--C', type=int,
default=64, help='kernel size of convolutional layer')
parser.add_argument('--plot', type=bool, default=False,
help='plot loss during training or not')
parser.add_argument('--model', type=str, default='dudncnn',
help='dncnn, udncnn, or dudncnn')
parser.add_argument('--lr', type=float, default=1e-3,
help='learning rate for training')
parser.add_argument('--image_size', type=tuple, default=(180, 180))
parser.add_argument('--test_image_size', type=tuple, default=(320, 320))
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--sigma', type=int, default=30)
parser.add_argument(
'--quantize',
choices=[None, 'static', 'fx_static'],
default=None
)
return parser.parse_args()
class Args():
'''
For jupyter notebook
'''
def __init__(self):
self.root_dir = '../dataset/BSDS300/images'
self.output_dir = '../checkpoints/'
self.num_epochs = 200
self.D = 6
self.C = 64
self.plot = False
self.model = 'dudncnn'
self.lr = 1e-3
self.image_size = (180, 180)
self.test_image_size = (320, 320)
self.batch_size = 4
self.sigma = 30
| 35.5 | 94 | 0.587178 |
549ac5afdaffc57f304728c233209a9da1313919
| 3,690 |
py
|
Python
|
forexcast.py
|
IntrospectData/forexcast
|
4d9be7a6944441c51035b4bcc5438cd6bb132a8c
|
[
"MIT"
] | 1 |
2021-01-29T18:28:16.000Z
|
2021-01-29T18:28:16.000Z
|
forexcast.py
|
IntrospectData/forexcast
|
4d9be7a6944441c51035b4bcc5438cd6bb132a8c
|
[
"MIT"
] | null | null | null |
forexcast.py
|
IntrospectData/forexcast
|
4d9be7a6944441c51035b4bcc5438cd6bb132a8c
|
[
"MIT"
] | 1 |
2020-01-28T22:55:01.000Z
|
2020-01-28T22:55:01.000Z
|
import datetime
import json
import os
import click
import requests
import pandas as pd
import plotly.offline as ply
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
default_config = {
"apikey": "-",
"quote_currency": "USD",
}
APIKEY = ''
QUOTE_CURRENCY = ''
HOME = os.getenv("HOME")
def _config():
apikey = input("Enter your IEX api key: ")
quote_currency = input("Enter quote currency: ")
if apikey and quote_currency:
return {
"apikey": apikey,
"quote_currency": quote_currency,
}
def _init():
if not os.path.isfile(HOME + '/.config/forexcast.json'):
with open(HOME + '/.config/forexcast.json', 'w') as f:
config = _config()
if config:
f.write(json.dumps(config))
else:
f.write(json.dumps(default_config))
with open(HOME + '/.config/forexcast.json', 'r') as f:
config = json.loads(f.read())
return config.get("apikey", "-"), config.get("quote_currency", "USD")
def create_df(currency="EUR", from_date="2019-01-01", to_date="2019-12-31", quote_currency="USD", apikey="-"):
url = "https://cloud.iexapis.com/stable/fx/historical?symbols={}{}&from={}&to={}&token={}".format(quote_currency, currency, from_date, to_date, apikey)
try:
response = json.loads(requests.get(url).content)[0]
data = {"ds": [], "y": []}
for item in response:
if item:
if item.get("rate") and item.get("date"):
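                    # Invert the returned pair rate so the series is quoted per unit of the target currency (assumed IEX semantics)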
rate = 1.00 / item.get("rate")
date = item.get("date")
data["ds"].append(date)
data["y"].append(rate)
df = pd.DataFrame(data)
return df
except Exception as e:
df = None
return df
def forecast(periods=365, df=None, out=None, currency="EUR", quote_currency="USD"):
if periods and df is not None:
m = Prophet(daily_seasonality=True, yearly_seasonality=True).fit(df)
future = m.make_future_dataframe(periods=periods)
forecast = m.predict(future)
if out:
with open(out + "/{}-{}-to-{}.json".format(str(datetime.date.today()), currency, quote_currency), "w") as f:
f.write(forecast.to_json())
fig = plot_plotly(m, forecast)
fig.update_layout(
title="{}:{} Forecast".format(currency, quote_currency),
xaxis_title="Date",
yaxis_title="Exchange Rate",
font=dict(
family="Courier New, monospace",
size=18,
)
)
ply.plot(fig)
@click.command()
@click.argument('currency')
@click.option('--from_date', default="2019-01-01", help="Data after this date will be included.")
@click.option('--to_date', default=str(datetime.date.today()), help="Data after this date will be excluded.")
@click.option('--periods', default=365, help='Periods to predict.')
@click.option('--out', default=None, help="If specified will save forecast in specified dir.")
def main(currency, from_date, to_date, periods, out):
"""
Simple currency forecasting for IEX data
* CURRENCY = 3 char ISO code for target currency
* Dates should be in YYYY-MM-DD format
"""
apikey, quote_currency = _init()
df = create_df(currency=currency, from_date=from_date, to_date=to_date, quote_currency=quote_currency, apikey=apikey)
out = os.path.realpath(out) if out else None
forecast(periods=periods, df=df, out=out, currency=currency, quote_currency=quote_currency)
if __name__ == '__main__':
main()
| 36.176471 | 155 | 0.598645 |
59f69dd4e424e3bf3e1bd4b93fb683f7cc9470da
| 1,278 |
py
|
Python
|
external/eospy/examples/push_transaction.py
|
unification-com/haiku-node-prototype
|
ea77aa90f6b3f08d004be1c24e6b8d62e83bc66b
|
[
"MIT"
] | 3 |
2018-06-15T18:02:05.000Z
|
2018-07-06T02:32:18.000Z
|
external/eospy/examples/push_transaction.py
|
unification-com/haiku-node-prototype
|
ea77aa90f6b3f08d004be1c24e6b8d62e83bc66b
|
[
"MIT"
] | 4 |
2018-08-17T06:51:34.000Z
|
2018-08-17T08:39:24.000Z
|
external/eospy/examples/push_transaction.py
|
unification-com/haiku-node-prototype
|
ea77aa90f6b3f08d004be1c24e6b8d62e83bc66b
|
[
"MIT"
] | null | null | null |
import eospy.cleos
ce = eospy.cleos.Cleos(url='http://api.pennstation.eosnewyork.io:7001')
payload = [
{
'args': {
"from": "eosio", # sender
"to": "silvercondor", # receiver
"quantity": '1.0000 EOS', # In EOS
"memo": "EOS to the moon",
},
"account": "eosio.token",
"name": "transfer",
"authorization": [{
"actor": "eosio",
"permission": "active",
}],
}
]
#Converting payload to binary
data=ce.abi_json_to_bin(payload[0]['account'],payload[0]['name'],payload[0]['args'])
#Inserting payload binary form as "data" field in original payload
payload[0]['data']=data['binargs']
#Removing the arguments field
payload[0].pop('args')
#final transaction formed
trx = {"actions":[payload[0]]}
# use a string or EOSKey for push_transaction
key = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
# use EOSKey:
# import eospy.keys
# key = eospy.keys.EOSKey('5HuaTWKeGzZhqyzuzFAjjFjPnnnjdgcp562oBSS8Wv1qgDSkR2W')
resp = ce.push_transaction(trx, key, broadcast=True)
print('------------------------------------------------')
print(resp)
print('------------------------------------------------')
| 32.769231 | 84 | 0.554773 |
4cf8a4c6c0689169a903294348c50af44db59200
| 11,060 |
py
|
Python
|
benchmarks/benchmark_overlap_blocker.py
|
kvpradap/py_entitymatching
|
4ff803df1a03cf4d77ef935357355e6de5dd9438
|
[
"BSD-3-Clause"
] | 165 |
2016-08-28T14:30:01.000Z
|
2022-03-29T17:24:03.000Z
|
benchmarks/benchmark_overlap_blocker.py
|
mvahit/py_entitymatching
|
6724081d7d95c547e5a51625b4a8207c6c1737f8
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 70 |
2016-11-22T00:35:22.000Z
|
2022-03-11T22:26:26.000Z
|
benchmarks/benchmark_overlap_blocker.py
|
mvahit/py_entitymatching
|
6724081d7d95c547e5a51625b4a8207c6c1737f8
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 53 |
2016-09-22T02:07:34.000Z
|
2022-03-19T18:57:06.000Z
|
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import os
import sys
import py_entitymatching as mg
p = mg.get_install_path()
datasets_path = os.sep.join([p, 'datasets', 'example_datasets'])
ob = mg.OverlapBlocker()
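# A single OverlapBlocker instance is reused by every benchmark class below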
class TimeBlockTablesBooks:
timeout=500.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'books', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'books', 'B.csv'])
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'books\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_tables_title_2(self):
ob.block_tables(self.A, self.B, 'Title', 'Title', overlap_size=2,
l_output_attrs=['Title', 'Author', 'ISBN13',
'Publisher','Publication_Date'],
r_output_attrs=['Title', 'Author', 'ISBN13',
'Publisher','Publication_Date'])
def teardown(self):
del self.A
del self.B
class TimeBlockTablesBeer:
timeout=500.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'beer', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'beer', 'B.csv'])
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'Label')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'Label')
except AssertionError:
print("Dataset \'beer\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_tables_beer_name_3(self):
ob.block_tables(self.A, self.B, 'Beer_Name', 'Beer_Name',
overlap_size=3, l_output_attrs=['Beer_Name'],
r_output_attrs=['Beer_Name'])
def time_block_tables_brew_factory_name_2(self):
ob.block_tables(self.A, self.B, 'Brew_Factory_Name',
'Brew_Factory_Name', overlap_size=2,
l_output_attrs=['Beer_Name', 'ABV'],
r_output_attrs=['Beer_Name', 'ABV'])
def teardown(self):
del self.A
del self.B
class TimeBlockTablesCitations:
timeout=500.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'citations', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'citations', 'B.csv'])
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
            print("Dataset \'citations\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables_title_1(self):
ob.block_tables(self.A, self.B, 'author', 'author', rem_stop_words=True,
l_output_attrs=['title','author','year','ENTRYTYPE'],
r_output_attrs=['title','author','year','ENTRYTYPE'])
def teardown(self):
del self.A
del self.B
class TimeBlockTablesEbooks:
timeout=500.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'ebooks', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'ebooks', 'B.csv'])
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'record_id')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'record_id')
except AssertionError:
print("Dataset \'ebooks\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_tables_author_2(self):
ob.block_tables(self.A, self.B, 'author', 'author', overlap_size=2,
l_output_attrs = ['title', 'author', 'length', 'price'],
r_output_attrs = ['title', 'author', 'length', 'price'])
def time_block_tables_title_2(self):
ob.block_tables(self.A, self.B, 'title', 'title', overlap_size=2,
rem_stop_words=True,
l_output_attrs=['title', 'author', 'publisher', 'date'],
r_output_attrs=['title', 'author', 'publisher', 'date'])
def teardown(self):
del self.A
del self.B
class TimeBlockTablesMusic:
timeout=500.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'music', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'music', 'B.csv'])
self.l_output_attrs = ['Album_Name', 'Artist_Name', 'CopyRight',
'Released', 'Song_Name', 'Time']
self.r_output_attrs = ['Album_Name', 'Artist_Name', 'Copyright',
'Released', 'Song_Name', 'Time']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'Sno')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'Sno')
except AssertionError:
print("Dataset \'music\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_tables_album_name_1(self):
ob.block_tables(self.A, self.B, 'Album_Name', 'Album_Name',
rem_stop_words=True,
l_output_attrs=self.l_output_attrs,
r_output_attrs=self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesRestaurants:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'restaurants', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'restaurants', 'B.csv'])
self.l_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
self.r_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'restaurants\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables_address_4(self):
ob.block_tables(self.A, self.B, 'ADDRESS', 'ADDRESS',
overlap_size=4,
l_output_attrs=self.l_output_attrs,
r_output_attrs=self.r_output_attrs)
def time_block_tables_name_1(self):
ob.block_tables(self.A, self.B, 'NAME', 'NAME',
l_output_attrs=self.l_output_attrs,
r_output_attrs=self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockCandsetEbooks:
timeout=3600.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'ebooks', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'ebooks', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'record_id')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'record_id')
self.C = ob.block_tables(A, B, 'title', 'title', overlap_size=2,
rem_stop_words=True,
l_output_attrs=['title', 'author', 'publisher', 'date'],
r_output_attrs=['title', 'author', 'publisher', 'date'])
except AssertionError:
print("Dataset \'ebooks\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_candset_publisher_1(self):
ob.block_candset(self.C, 'publisher', 'publisher', rem_stop_words=True)
def teardown(self):
del self.C
class TimeBlockCandsetMusic:
timeout=3600.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'music', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'music', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'Sno')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'Sno')
l_output_attrs = ['Album_Name', 'Artist_Name', 'CopyRight',
'Released', 'Song_Name', 'Time']
r_output_attrs = ['Album_Name', 'Artist_Name', 'Copyright',
'Released', 'Song_Name', 'Time']
self.C = ob.block_tables(A, B, 'Album_Name', 'Album_Name',
rem_stop_words=True,
l_output_attrs=l_output_attrs,
r_output_attrs=r_output_attrs)
except AssertionError:
print("Dataset \'music\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_candset(self):
ob.block_candset(self.C, 'Artist_Name', 'Artist_Name',
rem_stop_words=True)
def teardown(self):
del self.C
class TimeBlockCandsetMusic2:
timeout=3600.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'music', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'music', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'Sno')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'Sno')
l_output_attrs = ['Album_Name', 'Artist_Name', 'CopyRight',
'Released', 'Song_Name', 'Time']
r_output_attrs = ['Album_Name', 'Artist_Name', 'Copyright',
'Released', 'Song_Name', 'Time']
C = ob.block_tables(A, B, 'Album_Name', 'Album_Name',
rem_stop_words=True,
l_output_attrs=l_output_attrs,
r_output_attrs=r_output_attrs)
self.D = ob.block_candset(C, 'Artist_Name', 'Artist_Name',
rem_stop_words=True)
except AssertionError:
print("Dataset \'music\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_candset(self):
ob.block_candset(self.D, 'Song_Name', 'Song_Name', rem_stop_words=True)
def teardown(self):
del self.D
| 40.661765 | 93 | 0.563472 |
536639490d3e6ce59889b6f81b8c0f2125acadbf
| 6,307 |
py
|
Python
|
.ipynb_checkpoints/train_misc-checkpoint.py
|
minhtannguyen/ffjord
|
f3418249eaa4647f4339aea8d814cf2ce33be141
|
[
"MIT"
] | 1 |
2020-12-20T09:43:20.000Z
|
2020-12-20T09:43:20.000Z
|
.ipynb_checkpoints/train_misc-checkpoint.py
|
minhtannguyen/ffjord
|
f3418249eaa4647f4339aea8d814cf2ce33be141
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/train_misc-checkpoint.py
|
minhtannguyen/ffjord
|
f3418249eaa4647f4339aea8d814cf2ce33be141
|
[
"MIT"
] | null | null | null |
import six
import math
import lib.layers.wrappers.cnf_regularization as reg_lib
import lib.spectral_norm as spectral_norm
import lib.layers as layers
from lib.layers.odefunc import divergence_bf, divergence_approx
def standard_normal_logprob(z):
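    # Elementwise log-density of a standard normal: -0.5*log(2*pi) - z^2/2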
logZ = -0.5 * math.log(2 * math.pi)
return logZ - z.pow(2) / 2
def set_cnf_options(args, model):
def _set(module):
if isinstance(module, layers.CNF):
# Set training settings
module.solver = args.solver
module.atol = args.atol
module.rtol = args.rtol
if args.step_size is not None:
module.solver_options['step_size'] = args.step_size
# If using fixed-grid adams, restrict order to not be too high.
if args.solver in ['fixed_adams', 'explicit_adams']:
module.solver_options['max_order'] = 4
# Set the test settings
module.test_solver = args.test_solver if args.test_solver else args.solver
module.test_atol = args.test_atol if args.test_atol else args.atol
module.test_rtol = args.test_rtol if args.test_rtol else args.rtol
if isinstance(module, layers.ODEfunc):
module.rademacher = args.rademacher
module.residual = args.residual
model.apply(_set)
def override_divergence_fn(model, divergence_fn):
def _set(module):
if isinstance(module, layers.ODEfunc):
if divergence_fn == "brute_force":
module.divergence_fn = divergence_bf
elif divergence_fn == "approximate":
module.divergence_fn = divergence_approx
model.apply(_set)
def count_nfe(model):
class AccNumEvals(object):
def __init__(self):
self.num_evals = 0
def __call__(self, module):
if isinstance(module, layers.CNF):
self.num_evals += module.num_evals()
accumulator = AccNumEvals()
model.apply(accumulator)
return accumulator.num_evals
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_total_time(model):
class Accumulator(object):
def __init__(self):
self.total_time = 0
def __call__(self, module):
if isinstance(module, layers.CNF):
self.total_time = self.total_time + module.sqrt_end_time * module.sqrt_end_time
accumulator = Accumulator()
model.apply(accumulator)
return accumulator.total_time
def add_spectral_norm(model, logger=None):
"""Applies spectral norm to all modules within the scope of a CNF."""
def apply_spectral_norm(module):
if 'weight' in module._parameters:
if logger: logger.info("Adding spectral norm to {}".format(module))
spectral_norm.inplace_spectral_norm(module, 'weight')
def find_cnf(module):
if isinstance(module, layers.CNF):
module.apply(apply_spectral_norm)
else:
for child in module.children():
find_cnf(child)
find_cnf(model)
def spectral_norm_power_iteration(model, n_power_iterations=1):
def recursive_power_iteration(module):
if hasattr(module, spectral_norm.POWER_ITERATION_FN):
getattr(module, spectral_norm.POWER_ITERATION_FN)(n_power_iterations)
model.apply(recursive_power_iteration)
REGULARIZATION_FNS = {
"l1int": reg_lib.l1_regularzation_fn,
"l2int": reg_lib.l2_regularzation_fn,
"dl2int": reg_lib.directional_l2_regularization_fn,
"JFrobint": reg_lib.jacobian_frobenius_regularization_fn,
"JdiagFrobint": reg_lib.jacobian_diag_frobenius_regularization_fn,
"JoffdiagFrobint": reg_lib.jacobian_offdiag_frobenius_regularization_fn,
}
INV_REGULARIZATION_FNS = {v: k for k, v in six.iteritems(REGULARIZATION_FNS)}
def append_regularization_to_log(log_message, regularization_fns, reg_states):
for i, reg_fn in enumerate(regularization_fns):
log_message = log_message + " | " + INV_REGULARIZATION_FNS[reg_fn] + ": {:.8f}".format(reg_states[i].item())
return log_message
def create_regularization_fns(args):
regularization_fns = []
regularization_coeffs = []
for arg_key, reg_fn in six.iteritems(REGULARIZATION_FNS):
if getattr(args, arg_key) is not None:
regularization_fns.append(reg_fn)
regularization_coeffs.append(eval("args." + arg_key))
regularization_fns = tuple(regularization_fns)
regularization_coeffs = tuple(regularization_coeffs)
return regularization_fns, regularization_coeffs
def get_regularization(model, regularization_coeffs):
if len(regularization_coeffs) == 0:
return None
acc_reg_states = tuple([0.] * len(regularization_coeffs))
for module in model.modules():
if isinstance(module, layers.CNF):
acc_reg_states = tuple(acc + reg for acc, reg in zip(acc_reg_states, module.get_regularization_states()))
return acc_reg_states
def build_model_tabular(args, dims, regularization_fns=None):
hidden_dims = tuple(map(int, args.dims.split("-")))
def build_cnf():
diffeq = layers.ODEnet(
hidden_dims=hidden_dims,
input_shape=(dims,),
strides=None,
conv=False,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
odefunc = layers.ODEfunc(
diffeq=diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
cnf = layers.CNF(
odefunc=odefunc,
T=args.time_length,
train_T=args.train_T,
regularization_fns=regularization_fns,
solver=args.solver,
)
return cnf
chain = [build_cnf() for _ in range(args.num_blocks)]
if args.batch_norm:
bn_layers = [layers.MovingBatchNorm1d(dims, bn_lag=args.bn_lag) for _ in range(args.num_blocks)]
bn_chain = [layers.MovingBatchNorm1d(dims, bn_lag=args.bn_lag)]
for a, b in zip(chain, bn_layers):
bn_chain.append(a)
bn_chain.append(b)
chain = bn_chain
model = layers.SequentialFlow(chain)
set_cnf_options(args, model)
return model
| 31.378109 | 117 | 0.666244 |
59040cd0d8cf5a97b8faef99fad389e6a41a1a36
| 7,166 |
py
|
Python
|
internal/notes/builtin-SAVE/packages/abinit/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 1 |
2019-01-17T20:07:19.000Z
|
2019-01-17T20:07:19.000Z
|
internal/notes/builtin-SAVE/packages/abinit/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | null | null | null |
internal/notes/builtin-SAVE/packages/abinit/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 2 |
2019-08-06T18:13:57.000Z
|
2021-11-05T18:19:49.000Z
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
# Author: Matteo Giantomassi <matteo.giantomassiNOSPAM AT uclouvain.be>
# Date: October 11, 2016
from spack import *
class Abinit(AutotoolsPackage):
"""ABINIT is a package whose main program allows one to find the total
energy, charge density and electronic structure of systems made of
electrons and nuclei (molecules and periodic solids) within
Density Functional Theory (DFT), using pseudopotentials and a planewave
or wavelet basis.
ABINIT also includes options to optimize the geometry according to the
DFT forces and stresses, or to perform molecular dynamics
simulations using these forces, or to generate dynamical matrices,
Born effective charges, and dielectric tensors, based on Density-Functional
Perturbation Theory, and many more properties. Excited states can be
computed within the Many-Body Perturbation Theory (the GW approximation and
the Bethe-Salpeter equation), and Time-Dependent Density Functional Theory
(for molecules). In addition to the main ABINIT code, different utility
programs are provided.
"""
homepage = 'http://www.abinit.org'
url = 'http://ftp.abinit.org/abinit-8.0.8b.tar.gz'
version('8.2.2', '5f25250e06fdc0815c224ffd29858860')
# Versions before 8.0.8b are not supported.
version('8.0.8b', 'abc9e303bfa7f9f43f95598f87d84d5d')
variant('mpi', default=True,
description='Builds with MPI support. Requires MPI2+')
variant('openmp', default=False,
description='Enables OpenMP threads. Use threaded FFTW3')
variant('scalapack', default=False,
description='Enables scalapack support. Requires MPI')
# variant('elpa', default=False,
# description='Uses elpa instead of scalapack. Requires MPI')
# TODO: To be tested.
# It was working before the last `git pull` but now all tests crash.
# For the time being, the default is netcdf3 and the internal fallbacks
# FIXME: rename (trio?) and use multivalued variants to cover
# --with-trio-flavor={netcdf, none}
# Note that Abinit@8: does not support etsf_io anymore because it is not
# compatible with HDF5 and MPI-IO
variant('hdf5', default=False,
description='Enables HDF5+Netcdf4 with MPI. WARNING: experimental')
# Add dependencies
# currently one cannot forward options to virtual packages, see #1712.
# depends_on('blas', when='~openmp')
# depends_on('blas+openmp', when='+openmp')
depends_on('blas')
depends_on('lapack')
# Require MPI2+
depends_on('mpi@2:', when='+mpi')
depends_on('scalapack', when='+scalapack+mpi')
# depends_on('elpa~openmp', when='+elpa+mpi~openmp')
# depends_on('elpa+openmp', when='+elpa+mpi+openmp')
depends_on('fftw+float', when='~openmp')
depends_on('fftw+float+openmp', when='+openmp')
depends_on('netcdf-fortran', when='+hdf5')
depends_on('hdf5+mpi', when='+mpi+hdf5') # required for NetCDF-4 support
# pin libxc version
depends_on("[email protected]")
# Cannot ask for +scalapack if it does not depend on MPI
conflicts('+scalapack', when='~mpi')
# Elpa is a substitute for scalapack and needs mpi
# conflicts('+elpa', when='~mpi')
# conflicts('+elpa', when='+scalapack')
def configure_args(self):
spec = self.spec
options = []
oapp = options.append
if '+mpi' in spec:
# MPI version:
# let the configure script auto-detect MPI support from mpi_prefix
oapp('--with-mpi-prefix={0}'.format(spec['mpi'].prefix))
oapp('--enable-mpi=yes')
oapp('--enable-mpi-io=yes')
# Activate OpenMP in Abinit Fortran code.
if '+openmp' in spec:
oapp('--enable-openmp=yes')
# BLAS/LAPACK/SCALAPACK-ELPA
linalg = spec['lapack'].libs + spec['blas'].libs
if '+scalapack' in spec:
oapp('--with-linalg-flavor=custom+scalapack')
linalg = spec['scalapack'].libs + linalg
# elif '+elpa' in spec:
else:
oapp('--with-linalg-flavor=custom')
oapp('--with-linalg-libs={0}'.format(linalg.ld_flags))
# FFTW3: use sequential or threaded version if +openmp
fftflavor, fftlibs = 'fftw3', '-lfftw3 -lfftw3f'
if '+openmp' in spec:
fftflavor = 'fftw3-threads'
fftlibs = '-lfftw3_omp -lfftw3 -lfftw3f'
options.extend([
'--with-fft-flavor=%s' % fftflavor,
'--with-fft-incs=-I%s' % spec['fftw'].prefix.include,
'--with-fft-libs=-L%s %s' % (spec['fftw'].prefix.lib, fftlibs),
])
oapp('--with-dft-flavor=atompaw+libxc')
# LibXC library
libxc = spec['libxc:fortran']
options.extend([
'with_libxc_incs={0}'.format(libxc.headers.cpp_flags),
'with_libxc_libs={0}'.format(libxc.libs.ld_flags + ' -lm')
])
# Netcdf4/HDF5
if '+hdf5' in spec:
oapp('--with-trio-flavor=netcdf')
# Since version 8, Abinit started to use netcdf4 + hdf5 and we have
# to link with the high level HDF5 library
hdf5 = spec['hdf5:hl']
netcdff = spec['netcdf-fortran:shared']
options.extend([
'--with-netcdf-incs={0}'.format(netcdff.headers.cpp_flags),
'--with-netcdf-libs={0}'.format(
netcdff.libs.ld_flags + ' ' + hdf5.libs.ld_flags
),
])
else:
# In Spack we do our best to avoid building any internally provided
# dependencies, such as netcdf3 in this case.
oapp('--with-trio-flavor=none')
return options
def check(self):
"""This method is called after the build phase if tests have been
explicitly activated by user.
"""
make('check')
make('tests_in')
| 39.373626 | 79 | 0.63285 |
de476dc272caf4f210b2909fd0ef298cccb3c3e3
| 1,622 |
py
|
Python
|
python/raspberrypi/examples/get_acceleration/get_acceleration.py
|
cdjq/DFRobot_LIS331HH
|
a9903b1c043e5f2f2c7a9acf413a496cc1d6189e
|
[
"MIT"
] | null | null | null |
python/raspberrypi/examples/get_acceleration/get_acceleration.py
|
cdjq/DFRobot_LIS331HH
|
a9903b1c043e5f2f2c7a9acf413a496cc1d6189e
|
[
"MIT"
] | null | null | null |
python/raspberrypi/examples/get_acceleration/get_acceleration.py
|
cdjq/DFRobot_LIS331HH
|
a9903b1c043e5f2f2c7a9acf413a496cc1d6189e
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
@file get_acceleration.py
@brief Get the acceleration in x, y, z directions
@copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)
@licence The MIT License (MIT)
@author [fengli]([email protected])
@version V1.0
@date 2021-01-16
@get from https://www.dfrobot.com
@https://github.com/DFRobot/DFRobot_LIS331HH
"""
import sys
sys.path.append("../..") # set system path to top
from DFRobot_LIS331HH import *
import time
# If you want to use SPI to drive this module, uncomment the two lines below and connect the module to the Raspberry Pi via SPI
#RASPBERRY_PIN_CS = 27 #Chip selection pin when SPI is selected
#acce = DFRobot_LIS331HH_SPI(RASPBERRY_PIN_CS)
# If you want to use I2C to drive this module, uncomment the three lines below and connect the module to the Raspberry Pi via I2C
I2C_MODE = 0x01 #default use I2C1
ADDRESS_0 = 0x19 #I2C address
acce = DFRobot_LIS331HH_I2C(I2C_MODE ,ADDRESS_0)
#Chip initialization
acce.begin()
#Get chip id
print("chip id :")
print(acce.get_id())
'''
set range:Range(g)
RANGE_6G = 6#/**<±6G>*/
RANGE_12G = 12,#/**<±12G>*/
RANGE_24G = 24#/**<±24G>*/
'''
acce.set_range(acce.RANGE_6G)
'''
Set data measurement rate
POWERDOWN_0HZ
LOWPOWER_HALFHZ
LOWPOWER_1HZ
LOWPOWER_2HZ
LOWPOWER_5HZ
LOWPOWER_10HZ
NORMAL_50HZ
NORMAL_100HZ
NORMAL_400HZ
NORMAL_1000HZ
'''
acce.set_acquire_rate(acce.NORMAL_50HZ)
time.sleep(0.1)
while True:
#Get the acceleration in the three directions of xyz
x,y,z = acce.read_acce_xyz()
time.sleep(1)
print("Acceleration [X = %.2d g,Y = %.2d g,Z = %.2d g]"%(x,y,z))
| 24.953846 | 77 | 0.639334 |