Dataset column schema:

| column | dtype | values / range |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 3–1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3–972 |
| max_stars_repo_name | stringlengths | 6–130 |
| max_stars_repo_head_hexsha | stringlengths | 40–78 |
| max_stars_repo_licenses | listlengths | 1–10 |
| max_stars_count | int64 | 1–191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24, nullable (⌀) |
| max_issues_repo_path | stringlengths | 3–972 |
| max_issues_repo_name | stringlengths | 6–130 |
| max_issues_repo_head_hexsha | stringlengths | 40–78 |
| max_issues_repo_licenses | listlengths | 1–10 |
| max_issues_count | int64 | 1–116k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24, nullable (⌀) |
| max_forks_repo_path | stringlengths | 3–972 |
| max_forks_repo_name | stringlengths | 6–130 |
| max_forks_repo_head_hexsha | stringlengths | 40–78 |
| max_forks_repo_licenses | listlengths | 1–10 |
| max_forks_count | int64 | 1–105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24, nullable (⌀) |
| content | stringlengths | 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |
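For orientation, a minimal sketch of how rows with this schema could be loaded and filtered with pandas. The parquet file name is a placeholder, not a path taken from this dump; substitute whatever file actually holds the rows.

```python
import pandas as pd

# Placeholder path: point this at a file containing rows with the schema above.
df = pd.read_parquet("the_stack_sample.parquet")

# Columns most useful for a quick look at the code samples.
cols = ["max_stars_repo_name", "max_stars_repo_path", "size",
        "avg_line_length", "max_line_length", "alphanum_fraction"]
print(df[cols].head())

# Example filter: drop rows that are mostly non-alphanumeric or have very long lines.
clean = df[(df["alphanum_fraction"] > 0.25) & (df["max_line_length"] < 1000)]
print(f"{len(clean)} of {len(df)} rows pass the filter")
```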
hexsha: e30fe439a620f6aef4d8e2cc25ad9a7d64cdb376 | size: 1,805 | ext: py | lang: Python
max_stars / max_issues / max_forks: cached_contingency/KeyValueStore.py | MrTomRod/cached-contingency @ 951431218c47ed246ba1fb655b581a99a5cfde00 | ["MIT"] | counts: null | event datetimes: null
content:
import os
import logging
import sqlite3
from typing import Optional
import pandas as pd
class KeyValueStore:
table_name: str
def __init__(self, table_name, db_path: str = None):
self.table_name = table_name
if db_path is None:
if 'KEY_VALUE_STORE_DB' in os.environ:
db_path = os.environ['KEY_VALUE_STORE_DB']
else:
db_path = os.path.expanduser('~/.cache/keyvaluestore.db')
self._db_path = db_path
self.con, self.cur = self.get_cur()
self.create_db()
def __str__(self):
return f'KeyValueStore {self.table_name} ({self._db_path})'
def get_cur(self):
con = sqlite3.connect(self._db_path)
cur = con.cursor()
return con, cur
def __del__(self):
self.cur.close()
self.con.close()
def create_db(self):
raise NotImplementedError(f'Users of the abstract class {self.__class__} must implement this function!')
@staticmethod
def list_to_string(l) -> str:
return ', '.join(f"'{e}'" for e in l)
@staticmethod
def list_to_string_bracket(l):
return ', '.join(f"('{e}')" for e in l)
def _create_db(self, columns: {str: str}, pk_col: str):
columns = ', '.join(f'{col_name} {col_type}' for col_name, col_type in columns.items())
sql = f'''
CREATE TABLE IF NOT EXISTS {self.table_name} (
{columns},
PRIMARY KEY ({pk_col})
);
'''
try:
self.cur.execute(sql)
except sqlite3.OperationalError as e:
logging.warning(f'Failed to run this SQL command on db {self._db_path}:\n{sql}')
raise e
def drop_db(self):
self.cur.execute(f'''DROP TABLE {self.table_name}''')
avg_line_length: 28.650794 | max_line_length: 112 | alphanum_fraction: 0.587812
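KeyValueStore above leaves `create_db` abstract. A minimal sketch of what a concrete subclass might look like, assuming the class above is importable or defined in the same module; `SimpleStringStore`, its column names, and `demo_table` are illustrative and not part of the cached-contingency package.

```python
# Hypothetical subclass for illustration only; table and column names are made up.
class SimpleStringStore(KeyValueStore):
    def create_db(self):
        # Two TEXT columns keyed on "key", matching the _create_db helper's signature.
        self._create_db(columns={'key': 'TEXT', 'value': 'TEXT'}, pk_col='key')

    def set(self, key: str, value: str):
        self.cur.execute(
            f'INSERT OR REPLACE INTO {self.table_name} (key, value) VALUES (?, ?)',
            (key, value)
        )
        self.con.commit()

    def get(self, key: str):
        self.cur.execute(f'SELECT value FROM {self.table_name} WHERE key = ?', (key,))
        row = self.cur.fetchone()
        return None if row is None else row[0]


store = SimpleStringStore('demo_table', db_path=':memory:')
store.set('greeting', 'hello')
print(store.get('greeting'))  # -> hello
```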
hexsha: bff9f2de962a40707f74ad0f9200d217ab4b4d8f | size: 5,690 | ext: py | lang: Python
max_stars / max_issues / max_forks: regionmask/tests/test_utils.py | pablosebastiasaez/regionmask @ c7d797df8b2c3020bc7d045e7fea1487f75dd28b | ["Apache-2.0", "CC-BY-4.0", "MIT"] | counts: null | event datetimes: null
content:
import numpy as np
import pytest
from regionmask.core.utils import (
_create_dict_of_numbered_string,
_equally_spaced_on_split_lon,
_find_splitpoint,
_is_180,
_is_numeric,
_maybe_to_dict,
_sanitize_names_abbrevs,
create_lon_lat_dataarray_from_bounds,
equally_spaced,
)
@pytest.mark.parametrize(
"numbers, string, expected",
[
[[0, 1], "str", {0: "str0", 1: "str1"}],
[[1, 2], "str", {1: "str1", 2: "str2"}],
[[1, 2], "Region", {1: "Region1", 2: "Region2"}],
[[0, 1, 2], "r", {0: "r0", 1: "r1", 2: "r2"}],
],
)
def test_create_dict_of_numbered_string(numbers, string, expected):
result = _create_dict_of_numbered_string(numbers, string)
assert isinstance(result, dict)
assert result == expected
@pytest.mark.parametrize(
"keys, values, expected",
[
[[0, 1], ["a", "b"], {0: "a", 1: "b"}],
[[1, 2], ["a", "b"], {1: "a", 2: "b"}],
[[0, 1], {0: "a", 1: "b"}, {0: "a", 1: "b"}],
[[1, 2], {0: "a", 1: "b"}, {0: "a", 1: "b"}],
],
)
def test_maybe_to_dict(keys, values, expected):
result = _maybe_to_dict(keys, values)
assert isinstance(result, dict)
assert result == expected
@pytest.mark.parametrize(
"numbers, values, default, expected",
[
[[0, 1], ["a", "b"], "r", {0: "a", 1: "b"}],
[[0, 1], {1: "a", 2: "b"}, "r", {1: "a", 2: "b"}],
[[0, 1], None, "r", {0: "r0", 1: "r1"}],
[[0, 1], None, "Region", {0: "Region0", 1: "Region1"}],
[[0, 1], "Region", "r", {0: "Region0", 1: "Region1"}],
],
)
def test_sanitize_names_abbrevs(numbers, values, default, expected):
result = _sanitize_names_abbrevs(numbers, values, default)
assert isinstance(result, dict)
assert result == expected
def test_sanitize_names_abbrevs_unequal_length():
with pytest.raises(ValueError, match="not have the same length"):
_sanitize_names_abbrevs([0, 1], ["A"], "default")
def test_is_180():
assert _is_180(-180, 180)
assert not _is_180(0, 180.1)
assert not _is_180(0, 180.01)
# allow for small rounding errors
assert _is_180(-180.0000002, 180.0000002)
with pytest.raises(ValueError, match="lon has both data that is larger than 180"):
_is_180(-1, 181)
@pytest.mark.parametrize("lon_vals", [(-161, -29, 2), (-180, 181, 2)])
@pytest.mark.parametrize("lat_vals", [(75, 13, -2), (90, -91, -2)])
def test_create_lon_lat_dataarray_from_bounds(lon_vals, lat_vals):
# use "+" because x(*a, *b) is not valid in python 2.7
result = create_lon_lat_dataarray_from_bounds(*lon_vals + lat_vals)
for coord in ["lon", "lat", "lon_bnds", "lat_bnds", "LON", "LAT"]:
assert coord in result.coords
def _check_coords(vals, name):
bnds_expected = np.arange(*vals)
expected = (bnds_expected[:-1] + bnds_expected[1:]) / 2
assert np.allclose(result[name], expected)
assert np.allclose(result[name + "_bnds"], bnds_expected)
return expected
lon = _check_coords(lon_vals, "lon")
lat = _check_coords(lat_vals, "lat")
LON_EXPECTED, LAT_EXPECTED = np.meshgrid(lon, lat)
    assert np.allclose(result["LON"], LON_EXPECTED)
    assert np.allclose(result["LAT"], LAT_EXPECTED)
def test_is_numeric():
assert _is_numeric([1, 2, 3])
assert not _is_numeric(["a"])
def test_equally_spaced():
np.random.seed(0)
equal = np.arange(10)
grid_2D = np.arange(10).reshape(2, 5)
un_equal = [0, 1, 2, 4, 5, 6]
assert equally_spaced(equal)
assert not equally_spaced(grid_2D)
assert not equally_spaced(un_equal)
assert not equally_spaced(1)
assert equally_spaced(equal, equal)
assert not equally_spaced(grid_2D, equal)
assert not equally_spaced(equal, grid_2D)
assert not equally_spaced(grid_2D, grid_2D)
assert not equally_spaced(un_equal, equal)
assert not equally_spaced(equal, un_equal)
assert not equally_spaced(un_equal, un_equal)
assert not equally_spaced(1, equal)
assert not equally_spaced(equal, 1)
assert not equally_spaced(1, 1)
close_to_equal = equal + np.random.randn(*equal.shape) * 10 ** -6
assert equally_spaced(close_to_equal, close_to_equal)
def test__equally_spaced_on_split_lon():
np.random.seed(0)
equal = np.arange(10)
grid_2D = np.arange(10).reshape(2, 5)
un_equal = [0, 1, 2, 4, 5, 6.1]
equal_split = np.asarray([5, 6, 7, 8, 9, 10, 1, 2, 3, 4])
assert _equally_spaced_on_split_lon(equal_split)
assert not _equally_spaced_on_split_lon([10, 1, 2, 3])
assert not _equally_spaced_on_split_lon([1, 2, 3, 10])
assert not _equally_spaced_on_split_lon(equal)
assert not _equally_spaced_on_split_lon(grid_2D)
assert not _equally_spaced_on_split_lon(un_equal)
assert not _equally_spaced_on_split_lon(1)
close_to_equal = equal + np.random.randn(*equal.shape) * 10 ** -6
close_to_equal_split = equal_split + np.random.randn(*equal_split.shape) * 10 ** -6
assert not _equally_spaced_on_split_lon(close_to_equal)
assert _equally_spaced_on_split_lon(close_to_equal_split)
def test_find_splitpoint():
np.random.seed(0)
equal_split = np.asarray([5, 6, 7, 8, 9, 10, 1, 2, 3, 4])
close_to_equal_split = equal_split + np.random.randn(*equal_split.shape) * 10 ** -6
assert _find_splitpoint(equal_split) == 6
assert _find_splitpoint(close_to_equal_split) == 6
with pytest.raises(ValueError, match="more or less than one split point found"):
_find_splitpoint([0, 1, 2, 3])
with pytest.raises(ValueError, match="more or less than one split point found"):
_find_splitpoint([0, 1, 3, 4, 6, 7])
avg_line_length: 28.592965 | max_line_length: 87 | alphanum_fraction: 0.641828
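The parametrized cases above pin down `_create_dict_of_numbered_string` and `_maybe_to_dict` fairly tightly. A minimal re-implementation consistent with those cases, for illustration only; the actual regionmask code may differ in detail.

```python
def _create_dict_of_numbered_string(numbers, string):
    # e.g. numbers=[0, 1], string="str" -> {0: "str0", 1: "str1"}
    return {number: f"{string}{number}" for number in numbers}


def _maybe_to_dict(keys, values):
    # A dict passes through unchanged; a list is zipped with the keys.
    if isinstance(values, dict):
        return values
    return dict(zip(keys, values))


assert _create_dict_of_numbered_string([1, 2], "Region") == {1: "Region1", 2: "Region2"}
assert _maybe_to_dict([0, 1], ["a", "b"]) == {0: "a", 1: "b"}
assert _maybe_to_dict([1, 2], {0: "a", 1: "b"}) == {0: "a", 1: "b"}
```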
hexsha: 3d1f6a69380120f6387c09eb2734310469ec6cf5 | size: 1,037 | ext: py | lang: Python
max_stars: nitro/resource/base/base_responses.py | HanseMerkur/nitro-python @ d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | ["Apache-2.0"] | count: 2 | events: 2020-08-24T18:04:22.000Z to 2020-08-24T18:04:47.000Z
max_issues: nitro/resource/base/base_responses.py | HanseMerkur/nitro-python @ d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | ["Apache-2.0"] | count: null | events: null
max_forks: nitro/resource/base/base_responses.py | HanseMerkur/nitro-python @ d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | ["Apache-2.0"] | count: null | events: null
content:
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nitro.resource.base.base_response import base_response
class base_responses:
    """base_responses is an abstract base class for all the netscaler config/stat response classes."""
def __init__(self, length=0):
self.errorcode = 0
self.message = ""
self.sessionid = ""
self.severity = ""
self.response = []
self.response = [ base_response() for _ in range(length) ]
avg_line_length: 37.035714 | max_line_length: 100 | alphanum_fraction: 0.698168
hexsha: e376fc8183ea9f1da318c52d637d9ae1e3ac1689 | size: 2,651 | ext: py | lang: Python
max_stars: src/common/init_argparse.py | xiaoquqi/gitlab-sync @ 6b438c2a452ac7903ef331f7770d8d6dcbb0bba3 | ["Apache-2.0"] | count: 1 | events: 2022-03-15T07:16:53.000Z to 2022-03-15T07:16:53.000Z
max_issues: src/common/init_argparse.py | bagechashu/gitlab-sync @ f37ff03341dfa21dcc6492979f5b4b784baa552d | ["Apache-2.0"] | count: null | events: null
max_forks: src/common/init_argparse.py | bagechashu/gitlab-sync @ f37ff03341dfa21dcc6492979f5b4b784baa552d | ["Apache-2.0"] | count: 1 | events: 2022-03-15T07:16:44.000Z to 2022-03-15T07:16:44.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Common modules for parse script arguments"""
import argparse
import logging
import sys
def parse_sys_args(argv):
    """Parses command-line arguments"""
parser = argparse.ArgumentParser(
description="Gitlab backup tool in group level")
parser.add_argument(
"--local", action="store", dest="local",
required=True, help="Local gitlab http url, "
"ex: https://local.gitlab.com")
parser.add_argument(
"--local-token", action="store", dest="local_token",
required=True, help="Local gitlab private token.")
parser.add_argument(
"--local-group", action="store", dest="local_group",
required=True, help="Local github group for reading.")
parser.add_argument(
"--remote", action="store", dest="remote",
required=True, help="Remote gitlab http url, "
"ex: https://remote.gitlab.com")
parser.add_argument(
"--remote-token", action="store", dest="remote_token",
required=True, help="Remote gitlab private token")
parser.add_argument(
"--remote-group", action="store", dest="remote_group",
required=False, help="Target group of remote github for backup.")
parser.add_argument(
"--push-url", action="store", dest="push_url",
required=True, help="Remote push url for backup target")
parser.add_argument(
"--force-push", action="store_true",
dest="force_push", default=True,
required=False, help="Force push to remote by default")
parser.add_argument(
"--ignore-branches", action="store", dest="ignore_branches",
required=False, help="Not sync for ignore branches, "
"ex: cherry-pick,dev,temp")
parser.add_argument(
"--allow-branches", action="store", dest="allow_branches",
required=False, help="Only sync for allow branches, "
"ex: master,main,qa. "
"if not given, sync all branches."
"If ignore branches is given, the"
"priority is higher than this argument")
parser.add_argument(
"-d", "--debug", action="store_true", dest="debug",
default=False, help="Enable debug message.")
parser.add_argument(
"-v", "--verbose", action="store_true", dest="verbose",
default=True, help="Show message in standard output.")
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
else:
return parser.parse_args(argv[1:])
avg_line_length: 40.784615 | max_line_length: 73 | alphanum_fraction: 0.596756
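A short usage sketch for `parse_sys_args` with a fabricated argv; the URLs and tokens are placeholders. Note that the "no arguments at all" check above inspects `sys.argv` rather than the `argv` parameter, so the snippet points `sys.argv` at the same list when experimenting outside the real script.

```python
import sys

# Placeholder values for illustration; the real script passes sys.argv straight through.
sample_argv = [
    "gitlab-sync",
    "--local", "https://local.gitlab.example.com",
    "--local-token", "LOCAL_TOKEN_PLACEHOLDER",
    "--local-group", "my-group",
    "--remote", "https://remote.gitlab.example.com",
    "--remote-token", "REMOTE_TOKEN_PLACEHOLDER",
    "--push-url", "https://remote.gitlab.example.com/my-group",
    "--debug",
]
sys.argv = sample_argv
args = parse_sys_args(sample_argv)
print(args.local, args.local_group, args.debug)  # -> https://local.gitlab.example.com my-group True
```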
hexsha: f8a65a5633d5a49888e9831960b058a9d6b73163 | size: 2,269 | ext: py | lang: Python
max_stars: src/sqlfluff/rules/L027.py | fbb-oc/sqlfluff @ f50e72b748dcf700483d0e937aa2abcfb0a56e9e | ["MIT"] | count: 1 | events: 2022-03-03T02:29:11.000Z to 2022-03-03T02:29:11.000Z
max_issues: src/sqlfluff/rules/L027.py | clairetaylor352/sqlfluff @ 62900332228db323da323ce20df0c5e17ba9fcbf | ["MIT"] | count: 1 | events: 2021-12-08T18:40:19.000Z to 2021-12-08T18:40:19.000Z
max_forks: src/sqlfluff/rules/L027.py | derickl/sqlfluff @ ea2341ffa5325757acfa02cc9f7a07ac78b7a6c8 | ["MIT"] | count: null | events: null
content:
"""Implementation of Rule L027."""
from sqlfluff.core.rules.base import LintResult
from sqlfluff.rules.L020 import Rule_L020
class Rule_L027(Rule_L020):
"""References should be qualified if select has more than one referenced table/view.
.. note::
Except if they're present in a ``USING`` clause.
**Anti-pattern**
In this example, the reference ``vee`` has not been declared,
and the variables ``a`` and ``b`` are potentially ambiguous.
.. code-block:: sql
SELECT a, b
FROM foo
LEFT JOIN vee ON vee.a = foo.a
**Best practice**
Add the references.
.. code-block:: sql
SELECT foo.a, vee.b
FROM foo
LEFT JOIN vee ON vee.a = foo.a
"""
def _lint_references_and_aliases(
self,
table_aliases,
standalone_aliases,
references,
col_aliases,
using_cols,
parent_select,
):
# Do we have more than one? If so, all references should be qualified.
if len(table_aliases) <= 1:
return None
# A buffer to keep any violations.
violation_buff = []
# Check all the references that we have.
for r in references:
this_ref_type = r.qualification()
# Discard column aliases that
# refer to the current column reference.
col_alias_names = [
c.alias_identifier_name
for c in col_aliases
if r not in c.column_reference_segments
]
if (
this_ref_type == "unqualified"
# Allow unqualified columns that
# are actually aliases defined
# in a different select clause element.
and r.raw not in col_alias_names
# Allow columns defined in a USING expression.
and r.raw not in using_cols
):
violation_buff.append(
LintResult(
anchor=r,
description=f"Unqualified reference {r.raw!r} found in "
"select with more than one referenced table/view.",
)
)
return violation_buff or None
avg_line_length: 29.467532 | max_line_length: 88 | alphanum_fraction: 0.552226
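Stripped of the sqlfluff plumbing, the rule above boils down to: with more than one table in scope, flag any unqualified reference that is neither a column alias nor a USING column. A standalone sketch of that check on plain strings, for illustration only (this is not sqlfluff's API):

```python
def find_unqualified_refs(table_aliases, references, col_aliases, using_cols):
    # Mirror of the rule's core logic on plain strings rather than parse segments.
    if len(table_aliases) <= 1:
        return []
    return [
        ref for ref in references
        if "." not in ref              # stand-in for r.qualification() == "unqualified"
        and ref not in col_aliases     # aliases defined elsewhere in the SELECT
        and ref not in using_cols      # columns named in a USING (...) clause
    ]


# SELECT a, b FROM foo LEFT JOIN vee ON vee.a = foo.a  -> a and b are ambiguous
print(find_unqualified_refs(
    table_aliases=["foo", "vee"],
    references=["a", "b", "vee.a", "foo.a"],
    col_aliases=[],
    using_cols=[],
))  # -> ['a', 'b']
```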
hexsha: 4e2bd4da13bca4bdfd1452b68ebf14ab11d5eb84 | size: 1,744 | ext: py | lang: Python
max_stars: trainer_mask.py | salmanali88/caffe @ fe8da0cbdf2a391c84e841c0623ac55cd8228794 | ["BSD-2-Clause"] | count: 3 | events: 2016-09-20T07:13:45.000Z to 2018-11-16T08:01:36.000Z
max_issues: trainer_mask.py | salmanali88/caffe @ fe8da0cbdf2a391c84e841c0623ac55cd8228794 | ["BSD-2-Clause"] | count: null | events: null
max_forks: trainer_mask.py | salmanali88/caffe @ fe8da0cbdf2a391c84e841c0623ac55cd8228794 | ["BSD-2-Clause"] | count: 12 | events: 2016-01-29T03:49:55.000Z to 2020-04-20T02:53:08.000Z
content:
import numpy as np
import matplotlib.pyplot as plt
import caffe
from pylab import *
import vis_utils as vu
def data_unit(net, file_name):
n, c, h, w = net.blobs['data'].data.shape
plt.subplot(131)
plt.title('Original Image')
plt.axis('off')
vu.vis_square(net.blobs['data'].data.transpose(0, 2, 3, 1))
plt.subplot(132)
plt.title('Mask_output')
plt.axis('off')
vu.vis_square(net.blobs['mask_output'].data.transpose(0, 2, 3, 1))
plt.subplot(133)
plt.axis('off')
plt.title('Correctness')
acc = np.zeros((n, h, w, 3))
gt_label = net.blobs['label'].data
est_label = np.argmax(net.blobs['loss3/classifier'].data, axis=1)
    err = (est_label != gt_label)
ind = np.array(range(n))[err]
for i in ind:
acc[i] = np.ones((h, w, 3))
plt.imshow(vu.vis_grid(acc))
plt.gca().axis('off')
plt.savefig(file_name+'.jpg', dpi = 1000)
plt.close()
caffe_root = './'
niter = 100000
display = 10
# losses will also be stored in the log
train_loss = np.zeros(niter)
caffe.set_device(0)
caffe.set_mode_gpu()
# We create a solver that fine-tunes from a previously trained network.
solver = caffe.SGDSolver(caffe_root + 'models/CUB_googLeNet_Mask/solver.prototxt')
solver.net.copy_from(caffe_root + 'models/bvlc_googlenet/bvlc_googlenet.caffemodel')
# We run the solver for niter times, and record the training loss.
for it in range(niter):
solver.step(1) # SGD by Caffe
# store the train loss
train_loss[it] = solver.net.blobs['loss3/loss3'].data
if it % display == 0:
        print('iter %d, finetune_loss=%f' % (it, train_loss[it]))
if it % 100 == 0:
data_unit(solver.net, 'logs/'+str(it))
        print(solver.net.blobs['loc_mm'].data[0])
print('done')
avg_line_length: 26.830769 | max_line_length: 84 | alphanum_fraction: 0.662844
hexsha: 983f61214ed5fbed634d51133a18c9ddf01e9950 | size: 5,275 | ext: py | lang: Python
max_stars: cliffs/command.py | michalwa/py-cliffs @ aaf089d1b0e05abb15e58bca7670c2632f680ae3 | ["MIT"] | count: 1 | events: 2020-05-28T19:52:35.000Z to 2020-05-28T19:52:35.000Z
max_issues: cliffs/command.py | michalwa/py-clifford @ aaf089d1b0e05abb15e58bca7670c2632f680ae3 | ["MIT"] | count: 1 | events: 2021-02-18T20:29:34.000Z to 2021-02-18T20:30:55.000Z
max_forks: cliffs/command.py | michalwa/py-cliffs @ aaf089d1b0e05abb15e58bca7670c2632f680ae3 | ["MIT"] | count: 1 | events: 2020-06-20T21:05:54.000Z to 2020-06-20T21:05:54.000Z
content:
from typing import Optional, Callable, Iterable
from inspect import signature
from .utils import instance_or_kwargs
from .syntax_tree import Node
from .call_lexer import CallLexer
from .call_match import *
from .call_matcher import CallMatcher
import textwrap
class TooManyArguments(CallMatchFail):
pass
class Command:
"""Matches command calls against its syntax and controls callback dispatch."""
def __init__(self, syntax: Node, callback: Callable, **kwargs):
"""Initializes a command.
Parameters
----------
* syntax: `Node` - The root of the syntax tree for this command.
* callback: `(...) -> *` - The callback.
Keyword arguments
-----------------
* lexer: `CallLexer` - The lexer to use to tokenize incoming calls.
* matcher: `CallMatcher` - The matcher to use to match calls against the syntax of this command.
* description: `str` - The description to include in the usage help message. Ignored if hidden is True.
* hidden: `bool` - Whether the usage help message should exclude this command entirely.
All keyword arguments will be saved in `kwargs`.
"""
self.syntax = syntax
self.callback = callback
self.kwargs = kwargs
self.lexer = instance_or_kwargs(kwargs.get('lexer', {}), CallLexer)
self.matcher = instance_or_kwargs(kwargs.get('matcher', {}), CallMatcher)
self.description: Optional[str] = kwargs.get('description', None)
        self.hidden: bool = kwargs.get('hidden', False)
def begin_match(self, call: str) -> CallMatch:
return CallMatch(call, list(self.lexer.tokenize(call)))
def match(self, match: CallMatch):
"""Tries to match the given call to this command's syntax and populates
the given match instance.
Parameters
----------
* call: `str` - The call to match.
* match: `CallMatch` - The match to populate.
Raises
------
* `CallMatchFail` when matching fails or the command tokens are not fully
exhausted at the end of the match.
"""
try:
self.syntax.match(match, self.matcher)
except CallMatchFail as e:
e.command = self
raise e
if match.has_tokens():
# Tokens were left in the match, which means some nodes possibly
# didn't match - we look for a hint in the match and raise it if it exists
if match.hint is not None:
if isinstance(match.hint, CallMatchFail):
match.hint.command = self
raise match.hint
# Or we raise the generic error
e = TooManyArguments('Too many arguments')
e.command = self
raise e
def execute(self, match: CallMatch, callback_args={}) -> object:
"""Executes the command callback with the given match. By default,
the match must be the result of calling the `match()` method of this object.
Parameters
----------
* match: `CallMatch` - The match to dispatch to the callback.
* callback_args: `dict` (optional) - Additional arguments to pass to the callback.
Defaults to none.
Returns
-------
* Whatever is returned by the callback.
"""
# Pass only those args that are required by the callback signature
sig = signature(self.callback)
        # Copy before augmenting so the caller's dict (and the mutable default) is not modified in place
        callback_args = {**callback_args, 'match': match, 'command': self}
        callback_args |= match._params
args = dict((p, callback_args[p]) for p in sig.parameters if p in callback_args)
return self.callback(**args)
def get_usage(self, **kwargs) -> Iterable[str]:
"""Returns the auto-generated usage help message for this command.
Keyword arguments
-----------------
* max_width: `int` - The width to wrap the usage help message to (0 for no wrapping).
* indent_width: `int` - The width of the indent for the command description.
Returns
-------
* `Iterable[str]`: The lines of the usage help message.
"""
if self.hidden:
return []
max_width = kwargs.get('max_width', 100)
indent_width = kwargs.get('indent_width', 4)
if max_width != 0:
for line in textwrap.wrap(str(self.syntax), width=max_width):
yield line
else:
yield str(self.syntax)
if self.description is not None:
if max_width != 0:
wrap_options = {
'width': max_width,
'initial_indent': ' ' * indent_width,
'subsequent_indent': ' ' * indent_width,
'expand_tabs': True,
}
for desc_line in self.description.splitlines():
if desc_line == '':
yield desc_line
else:
for line in textwrap.wrap(desc_line, **wrap_options):
yield line
else:
for desc_line in self.description.splitlines():
yield desc_line
avg_line_length: 35.402685 | max_line_length: 113 | alphanum_fraction: 0.576114
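The interesting detail in `Command.execute` above is that it only forwards the keyword arguments the callback actually declares. A self-contained sketch of that signature-filtering pattern with `inspect.signature`, outside of cliffs itself; `dispatch` and `on_greet` are made-up names for illustration.

```python
from inspect import signature


def dispatch(callback, available_args):
    # Keep only the arguments the callback's signature declares, the same trick
    # Command.execute uses before calling self.callback(**args).
    sig = signature(callback)
    args = {name: available_args[name] for name in sig.parameters if name in available_args}
    return callback(**args)


def on_greet(name, match=None):
    return f"hello {name}" if match is None else f"hello {name} ({match})"


# 'command' is silently dropped because on_greet does not declare it.
print(dispatch(on_greet, {"name": "world", "command": object(), "match": "greet <name>"}))
```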
hexsha: 75e0666dbef14852931a8bd0701cf4670c84ac6d | size: 3,383 | ext: py | lang: Python
max_stars / max_issues / max_forks: train.py | Mrrrat/asr_project_template @ 50d264684d90bc45c59f3e9be5766fabaf090d25 | ["MIT"] | counts: null | event datetimes: null
content:
import argparse
import collections
import warnings
import numpy as np
import torch
import hw_asr.loss as module_loss
import hw_asr.metric as module_metric
import hw_asr.model as module_arch
from hw_asr.datasets.utils import get_dataloaders
from hw_asr.text_encoder.ctc_char_text_encoder import CTCCharTextEncoder
from hw_asr.trainer import Trainer
from hw_asr.utils import prepare_device
from hw_asr.utils.parse_config import ConfigParser
warnings.filterwarnings("ignore", category=UserWarning)
# fix random seeds for reproducibility
SEED = 67
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
def main(config):
logger = config.get_logger("train")
# text_encoder
text_encoder = CTCCharTextEncoder.get_simple_alphabet()
# setup data_loader instances
dataloaders = get_dataloaders(config, text_encoder)
# build model architecture, then print to console
model = config.init_obj(config["arch"], module_arch, n_class=len(text_encoder))
logger.info(model)
# prepare for (multi-device) GPU training
device, device_ids = prepare_device(config["n_gpu"])
model = model.to(device)
if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids)
# get function handles of loss and metrics
loss_module = config.init_obj(config["loss"], module_loss).to(device)
metrics = [
config.init_obj(metric_dict, module_metric, text_encoder=text_encoder)
for metric_dict in config["metrics"]
]
# build optimizer, learning rate scheduler. delete every lines containing lr_scheduler for disabling scheduler
trainable_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = config.init_obj(config["optimizer"], torch.optim, trainable_params)
lr_scheduler = config.init_obj(config["lr_scheduler"], torch.optim.lr_scheduler, optimizer)
trainer = Trainer(
model,
loss_module,
metrics,
optimizer,
text_encoder=text_encoder,
config=config,
device=device,
data_loader=dataloaders["train"],
###OBO
#valid_data_loader=dataloaders["val"],
###OBO
lr_scheduler=lr_scheduler,
len_epoch=config["trainer"].get("len_epoch", None)
)
trainer.train()
if __name__ == "__main__":
args = argparse.ArgumentParser(description="PyTorch Template")
args.add_argument(
"-c",
"--config",
default=None,
type=str,
help="config file path (default: None)",
)
args.add_argument(
"-r",
"--resume",
default=None,
type=str,
help="path to latest checkpoint (default: None)",
)
args.add_argument(
"-d",
"--device",
default=None,
type=str,
help="indices of GPUs to enable (default: all)",
)
# custom cli options to modify configuration from default values given in json file.
CustomArgs = collections.namedtuple("CustomArgs", "flags type target")
options = [
CustomArgs(["--lr", "--learning_rate"], type=float, target="optimizer;args;lr"),
CustomArgs(
["--bs", "--batch_size"], type=int, target="data_loader;args;batch_size"
),
]
config = ConfigParser.from_args(args, options)
main(config)
avg_line_length: 30.477477 | max_line_length: 114 | alphanum_fraction: 0.683713
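The `config.init_obj(...)` calls above follow the pytorch-template pattern of instantiating a class named in the JSON config. A minimal sketch of what such a helper might look like, assuming config entries of the form `{"type": ..., "args": {...}}`; the real hw_asr ConfigParser may differ, and `init_obj` here is a hypothetical standalone function.

```python
import torch


def init_obj(obj_dict, module, *args, **kwargs):
    # Hypothetical helper: look the class name up in `module` and build it with
    # the args from the config plus any extra positional/keyword arguments.
    cls = getattr(module, obj_dict["type"])
    return cls(*args, **obj_dict.get("args", {}), **kwargs)


model = torch.nn.Linear(4, 2)
optimizer_cfg = {"type": "Adam", "args": {"lr": 3e-4}}
optimizer = init_obj(optimizer_cfg, torch.optim, model.parameters())
print(type(optimizer).__name__, optimizer.defaults["lr"])  # -> Adam 0.0003
```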
hexsha: df44f367025eae5ae6945328695608f1d17ff1b7 | size: 709 | ext: py | lang: Python
max_stars: util/ioutil.py | toastisme/dials @ 6bc8ababc33bfe334513677f8adb65c0e90003f3 | ["BSD-3-Clause"] | count: 58 | events: 2015-10-15T09:28:20.000Z to 2022-03-28T20:09:38.000Z
max_issues: util/ioutil.py | toastisme/dials @ 6bc8ababc33bfe334513677f8adb65c0e90003f3 | ["BSD-3-Clause"] | count: 1,741 | events: 2015-11-24T08:17:02.000Z to 2022-03-31T15:46:42.000Z
max_forks: util/ioutil.py | toastisme/dials @ 6bc8ababc33bfe334513677f8adb65c0e90003f3 | ["BSD-3-Clause"] | count: 45 | events: 2015-10-14T13:44:16.000Z to 2022-03-22T14:45:56.000Z
content:
def get_inverse_ub_matrix_from_xparm(handle):
"""Get the inverse_ub_matrix from an xparm file handle
Params:
handle The file handle
Returns:
The inverse_ub_matrix
"""
from scitbx import matrix
return matrix.sqr(
handle.unit_cell_a_axis + handle.unit_cell_b_axis + handle.unit_cell_c_axis
)
def get_space_group_type_from_xparm(handle):
    """Get the space group type object from an xparm file handle
Params:
handle The file handle
Returns:
The space group type object
"""
from cctbx import sgtbx
return sgtbx.space_group_type(
sgtbx.space_group(sgtbx.space_group_symbols(handle.space_group).hall())
)
avg_line_length: 22.870968 | max_line_length: 83 | alphanum_fraction: 0.691114
hexsha: ea25d42c186c5fcb781a8d4dc9c94dab71e99096 | size: 5,714 | ext: py | lang: Python
max_stars / max_issues / max_forks: MessagePassing/GCN.py | heming-zhang/PyTorch-Geometric-Study @ 06d5217210623c8d472467949c1b74e287558e8c | ["Apache-2.0"] | counts: null | event datetimes: null
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree
from torch_geometric.nn.inits import zeros
class GCNConv(MessagePassing):
def __init__(self, in_channels, out_channels):
super(GCNConv, self).__init__(aggr='add')
self.lin = torch.nn.Linear(in_channels, out_channels)
def forward(self, x, edge_index):
# X: [N, in_channels]
# edge_index: [2, E]
        # 1. Add self-loops to the adjacency matrix
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        # 2. Transform the node features with the linear layer
        # x goes from [N, in_channels] to [N, out_channels]
x = self.lin(x)
        # 3. Compute the normalization coefficients
        # The first vector of edge_index holds the row indices, the second the column indices
row, col = edge_index
deg = degree(row, x.size(0), dtype=x.dtype)
deg_inv_sqrt = deg.pow(-1/2)
        # The first element of norm is the normalization coefficient of the first edge in edge_index
        # Multiplication is element-wise; tensor1[tensor2] has the same shape as tensor2
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
        # Start of steps 4-6: propagate() runs message - AGGREGATE - update internally
return self.propagate(edge_index, size=(x.size(0), x.size(1)), x=x, norm=norm)
def message(self, x_j, norm):
        # x_j has shape [E, out_channels]
print(x_j)
        # 4. Build the message: scale the neighbor features by the normalization coefficient
return norm.view(-1, 1) * x_j
def update(self, aggr_out):
        # aggr_out has shape [N, out_channels]
        # 6. Update the node embeddings; no extra mapping is applied here
return aggr_out
# # Instantiate the layer
# conv = GCNConv(3, 3)
# # Build the data
# edge_index = torch.tensor([
# [0, 1, 1, 2],
# [1, 0, 2, 1]
# ], dtype=torch.long)
# x = torch.tensor([
# [0, 0, 0],
# [1, 1, 1],
# [2, 2, 2]
# ], dtype=torch.float)
# # Calling the object invokes its forward function by default
# x = conv(x, edge_index)
# print(x)
class GraphSAGELayer(MessagePassing):
"""GraphSAGE layer with edge attributes
Args:
input_dim(int): the size of input feature
output_dim(int): the size of output feature
aggr(str): aggregation function in message passing network
num_edge_type(int): number of edge type, 0 indicate no edge attribute
"""
def __init__(self,input_dim,output_dim,aggr="mean",num_edge_type=0):
super(GraphSAGELayer, self).__init__()
self.aggr=aggr
self.proj=nn.Linear(input_dim*2,output_dim,bias=False)
self.bias=nn.Parameter(torch.Tensor(output_dim))
if num_edge_type > 0:
self.edge_embedding = torch.nn.Embedding(num_edge_type, input_dim)
torch.nn.init.xavier_uniform_(self.edge_embedding.weight.data)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.proj.weight.data)
zeros(self.bias)
def forward(self,x,edge_index,edge_attr=None):
# don't need to add self loop in GraphSAGE
#edge_index,_ = add_self_loops(edge_index, num_nodes=x.size(0))
import pdb; pdb.set_trace()
if edge_attr is not None:
edge_embeddings = self.edge_embedding(edge_attr)
x_n= self.propagate(edge_index, x=x, edge_attr=edge_embeddings)
else:
x_n=self.propagate(edge_index, x=x, edge_attr=None)
return F.normalize(F.relu(self.proj(torch.cat([x,x_n],dim=-1))+self.bias),p=2,dim=-1)
def message(self, x_j, edge_attr):
import pdb; pdb.set_trace()
if edge_attr is not None:
return x_j + edge_attr
else:
return x_j
def update(self, aggr_out):
import pdb; pdb.set_trace()
return aggr_out
# # Instantiate the layer
# conv = GraphSAGELayer(3, 3, num_edge_type=2)
# # Build the data
# edge_index = torch.tensor([
# [0, 1, 1, 2],
# [1, 0, 2, 1]
# ], dtype=torch.long)
# x = torch.tensor([
# [0, 0, 0],
# [1, 1, 1],
# [2, 2, 2]
# ], dtype=torch.float)
# # Calling the object invokes its forward function by default
# x = conv(x, edge_index, edge_attr=torch.tensor(1))
# print(x)
class GCNConv2(MessagePassing):
def __init__(self, in_channels, out_channels):
super(GCNConv2, self).__init__(aggr='add')
self.lin = torch.nn.Linear(in_channels, out_channels)
def forward(self, x, edge_index):
# X: [N, in_channels]
# edge_index: [2, E]
weight='1'
addition='2'
        # 1. Add self-loops to the adjacency matrix
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        # 2. Transform the node features with the linear layer
        # x goes from [N, in_channels] to [N, out_channels]
x = self.lin(x)
        # 3. Compute the normalization coefficients
        # The first vector of edge_index holds the row indices, the second the column indices
row, col = edge_index
deg = degree(row, x.size(0), dtype=x.dtype)
deg_inv_sqrt = deg.pow(-1/2)
        # The first element of norm is the normalization coefficient of the first edge in edge_index
        # Multiplication is element-wise; tensor1[tensor2] has the same shape as tensor2
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
import pdb; pdb.set_trace()
print(x)
        # Start of steps 4-6: propagate() runs message - AGGREGATE - update internally
return self.propagate(edge_index, size=(x.size(0), x.size(1)), x=x, norm=norm, weight=weight, addition=addition)
def message(self, x, x_j, x_i, norm, weight, addition):
        # x_j has shape [E, out_channels]
print(x_j)
print(x)
        # 4. Build the message: scale the neighbor features by the normalization coefficient
# print(weight)
# print(addition)
import pdb; pdb.set_trace()
return norm.view(-1, 1) * x_j
def update(self, aggr_out):
        # aggr_out has shape [N, out_channels]
        # 6. Update the node embeddings; no extra mapping is applied here
return aggr_out
# Instantiate the layer
conv = GCNConv2(3, 3)
# Build the data
edge_index = torch.tensor([
[0, 1, 1, 2],
[1, 0, 2, 1]
], dtype=torch.long)
x = torch.tensor([
[0, 0, 0],
[1, 1, 1],
[2, 2, 2]
], dtype=torch.float)
# Calling the object invokes its forward function by default
x = conv(x, edge_index)
print(x)
avg_line_length: 28.009804 | max_line_length: 120 | alphanum_fraction: 0.621106
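For the deg^{-1/2} normalization used in `GCNConv.forward` above, a small NumPy check of the same coefficients on the 3-node example graph from the bottom of the file. This reproduces only the arithmetic, not the MessagePassing machinery.

```python
import numpy as np

# Same edge list as the example at the bottom of the file, plus the self-loops
# that add_self_loops appends: (0,0), (1,1), (2,2).
row = np.array([0, 1, 1, 2, 0, 1, 2])
col = np.array([1, 0, 2, 1, 0, 1, 2])

deg = np.bincount(row, minlength=3).astype(float)   # degree of each source node
deg_inv_sqrt = deg ** -0.5
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]        # one coefficient per edge

print(deg)    # [2. 3. 2.]
print(norm)   # e.g. edge (0, 1): 1/sqrt(2*3) ≈ 0.408
```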
hexsha: ed00d4fec62885eec5625766f456ec56d8cc2b28 | size: 2,815 | ext: py | lang: Python
max_stars: tests/test_backtranslation_dataset.py | beichao1314/fairseq @ b1521f962e4ca670311c0cd0c8b1dadf310cb242 | ["BSD-3-Clause"] | count: 77 | events: 2019-04-29T01:56:04.000Z to 2022-03-19T08:05:55.000Z
max_issues: tests/test_backtranslation_dataset.py | beichao1314/fairseq @ b1521f962e4ca670311c0cd0c8b1dadf310cb242 | ["BSD-3-Clause"] | count: 7 | events: 2019-04-24T09:07:06.000Z to 2022-03-28T21:58:04.000Z
max_forks: tests/test_backtranslation_dataset.py | beichao1314/fairseq @ b1521f962e4ca670311c0cd0c8b1dadf310cb242 | ["BSD-3-Clause"] | count: 22 | events: 2019-04-28T04:39:41.000Z to 2022-03-19T03:13:16.000Z
content:
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import unittest
import tests.utils as test_utils
import torch
from fairseq.data.backtranslation_dataset import BacktranslationDataset
from fairseq import sequence_generator
class TestBacktranslationDataset(unittest.TestCase):
def setUp(self):
self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model = (
test_utils.sequence_generator_setup()
)
dummy_src_samples = self.src_tokens
self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples)
def _backtranslation_dataset_helper(self, remove_eos_at_src):
"""
SequenceGenerator kwargs are same as defaults from fairseq/options.py
"""
backtranslation_dataset = BacktranslationDataset(
tgt_dataset=self.tgt_dataset,
tgt_dict=self.tgt_dict,
backtranslation_model=self.model,
max_len_a=0,
max_len_b=200,
beam_size=2,
unk_penalty=0,
sampling=False,
remove_eos_at_src=remove_eos_at_src,
generator_class=sequence_generator.SequenceGenerator,
)
dataloader = torch.utils.data.DataLoader(
backtranslation_dataset,
batch_size=2,
collate_fn=backtranslation_dataset.collater,
)
backtranslation_batch_result = next(iter(dataloader))
eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2
# Note that we sort by src_lengths and add left padding, so actually
# ids will look like: [1, 0]
expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]])
if remove_eos_at_src:
expected_src = expected_src[:, :-1]
expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
generated_src = backtranslation_batch_result["net_input"]["src_tokens"]
tgt_tokens = backtranslation_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_backtranslation_dataset_no_eos_at_src(self):
self._backtranslation_dataset_helper(remove_eos_at_src=True)
def test_backtranslation_dataset_with_eos_at_src(self):
self._backtranslation_dataset_helper(remove_eos_at_src=False)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
avg_line_length: 37.039474 | max_line_length: 90 | alphanum_fraction: 0.687034
hexsha: 6a38b38210b27fb24862406255d78613651af0c7 | size: 31,910 | ext: py | lang: Python
max_stars / max_issues / max_forks: python/istio_api/mesh/v1alpha1/config_pb2.py | lei-tang/api @ aa2c2a84418c5e4c5ac0719be542c1750ce41cc5 | ["Apache-2.0"] | counts: null | event datetimes: null
content:
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mesh/v1alpha1/config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mesh/v1alpha1/config.proto',
package='istio.mesh.v1alpha1',
syntax='proto3',
serialized_pb=_b('\n\x1amesh/v1alpha1/config.proto\x12\x13istio.mesh.v1alpha1\x1a\x1egoogle/protobuf/duration.proto\"\xfb\x01\n\x07Tracing\x12\x35\n\x06zipkin\x18\x01 \x01(\x0b\x32#.istio.mesh.v1alpha1.Tracing.ZipkinH\x00\x12;\n\tlightstep\x18\x02 \x01(\x0b\x32&.istio.mesh.v1alpha1.Tracing.LightstepH\x00\x1a\x19\n\x06Zipkin\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x1aW\n\tLightstep\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x14\n\x0c\x61\x63\x63\x65ss_token\x18\x02 \x01(\t\x12\x0e\n\x06secure\x18\x03 \x01(\x08\x12\x13\n\x0b\x63\x61\x63\x65rt_path\x18\x04 \x01(\tB\x08\n\x06tracer\"\xe6\x05\n\x0bProxyConfig\x12\x13\n\x0b\x63onfig_path\x18\x01 \x01(\t\x12\x13\n\x0b\x62inary_path\x18\x02 \x01(\t\x12\x17\n\x0fservice_cluster\x18\x03 \x01(\t\x12\x31\n\x0e\x64rain_duration\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12;\n\x18parent_shutdown_duration\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x19\n\x11\x64iscovery_address\x18\x06 \x01(\t\x12\x1a\n\x0ezipkin_address\x18\x08 \x01(\tB\x02\x18\x01\x12\x32\n\x0f\x63onnect_timeout\x18\t \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x1a\n\x12statsd_udp_address\x18\n \x01(\t\x12\x18\n\x10proxy_admin_port\x18\x0b \x01(\x05\x12L\n\x19\x63ontrol_plane_auth_policy\x18\r \x01(\x0e\x32).istio.mesh.v1alpha1.AuthenticationPolicy\x12\x1a\n\x12\x63ustom_config_file\x18\x0e \x01(\t\x12\x18\n\x10stat_name_length\x18\x0f \x01(\x05\x12\x13\n\x0b\x63oncurrency\x18\x10 \x01(\x05\x12%\n\x1dproxy_bootstrap_template_path\x18\x11 \x01(\t\x12S\n\x11interception_mode\x18\x12 \x01(\x0e\x32\x38.istio.mesh.v1alpha1.ProxyConfig.InboundInterceptionMode\x12-\n\x07tracing\x18\x13 \x01(\x0b\x32\x1c.istio.mesh.v1alpha1.Tracing\"3\n\x17InboundInterceptionMode\x12\x0c\n\x08REDIRECT\x10\x00\x12\n\n\x06TPROXY\x10\x01J\x04\x08\x07\x10\x08J\x04\x08\x0c\x10\r\"\xdc\x07\n\nMeshConfig\x12\x1a\n\x12mixer_check_server\x18\x01 \x01(\t\x12\x1b\n\x13mixer_report_server\x18\x02 \x01(\t\x12\x1d\n\x15\x64isable_policy_checks\x18\x03 \x01(\x08\x12\x19\n\x11proxy_listen_port\x18\x04 \x01(\x05\x12\x17\n\x0fproxy_http_port\x18\x05 \x01(\x05\x12\x32\n\x0f\x63onnect_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x15\n\ringress_class\x18\x07 \x01(\t\x12\x17\n\x0fingress_service\x18\x08 \x01(\t\x12V\n\x17ingress_controller_mode\x18\t \x01(\x0e\x32\x35.istio.mesh.v1alpha1.MeshConfig.IngressControllerMode\x12\x43\n\x0b\x61uth_policy\x18\n \x01(\x0e\x32*.istio.mesh.v1alpha1.MeshConfig.AuthPolicyB\x02\x18\x01\x12\x16\n\x0e\x65nable_tracing\x18\x0c \x01(\x08\x12\x17\n\x0f\x61\x63\x63\x65ss_log_file\x18\r \x01(\t\x12\x38\n\x0e\x64\x65\x66\x61ult_config\x18\x0e \x01(\x0b\x32 .istio.mesh.v1alpha1.ProxyConfig\x12V\n\x17outbound_traffic_policy\x18\x11 \x01(\x0b\x32\x35.istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy\x12\'\n\x1f\x65nable_client_side_policy_check\x18\x13 \x01(\x08\x12\x14\n\x0csds_uds_path\x18\x14 \x01(\t\x12\x16\n\x0egalley_address\x18\x16 \x01(\t\x1a\xa5\x01\n\x15OutboundTrafficPolicy\x12H\n\x04mode\x18\x01 
\x01(\x0e\x32:.istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy.Mode\"B\n\x04Mode\x12\x11\n\rREGISTRY_ONLY\x10\x00\x12\r\n\tALLOW_ANY\x10\x01\x12\x18\n\x14VIRTUAL_SERVICE_ONLY\x10\x02\"9\n\x15IngressControllerMode\x12\x07\n\x03OFF\x10\x00\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x01\x12\n\n\x06STRICT\x10\x02\"&\n\nAuthPolicy\x12\x08\n\x04NONE\x10\x00\x12\x0e\n\nMUTUAL_TLS\x10\x01J\x04\x08\x0b\x10\x0cJ\x04\x08\x0f\x10\x10J\x04\x08\x10\x10\x11J\x04\x08\x12\x10\x13J\x04\x08\x15\x10\x16*>\n\x14\x41uthenticationPolicy\x12\x08\n\x04NONE\x10\x00\x12\x0e\n\nMUTUAL_TLS\x10\x01\x12\x0c\n\x07INHERIT\x10\xe8\x07\x42\x1cZ\x1aistio.io/api/mesh/v1alpha1b\x06proto3')
,
dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,])
_AUTHENTICATIONPOLICY = _descriptor.EnumDescriptor(
name='AuthenticationPolicy',
full_name='istio.mesh.v1alpha1.AuthenticationPolicy',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MUTUAL_TLS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INHERIT', index=2, number=1000,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2073,
serialized_end=2135,
)
_sym_db.RegisterEnumDescriptor(_AUTHENTICATIONPOLICY)
AuthenticationPolicy = enum_type_wrapper.EnumTypeWrapper(_AUTHENTICATIONPOLICY)
NONE = 0
MUTUAL_TLS = 1
INHERIT = 1000
_PROXYCONFIG_INBOUNDINTERCEPTIONMODE = _descriptor.EnumDescriptor(
name='InboundInterceptionMode',
full_name='istio.mesh.v1alpha1.ProxyConfig.InboundInterceptionMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REDIRECT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TPROXY', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1017,
serialized_end=1068,
)
_sym_db.RegisterEnumDescriptor(_PROXYCONFIG_INBOUNDINTERCEPTIONMODE)
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE = _descriptor.EnumDescriptor(
name='Mode',
full_name='istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy.Mode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REGISTRY_ONLY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALLOW_ANY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VIRTUAL_SERVICE_ONLY', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1876,
serialized_end=1942,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE)
_MESHCONFIG_INGRESSCONTROLLERMODE = _descriptor.EnumDescriptor(
name='IngressControllerMode',
full_name='istio.mesh.v1alpha1.MeshConfig.IngressControllerMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OFF', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRICT', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1944,
serialized_end=2001,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_INGRESSCONTROLLERMODE)
_MESHCONFIG_AUTHPOLICY = _descriptor.EnumDescriptor(
name='AuthPolicy',
full_name='istio.mesh.v1alpha1.MeshConfig.AuthPolicy',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MUTUAL_TLS', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2003,
serialized_end=2041,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_AUTHPOLICY)
_TRACING_ZIPKIN = _descriptor.Descriptor(
name='Zipkin',
full_name='istio.mesh.v1alpha1.Tracing.Zipkin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='istio.mesh.v1alpha1.Tracing.Zipkin.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=211,
serialized_end=236,
)
_TRACING_LIGHTSTEP = _descriptor.Descriptor(
name='Lightstep',
full_name='istio.mesh.v1alpha1.Tracing.Lightstep',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='istio.mesh.v1alpha1.Tracing.Lightstep.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_token', full_name='istio.mesh.v1alpha1.Tracing.Lightstep.access_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='secure', full_name='istio.mesh.v1alpha1.Tracing.Lightstep.secure', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cacert_path', full_name='istio.mesh.v1alpha1.Tracing.Lightstep.cacert_path', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=238,
serialized_end=325,
)
_TRACING = _descriptor.Descriptor(
name='Tracing',
full_name='istio.mesh.v1alpha1.Tracing',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='zipkin', full_name='istio.mesh.v1alpha1.Tracing.zipkin', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lightstep', full_name='istio.mesh.v1alpha1.Tracing.lightstep', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TRACING_ZIPKIN, _TRACING_LIGHTSTEP, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='tracer', full_name='istio.mesh.v1alpha1.Tracing.tracer',
index=0, containing_type=None, fields=[]),
],
serialized_start=84,
serialized_end=335,
)
_PROXYCONFIG = _descriptor.Descriptor(
name='ProxyConfig',
full_name='istio.mesh.v1alpha1.ProxyConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='config_path', full_name='istio.mesh.v1alpha1.ProxyConfig.config_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='binary_path', full_name='istio.mesh.v1alpha1.ProxyConfig.binary_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service_cluster', full_name='istio.mesh.v1alpha1.ProxyConfig.service_cluster', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='drain_duration', full_name='istio.mesh.v1alpha1.ProxyConfig.drain_duration', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent_shutdown_duration', full_name='istio.mesh.v1alpha1.ProxyConfig.parent_shutdown_duration', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='discovery_address', full_name='istio.mesh.v1alpha1.ProxyConfig.discovery_address', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zipkin_address', full_name='istio.mesh.v1alpha1.ProxyConfig.zipkin_address', index=6,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connect_timeout', full_name='istio.mesh.v1alpha1.ProxyConfig.connect_timeout', index=7,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='statsd_udp_address', full_name='istio.mesh.v1alpha1.ProxyConfig.statsd_udp_address', index=8,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proxy_admin_port', full_name='istio.mesh.v1alpha1.ProxyConfig.proxy_admin_port', index=9,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='control_plane_auth_policy', full_name='istio.mesh.v1alpha1.ProxyConfig.control_plane_auth_policy', index=10,
number=13, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='custom_config_file', full_name='istio.mesh.v1alpha1.ProxyConfig.custom_config_file', index=11,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stat_name_length', full_name='istio.mesh.v1alpha1.ProxyConfig.stat_name_length', index=12,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='concurrency', full_name='istio.mesh.v1alpha1.ProxyConfig.concurrency', index=13,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proxy_bootstrap_template_path', full_name='istio.mesh.v1alpha1.ProxyConfig.proxy_bootstrap_template_path', index=14,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='interception_mode', full_name='istio.mesh.v1alpha1.ProxyConfig.interception_mode', index=15,
number=18, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tracing', full_name='istio.mesh.v1alpha1.ProxyConfig.tracing', index=16,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_PROXYCONFIG_INBOUNDINTERCEPTIONMODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=338,
serialized_end=1080,
)
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY = _descriptor.Descriptor(
name='OutboundTrafficPolicy',
full_name='istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mode', full_name='istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy.mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1777,
serialized_end=1942,
)
_MESHCONFIG = _descriptor.Descriptor(
name='MeshConfig',
full_name='istio.mesh.v1alpha1.MeshConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mixer_check_server', full_name='istio.mesh.v1alpha1.MeshConfig.mixer_check_server', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mixer_report_server', full_name='istio.mesh.v1alpha1.MeshConfig.mixer_report_server', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_policy_checks', full_name='istio.mesh.v1alpha1.MeshConfig.disable_policy_checks', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proxy_listen_port', full_name='istio.mesh.v1alpha1.MeshConfig.proxy_listen_port', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proxy_http_port', full_name='istio.mesh.v1alpha1.MeshConfig.proxy_http_port', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connect_timeout', full_name='istio.mesh.v1alpha1.MeshConfig.connect_timeout', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ingress_class', full_name='istio.mesh.v1alpha1.MeshConfig.ingress_class', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ingress_service', full_name='istio.mesh.v1alpha1.MeshConfig.ingress_service', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ingress_controller_mode', full_name='istio.mesh.v1alpha1.MeshConfig.ingress_controller_mode', index=8,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='auth_policy', full_name='istio.mesh.v1alpha1.MeshConfig.auth_policy', index=9,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001')), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_tracing', full_name='istio.mesh.v1alpha1.MeshConfig.enable_tracing', index=10,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_log_file', full_name='istio.mesh.v1alpha1.MeshConfig.access_log_file', index=11,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_config', full_name='istio.mesh.v1alpha1.MeshConfig.default_config', index=12,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outbound_traffic_policy', full_name='istio.mesh.v1alpha1.MeshConfig.outbound_traffic_policy', index=13,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_client_side_policy_check', full_name='istio.mesh.v1alpha1.MeshConfig.enable_client_side_policy_check', index=14,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sds_uds_path', full_name='istio.mesh.v1alpha1.MeshConfig.sds_uds_path', index=15,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='galley_address', full_name='istio.mesh.v1alpha1.MeshConfig.galley_address', index=16,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MESHCONFIG_OUTBOUNDTRAFFICPOLICY, ],
enum_types=[
_MESHCONFIG_INGRESSCONTROLLERMODE,
_MESHCONFIG_AUTHPOLICY,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1083,
serialized_end=2071,
)
_TRACING_ZIPKIN.containing_type = _TRACING
_TRACING_LIGHTSTEP.containing_type = _TRACING
_TRACING.fields_by_name['zipkin'].message_type = _TRACING_ZIPKIN
_TRACING.fields_by_name['lightstep'].message_type = _TRACING_LIGHTSTEP
_TRACING.oneofs_by_name['tracer'].fields.append(
_TRACING.fields_by_name['zipkin'])
_TRACING.fields_by_name['zipkin'].containing_oneof = _TRACING.oneofs_by_name['tracer']
_TRACING.oneofs_by_name['tracer'].fields.append(
_TRACING.fields_by_name['lightstep'])
_TRACING.fields_by_name['lightstep'].containing_oneof = _TRACING.oneofs_by_name['tracer']
_PROXYCONFIG.fields_by_name['drain_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_PROXYCONFIG.fields_by_name['parent_shutdown_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_PROXYCONFIG.fields_by_name['connect_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_PROXYCONFIG.fields_by_name['control_plane_auth_policy'].enum_type = _AUTHENTICATIONPOLICY
_PROXYCONFIG.fields_by_name['interception_mode'].enum_type = _PROXYCONFIG_INBOUNDINTERCEPTIONMODE
_PROXYCONFIG.fields_by_name['tracing'].message_type = _TRACING
_PROXYCONFIG_INBOUNDINTERCEPTIONMODE.containing_type = _PROXYCONFIG
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY.fields_by_name['mode'].enum_type = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY.containing_type = _MESHCONFIG
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE.containing_type = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY
_MESHCONFIG.fields_by_name['connect_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['ingress_controller_mode'].enum_type = _MESHCONFIG_INGRESSCONTROLLERMODE
_MESHCONFIG.fields_by_name['auth_policy'].enum_type = _MESHCONFIG_AUTHPOLICY
_MESHCONFIG.fields_by_name['default_config'].message_type = _PROXYCONFIG
_MESHCONFIG.fields_by_name['outbound_traffic_policy'].message_type = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY
_MESHCONFIG_INGRESSCONTROLLERMODE.containing_type = _MESHCONFIG
_MESHCONFIG_AUTHPOLICY.containing_type = _MESHCONFIG
DESCRIPTOR.message_types_by_name['Tracing'] = _TRACING
DESCRIPTOR.message_types_by_name['ProxyConfig'] = _PROXYCONFIG
DESCRIPTOR.message_types_by_name['MeshConfig'] = _MESHCONFIG
DESCRIPTOR.enum_types_by_name['AuthenticationPolicy'] = _AUTHENTICATIONPOLICY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Tracing = _reflection.GeneratedProtocolMessageType('Tracing', (_message.Message,), dict(
Zipkin = _reflection.GeneratedProtocolMessageType('Zipkin', (_message.Message,), dict(
DESCRIPTOR = _TRACING_ZIPKIN,
__module__ = 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.Tracing.Zipkin)
))
,
Lightstep = _reflection.GeneratedProtocolMessageType('Lightstep', (_message.Message,), dict(
DESCRIPTOR = _TRACING_LIGHTSTEP,
__module__ = 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.Tracing.Lightstep)
))
,
DESCRIPTOR = _TRACING,
__module__ = 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.Tracing)
))
_sym_db.RegisterMessage(Tracing)
_sym_db.RegisterMessage(Tracing.Zipkin)
_sym_db.RegisterMessage(Tracing.Lightstep)
ProxyConfig = _reflection.GeneratedProtocolMessageType('ProxyConfig', (_message.Message,), dict(
DESCRIPTOR = _PROXYCONFIG,
__module__ = 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.ProxyConfig)
))
_sym_db.RegisterMessage(ProxyConfig)
MeshConfig = _reflection.GeneratedProtocolMessageType('MeshConfig', (_message.Message,), dict(
OutboundTrafficPolicy = _reflection.GeneratedProtocolMessageType('OutboundTrafficPolicy', (_message.Message,), dict(
DESCRIPTOR = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY,
__module__ = 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy)
))
,
DESCRIPTOR = _MESHCONFIG,
__module__ = 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.MeshConfig)
))
_sym_db.RegisterMessage(MeshConfig)
_sym_db.RegisterMessage(MeshConfig.OutboundTrafficPolicy)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\032istio.io/api/mesh/v1alpha1'))
_PROXYCONFIG.fields_by_name['zipkin_address'].has_options = True
_PROXYCONFIG.fields_by_name['zipkin_address']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_MESHCONFIG.fields_by_name['auth_policy'].has_options = True
_MESHCONFIG.fields_by_name['auth_policy']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
# @@protoc_insertion_point(module_scope)
| 46.652047 | 3,662 | 0.753682 |
329e284086b62a6e1a77413ad523328211ccc0a1
| 602 |
py
|
Python
|
spaces/migrations/0003_model_permissions.py
|
jgillick/Spaces
|
96247701d530a017f10a0bd0ac6cf241d621be11
|
[
"MIT"
] | 1 |
2018-08-12T23:43:45.000Z
|
2018-08-12T23:43:45.000Z
|
spaces/migrations/0003_model_permissions.py
|
jgillick/Spaces
|
96247701d530a017f10a0bd0ac6cf241d621be11
|
[
"MIT"
] | 3 |
2016-01-13T10:12:51.000Z
|
2016-01-13T10:13:15.000Z
|
spaces/migrations/0003_model_permissions.py
|
jgillick/Spaces
|
96247701d530a017f10a0bd0ac6cf241d621be11
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-10 08:15
from __future__ import unicode_literals
from django.db import migrations
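# Adds custom "view" permissions to the Document and Space models.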
class Migration(migrations.Migration):
dependencies = [
('spaces', '0002_populate_default_spaces'),
]
operations = [
migrations.AlterModelOptions(
name='document',
options={'permissions': (('view_document', 'Can view a document'),)},
),
migrations.AlterModelOptions(
name='space',
options={'permissions': (('view_space', 'Can view a space'),)},
),
]
| 25.083333 | 81 | 0.596346 |
894a455bf05a7148ad722cda4e86f040255cdfbe
| 3,892 |
py
|
Python
|
2_Clasification/exo2-naive.py
|
Focom/NLPWork2
|
b83cd114b71f5be9d18a322197e4ac4fd9b094ba
|
[
"MIT"
] | null | null | null |
2_Clasification/exo2-naive.py
|
Focom/NLPWork2
|
b83cd114b71f5be9d18a322197e4ac4fd9b094ba
|
[
"MIT"
] | null | null | null |
2_Clasification/exo2-naive.py
|
Focom/NLPWork2
|
b83cd114b71f5be9d18a322197e4ac4fd9b094ba
|
[
"MIT"
] | null | null | null |
import exo2, pandas, glob
from sklearn.naive_bayes import MultinomialNB as mod
from sklearn.ensemble import RandomForestClassifier as mod2
from sklearn.feature_extraction.text import CountVectorizer
# Classifier choice: i=1 -> Naive Bayes, otherwise Random Forest
def choiceClassifier(i):
if(i==1):
classifier=mod
else:
classifier=mod2
return classifier
# Build the csv file if it is not already present in the directory
def construccsv():
a = glob.glob("*.csv")
if (len(a)==0):
exo2.constructcsv()
construccsv()
# Build the predictive model according to choiceClassifier() and predict on the rows held out for testing
def constructModel(i,cc,j):
classifieur=choiceClassifier(j)
    # Turn the csv document into a dataframe with pandas
df_train= pandas.read_csv('mycsv.csv')
final=pandas.DataFrame(data=df_train)
    # y will be the class vector and x the associated question vector
vecteurClasseTrain=final["Classe"][:cc]
vecteurQuestion=final["Question"]
classifier=classifieur()
targetsClasse=vecteurClasseTrain[:cc].values
vecteurClasseTest=final["Classe"][cc:389].values
count_vectorizer = CountVectorizer()
counts = count_vectorizer.fit_transform(vecteurQuestion[:cc].values)
# print(count_vectorizer.get_feature_names())
classifier.fit(counts, targetsClasse)
examples = vecteurQuestion[cc:389]
example_counts = count_vectorizer.transform(examples)
predictions = classifier.predict(example_counts)
if (i==1):
return predictions
elif(i==2):
return vecteurClasseTest
elif(i==3):
return examples
# Build a dictionary that records, for each test row, the predicted class and whether it matches the true class
def construcTableRP(predictions,trueclass):
result = {}
for i in range(0,len(predictions)):
if(predictions[i]==trueclass[i]):
result[str(i)]=({
"class":predictions[i],
"bool": True
})
else:
result[str(i)]=({
"class": predictions[i],
"bool": False
})
return result
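# True positives: predictions of the given class that match the true class.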
def truePositive(classe,tailletraining,j):
data = construcTableRP(constructModel(1,tailletraining,j),constructModel(2,tailletraining,j))
result=0
for i in range(0,len(data)):
if ((classe == data[str(i)]["class"]) & (data[str(i)]["bool"])) :
# print(data[str(i)]["class"])
result+=1
return result
# print(truePositive("DEFINITION",300))
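# False positives: predictions of the given class that do not match the true class.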
def falsePositive(classe,tailletraining,j):
data = construcTableRP(constructModel(1,tailletraining,j),constructModel(2,tailletraining,j))
# print(data)
result=0
for i in range(0,len(data)):
if ((classe == data[str(i)]["class"]) & (data[str(i)]["bool"]==False)) :
result+=1
return result
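# Counts every test row whose true class differs from classeOption; the predictions are not consulted.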
def trueNegative(classeOption,tailletraining,j):
data = constructModel(2,tailletraining,j)
data.sort()
result=0
print(data)
for classe in data:
if(classe!=classeOption):
result+=1
return result
# print(trueNegative("DEFINITION",300,1))
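# Counts every test row whose true class equals classeOption, i.e. TP + FN for that class.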
def falseNegative(classeOption,tailletraining,j):
data = constructModel(2,tailletraining,j)
data.sort()
result=0
print(data)
for classe in data:
if(classe==classeOption):
result+=1
return result
# print(falseNegative("DEFINITION",300,1))
def precision(classe,trainingSize,j):
return truePositive(classe,trainingSize,j)/(truePositive(classe,trainingSize,j)+falsePositive(classe,trainingSize,j))
print(precision("DEFINITION",300,2))
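# recall = TP / (TP + FN): falseNegative() above returns the total number of true instances of the class, which is TP + FN.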
def recall(classe,trainingSize,j):
return truePositive(classe,trainingSize,j)/(falseNegative(classe,trainingSize,j))
# print(recall("DEFINITION",300))
| 24.632911 | 130 | 0.676259 |
151877c6fea1c61308f6fbfbe50797d8f163e587
| 460 |
py
|
Python
|
fluent_python/object/func_para_reference.py
|
helloTC/LearnPython
|
bd5fc977c800f3dc2d239b8cb7ad7e6e1b42fce8
|
[
"MIT"
] | null | null | null |
fluent_python/object/func_para_reference.py
|
helloTC/LearnPython
|
bd5fc977c800f3dc2d239b8cb7ad7e6e1b42fce8
|
[
"MIT"
] | null | null | null |
fluent_python/object/func_para_reference.py
|
helloTC/LearnPython
|
bd5fc977c800f3dc2d239b8cb7ad7e6e1b42fce8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
def f(a, b):
a += b
return a
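# a += b mutates a mutable argument (list) in place but rebinds immutable ones (int, tuple), so only x3 changes for the caller.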
if __name__ == "__main__":
x1 = 1
x2 = 2
print("integer number: {}".format(f(x1, x2)))
print("x1 = {0}, x2 = {1}".format(x1, x2))
x3 = [1,2]
x4 = [3,4]
print("list: {}".format(f(x3, x4)))
print("x3 = {0}, x4 = {1}".format(x3, x4))
x5 = (1,2)
x6 = (3,4)
print("tuple: {}".format(f(x5, x6)))
print("x5 = {1}, x6 = {0}".format(x5, x6))
| 19.166667 | 49 | 0.463043 |
0a4a9fd3b088da30213e335da832666fbc1950a3
| 601 |
py
|
Python
|
boucanpy/cli/alembic/alembic_current.py
|
bbhunter/boucanpy
|
7d2fb105e7b1e90653a511534fb878bb62d02f17
|
[
"MIT"
] | 34 |
2019-11-16T17:22:15.000Z
|
2022-02-11T23:12:46.000Z
|
boucanpy/cli/alembic/alembic_current.py
|
bbhunter/boucanpy
|
7d2fb105e7b1e90653a511534fb878bb62d02f17
|
[
"MIT"
] | 1 |
2021-02-09T09:34:55.000Z
|
2021-02-10T21:46:20.000Z
|
boucanpy/cli/alembic/alembic_current.py
|
bbhunter/boucanpy
|
7d2fb105e7b1e90653a511534fb878bb62d02f17
|
[
"MIT"
] | 9 |
2019-11-18T22:18:07.000Z
|
2021-02-08T13:23:51.000Z
|
from os.path import join
from boucanpy.core.utils import db_dir
from boucanpy.db.session import session, db_register
from boucanpy.db.utils import make_db_url
from boucanpy.db.migrate.current import current
from boucanpy.cli.base import BaseCommand
class AlembicCurrent(BaseCommand):
name = "alembic-current"
aliases = ["al-current"]
description = "run alembic current"
migration_dir = join(db_dir("alembic"), "api")
@classmethod
def parser(cls, parser):
return parser
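    # Register the DB session from the generated URL, then report the current alembic revision of the bundled API migrations.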
async def run(self):
db_register(make_db_url())
current(self.migration_dir)
| 27.318182 | 52 | 0.728785 |
210217c0aef4046ae47dcd7821eb79b6266489ee
| 4,546 |
py
|
Python
|
vfoot/runner.py
|
filipecn/vfoot
|
3059f5bb471b6bdf92a18a7cdb6b33a2c8852046
|
[
"MIT"
] | null | null | null |
vfoot/runner.py
|
filipecn/vfoot
|
3059f5bb471b6bdf92a18a7cdb6b33a2c8852046
|
[
"MIT"
] | null | null | null |
vfoot/runner.py
|
filipecn/vfoot
|
3059f5bb471b6bdf92a18a7cdb6b33a2c8852046
|
[
"MIT"
] | null | null | null |
import graphics
import imgui
from game.game import Game, GameState
from graphics.division_round_screen import DivisionRoundScreen
from graphics.manager_screen import ManagerScreen
game = None
division_round_screen = None
manager_screens = []
current_manager_screen = 0
class ChooseCountriesScreen:
def __init__(self):
# screen elements
self.country_name = ["Pais " + str(i) for i in range(4)]
self.country_checked = 4 * [False]
# flow states
self.active = False
def country_list(self):
l = []
for i in range(4):
if self.country_checked[i]:
l.append(i)
return l
def draw(self):
completed = False
if self.active:
imgui.begin("##window", False, imgui.WINDOW_NO_TITLE_BAR)
if imgui.button("Select All"):
for i in range(len(self.country_checked)):
self.country_checked[i] = True
imgui.same_line()
if imgui.button("Select None"):
for i in range(len(self.country_checked)):
self.country_checked[i] = False
for i in range(4):
_, state = imgui.checkbox(self.country_name[i],
self.country_checked[i])
self.country_checked[i] = state
if imgui.button("next"):
if True in self.country_checked:
self.active = False
completed = True
imgui.end()
return completed
class ChooseManagersScreen:
def __init__(self):
# screen elements
self.manager_label = ["Tecnico " + str(i) for i in range(6)]
self.manager_name = 6 * [""]
# flow states
self.active = False
def manager_list(self):
l = []
for name in self.manager_name:
if len(name) > 0:
l.append(name)
return l
def draw(self):
completed = False
if self.active:
imgui.begin("##window", False, imgui.WINDOW_NO_TITLE_BAR)
for i in range(6):
_, value = imgui.input_text(self.manager_label[i],
self.manager_name[i], 30)
self.manager_name[i] = value
if imgui.button("play!"):
for name in self.manager_name:
if len(name):
self.active = False
completed = True
break
imgui.end()
return completed
choose_countries_screen = ChooseCountriesScreen()
choose_managers_screen = ChooseManagersScreen()
def draw_new_game_window():
pass
def draw_main_menu():
global choose_countries_screen
if imgui.begin_main_menu_bar():
if imgui.begin_menu("Jogo", True):
clicked, _ = imgui.menu_item(
"Novo Jogo", 'Cmd+S', False, True
)
if clicked:
choose_countries_screen.active = True
clicked, _ = imgui.menu_item(
"Quit", 'Cmd+Q', False, True
)
if clicked:
exit(1)
imgui.end_menu()
imgui.end_main_menu_bar()
def render():
global game
global division_round_screen
global manager_screens
global current_manager_screen
draw_main_menu()
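    # New-game flow: pick countries, then managers, then build the Game and one screen per manager.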
if choose_countries_screen.draw():
choose_managers_screen.active = True
if choose_managers_screen.draw():
game = Game(
choose_managers_screen.manager_list(),
choose_countries_screen.country_list())
# create manager screens
print(choose_managers_screen.manager_list())
for manager in choose_managers_screen.manager_list():
manager_screens.append(ManagerScreen(game, manager))
if game is not None:
game.run()
if game.current_state == GameState.DIVISION_ROUND_STATE:
if division_round_screen is None:
division_round_screen = DivisionRoundScreen(game)
division_round_screen.draw()
elif game.current_state == GameState.MANAGER_STATE:
if current_manager_screen >= len(manager_screens):
current_manager_screen = 0
game.current_state = GameState.DIVISION_ROUND_STATE
elif manager_screens[current_manager_screen].draw():
current_manager_screen += 1
if __name__ == "__main__":
graphics.app(render)
| 30.306667 | 69 | 0.573471 |
f75470306c4f47a4c9f93d16757d465a2fa4d4bb
| 24,470 |
py
|
Python
|
depend/zcash/qa/rpc-tests/fundrawtransaction.py
|
ZcashFoundation/zcashconsensus
|
c9fbc441efd78593ba6a9828be45baf2d6469757
|
[
"Apache-2.0"
] | null | null | null |
depend/zcash/qa/rpc-tests/fundrawtransaction.py
|
ZcashFoundation/zcashconsensus
|
c9fbc441efd78593ba6a9828be45baf2d6469757
|
[
"Apache-2.0"
] | 1 |
2020-07-17T14:09:32.000Z
|
2020-07-17T14:09:32.000Z
|
depend/zcash/qa/rpc-tests/fundrawtransaction.py
|
ZcashFoundation/zcashconsensus
|
c9fbc441efd78593ba6a9828be45baf2d6469757
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.test_framework import BitcoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal, assert_greater_than, \
start_nodes, connect_nodes_bi, stop_nodes, \
wait_bitcoinds
from decimal import Decimal
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-experimentalfeatures', '-developerencryptwallet']] * 4)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
        # If the fee's positive delta is higher than this value the tests will fail;
        # a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = max(2 * min_relay_tx_fee/1000, Decimal("0.00000001"))
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(201)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
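        # node3 only watches this address (no private key); it is used by the watch-only funding tests below.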
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_equal(len(dec_tx['vin']) > 0, True) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_equal(len(dec_tx['vin']) > 0, True) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_equal(len(dec_tx['vin']) > 0, True)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(len(dec_tx['vin']) > 0, True)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 5.0:
utx = aUtx
break
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal('1.0') }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 5.0:
utx = aUtx
break
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal('5.0') - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 1.0:
utx = aUtx
break
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal('1.0') }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 4-byte versionGroupId + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:90] + "0100" + rawtx[92:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 1.0:
utx = aUtx
if aUtx['amount'] == 5.0:
utx2 = aUtx
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 1.0:
utx = aUtx
if aUtx['amount'] == 5.0:
utx2 = aUtx
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
errorString = ""
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
except JSONRPCException as e:
errorString = e.error['message']
assert_equal("Insufficient" in errorString, True)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
error = False
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2)
except:
error = True
assert(error)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('10.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert_equal("fee" in result.keys(), True)
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main()
| 40.313015 | 223 | 0.555497 |
c56b66c960cb59012ea4751d6bf0e410cf392a91
| 8,777 |
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/netapp_e_snapshot_images.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17 |
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/netapp_e_snapshot_images.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 9 |
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/netapp_e_snapshot_images.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 3 |
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_snapshot_images
short_description: NetApp E-Series create and delete snapshot images
description:
- Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays.
- Only the oldest snapshot image can be deleted so consistency is preserved.
- "Related: Snapshot volumes are created from snapshot images."
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
snapshot_group:
description:
- The name of the snapshot group in which you want to create a snapshot image.
required: True
state:
description:
- Whether a new snapshot image should be created or oldest be deleted.
required: True
choices: ['create', 'remove']
"""
EXAMPLES = """
- name: Create Snapshot
netapp_e_snapshot_images:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ validate_certs }}"
snapshot_group: "3300000060080E5000299C24000005B656D9F394"
state: 'create'
"""
RETURN = """
---
msg:
description: State of operation
type: str
returned: always
sample: "Created snapshot image"
image_id:
description: ID of snapshot image
type: str
returned: state == created
sample: "3400000060080E5000299B640063074057BC5C5E "
"""
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
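# Thin wrapper around open_url that returns (status_code, decoded JSON) and can optionally ignore HTTP errors.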
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as err:
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except Exception:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name):
snap_groups = 'storage-systems/%s/snapshot-groups' % ssid
snap_groups_url = api_url + snap_groups
(ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
snapshot_group_id = None
for snapshot_group in snapshot_groups:
if name == snapshot_group['label']:
snapshot_group_id = snapshot_group['pitGroupRef']
break
if snapshot_group_id is None:
module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid))
return snapshot_group
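# Return the oldest snapshot image in the group (lowest pitSequenceNumber).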
def oldest_image(module, ssid, api_url, api_pwd, api_usr, name):
get_status = 'storage-systems/%s/snapshot-images' % ssid
url = api_url + get_status
try:
(ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
except Exception as err:
module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" %
(name, ssid, to_native(err)))
if not images:
module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid))
oldest = min(images, key=lambda x: x['pitSequenceNumber'])
if oldest is None or "pitRef" not in oldest:
module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." % (name, ssid))
return oldest
def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
snapshot_group_id = snapshot_group_obj['pitGroupRef']
endpoint = 'storage-systems/%s/snapshot-images' % ssid
url = api_url + endpoint
post_data = json.dumps({'groupId': snapshot_group_id})
image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
if image_data[1]['status'] == 'optimal':
status = True
id = image_data[1]['id']
else:
status = False
id = ''
return status, id
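# Only the oldest image may be deleted, so look it up and remove it by its pitRef.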
def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
image_id = image['pitRef']
endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
url = api_url + endpoint
try:
(ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
validate_certs=module.params['validate_certs'])
    except Exception as e:
        # Exception instances are not subscriptable on Python 3; unpack the (status, data) payload from e.args.
        image_data = (e.args[0], e.args[1])
if ret == 204:
deleted_status = True
error_message = ''
else:
deleted_status = False
error_message = image_data[1]['errorMessage']
return deleted_status, error_message
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
snapshot_group=dict(required=True, type='str'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, default=True),
state=dict(required=True, choices=['create', 'remove'], type='str'),
))
module = AnsibleModule(argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
snapshot_group = p.pop('snapshot_group')
desired_state = p.pop('state')
if not api_url.endswith('/'):
api_url += '/'
if desired_state == 'create':
created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)
if created_status:
module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
else:
module.fail_json(
msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
else:
deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)
if deleted:
module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
else:
module.fail_json(
msg="Could not create snapshot image on system %s, in snapshot group %s --- %s" % (
ssid, snapshot_group, error_msg))
if __name__ == '__main__':
main()
| 35.391129 | 115 | 0.649653 |
af20eb27938d8aea2c9c8159b9a8598d08f22bb7
| 3,380 |
py
|
Python
|
tests/test_scaffold.py
|
abhishak3/ploomber
|
6041bcd381b7fd9a7525f94edd0ae1b03b14bb8d
|
[
"Apache-2.0"
] | 2,141 |
2020-02-14T02:34:34.000Z
|
2022-03-31T22:43:20.000Z
|
tests/test_scaffold.py
|
abhishak3/ploomber
|
6041bcd381b7fd9a7525f94edd0ae1b03b14bb8d
|
[
"Apache-2.0"
] | 660 |
2020-02-06T16:15:57.000Z
|
2022-03-31T22:55:01.000Z
|
tests/test_scaffold.py
|
abhishak3/ploomber
|
6041bcd381b7fd9a7525f94edd0ae1b03b14bb8d
|
[
"Apache-2.0"
] | 122 |
2020-02-14T18:53:05.000Z
|
2022-03-27T22:33:24.000Z
|
from pathlib import Path
import ast
import pytest
from ploomber import tasks
from ploomber import scaffold
@pytest.mark.parametrize('name', ['task.py', 'task.ipynb'])
@pytest.mark.parametrize('extract_upstream', [False, True])
@pytest.mark.parametrize('extract_product', [False, True])
def test_renders_valid_script(name, extract_product, extract_upstream):
loader = scaffold.ScaffoldLoader('ploomber_add')
out = loader.render(name,
params=dict(extract_product=extract_product,
extract_upstream=extract_upstream))
# make sure it generates valid python code, except for the sql template
if not name.endswith('.sql'):
ast.parse(out)
@pytest.mark.parametrize('extract_upstream', [False, True])
@pytest.mark.parametrize('extract_product', [False, True])
def test_renders_valid_function(extract_product, extract_upstream):
loader = scaffold.ScaffoldLoader('ploomber_add')
out = loader.render('function.py',
params=dict(function_name='some_function',
extract_product=extract_product,
extract_upstream=extract_upstream))
module = ast.parse(out)
assert module.body[0].name == 'some_function'
def test_create_function(backup_test_pkg, tmp_directory):
loader = scaffold.ScaffoldLoader('ploomber_add')
loader.create('test_pkg.functions.new_function',
dict(extract_product=False, extract_upstream=True),
tasks.PythonCallable)
code = Path(backup_test_pkg, 'functions.py').read_text()
module = ast.parse(code)
function_names = {
element.name
for element in module.body if hasattr(element, 'name')
}
assert 'new_function' in function_names
def test_add_task_from_scaffold(backup_test_pkg, tmp_directory):
yaml = """
meta:
source_loader:
module: test_pkg
extract_product: True
tasks:
- source: notebook.ipynb
- source: notebook.py
- source: test_pkg.functions.my_new_function
"""
Path('pipeline.yaml').write_text(yaml)
# FIXME: this will fail because TaskSpec validates that the
# dotted path actually exists. I think the cleanest solution
# is to add a special class method for DAGSpec that allows the lazy
# load to skip validating the last attribute...
spec, path_to_spec = scaffold.load_dag()
scaffold.add(spec, path_to_spec)
code = Path(backup_test_pkg, 'functions.py').read_text()
module = ast.parse(code)
function_names = {
element.name
for element in module.body if hasattr(element, 'name')
}
assert 'my_new_function' in function_names
assert Path(backup_test_pkg, 'notebook.ipynb').exists()
assert Path(backup_test_pkg, 'notebook.py').exists()
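# Tasks declared in a file referenced by import_tasks_from are scaffolded next to that file.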
def test_add_task_when_using_import_tasks_from(tmp_directory):
spec = """
meta:
import_tasks_from: subdir/tasks.yaml
extract_product: True
tasks: []
"""
tasks = """
- source: notebook.py
"""
Path('pipeline.yaml').write_text(spec)
subdir = Path('subdir')
subdir.mkdir()
(subdir / 'tasks.yaml').write_text(tasks)
spec, path_to_spec = scaffold.load_dag()
scaffold.add(spec, path_to_spec)
assert (subdir / 'notebook.py').exists()
| 30.178571 | 75 | 0.668343 |
cf1e09d73f67a4c125a5ad0c8d9129f2639bda89
| 9,640 |
py
|
Python
|
aea/cli/scaffold.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | null | null | null |
aea/cli/scaffold.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | null | null | null |
aea/cli/scaffold.py
|
marcofavorito/agents-aea
|
e520f2f5d076a193514e194d94aa76c6423ac5bc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Implementation of the 'aea scaffold' subcommand."""
import os
import re
import shutil
from pathlib import Path
from typing import cast
import click
from jsonschema import ValidationError
from aea import AEA_DIR
from aea.cli.fingerprint import fingerprint_item
from aea.cli.utils.context import Context
from aea.cli.utils.decorators import check_aea_project, clean_after, pass_ctx
from aea.cli.utils.loggers import logger
from aea.cli.utils.package_utils import (
create_symlink_packages_to_vendor,
create_symlink_vendor_to_local,
validate_package_name,
)
from aea.configurations.base import PublicId
from aea.configurations.constants import ( # noqa: F401 # pylint: disable=unused-import
CONNECTION,
CONTRACT,
DEFAULT_AEA_CONFIG_FILE,
DEFAULT_CONNECTION_CONFIG_FILE,
DEFAULT_CONTRACT_CONFIG_FILE,
DEFAULT_PROTOCOL_CONFIG_FILE,
DEFAULT_SKILL_CONFIG_FILE,
DEFAULT_VERSION,
DOTTED_PATH_MODULE_ELEMENT_SEPARATOR,
PROTOCOL,
SCAFFOLD_PUBLIC_ID,
SKILL,
)
@click.group()
@click.option(
"--with-symlinks",
is_flag=True,
help="Add symlinks from vendor to non-vendor and packages to vendor folders.",
)
@click.pass_context
@check_aea_project
def scaffold(
click_context: click.core.Context, with_symlinks: bool
): # pylint: disable=unused-argument
"""Scaffold a package for the agent."""
ctx = cast(Context, click_context.obj)
ctx.set_config("with_symlinks", with_symlinks)
@scaffold.command()
@click.argument("connection_name", type=str, required=True)
@pass_ctx
def connection(ctx: Context, connection_name: str) -> None:
"""Add a connection scaffolding to the configuration file and agent."""
scaffold_item(ctx, CONNECTION, connection_name)
@scaffold.command()
@click.argument("contract_name", type=str, required=True)
@pass_ctx
def contract(ctx: Context, contract_name: str) -> None:
"""Add a contract scaffolding to the configuration file and agent."""
scaffold_item(ctx, CONTRACT, contract_name)
@scaffold.command()
@click.argument("protocol_name", type=str, required=True)
@click.option("-y", "--yes", is_flag=True, default=False)
@pass_ctx
def protocol(ctx: Context, protocol_name: str, yes: bool):
"""Add a protocol scaffolding to the configuration file and agent."""
if yes or click.confirm(
"We highly recommend auto-generating protocols with the aea generate command. Do you really want to continue scaffolding?"
):
scaffold_item(ctx, PROTOCOL, protocol_name)
else:
click.echo("Aborted. Exit") # pragma: nocover
@scaffold.command()
@click.argument("skill_name", type=str, required=True)
@pass_ctx
def skill(ctx: Context, skill_name: str):
"""Add a skill scaffolding to the configuration file and agent."""
scaffold_item(ctx, SKILL, skill_name)
@scaffold.command()
@pass_ctx
def decision_maker_handler(ctx: Context):
"""Add a decision maker scaffolding to the configuration file and agent."""
_scaffold_dm_handler(ctx)
@scaffold.command()
@pass_ctx
def error_handler(ctx: Context):
"""Add an error scaffolding to the configuration file and agent."""
_scaffold_error_handler(ctx)
@clean_after
def scaffold_item(ctx: Context, item_type: str, item_name: str) -> None:
"""
Add an item scaffolding to the configuration file and agent.
:param ctx: Context object.
:param item_type: type of item.
:param item_name: item name.
:return: None
    :raises ClickException: if some error occurs.
"""
validate_package_name(item_name)
author_name = ctx.agent_config.author
loader = getattr(ctx, f"{item_type}_loader")
default_config_filename = globals()[f"DEFAULT_{item_type.upper()}_CONFIG_FILE"]
item_type_plural = item_type + "s"
existing_ids = getattr(ctx.agent_config, f"{item_type}s")
existing_ids_only_author_and_name = map(lambda x: (x.author, x.name), existing_ids)
# check if we already have an item with the same public id
if (author_name, item_name) in existing_ids_only_author_and_name:
raise click.ClickException(
f"A {item_type} with name '{item_name}' already exists. Aborting..."
)
agent_name = ctx.agent_config.agent_name
click.echo(
f"Adding {item_type} scaffold '{item_name}' to the agent '{agent_name}'..."
)
# create the item folder
Path(item_type_plural).mkdir(exist_ok=True)
dest = os.path.join(item_type_plural, item_name)
if os.path.exists(dest):
raise click.ClickException(
f"A {item_type} with this name already exists. Please choose a different name and try again."
)
ctx.clean_paths.append(str(dest))
try:
# copy the item package into the agent project.
src = Path(os.path.join(AEA_DIR, item_type_plural, "scaffold"))
logger.debug(f"Copying {item_type} modules. src={src} dst={dest}")
shutil.copytree(src, dest)
# add the item to the configurations.
logger.debug(f"Registering the {item_type} into {DEFAULT_AEA_CONFIG_FILE}")
new_public_id = PublicId(author_name, item_name, DEFAULT_VERSION)
existing_ids.add(new_public_id)
with open(os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE), "w") as fp:
ctx.agent_loader.dump(ctx.agent_config, fp)
# ensure the name in the yaml and the name of the folder are the same
config_filepath = Path(
ctx.cwd, item_type_plural, item_name, default_config_filename
)
with config_filepath.open() as fp:
config = loader.load(fp)
config.name = item_name
config.author = author_name
with config_filepath.open("w") as fp:
loader.dump(config, fp)
# update 'PUBLIC_ID' variable with the right public id
init_module = Path(dest, "__init__.py")
init_module.write_text(
re.sub(SCAFFOLD_PUBLIC_ID, str(new_public_id), init_module.read_text())
)
# fingerprint item.
fingerprint_item(ctx, item_type, new_public_id)
if ctx.config.get("with_symlinks", False):
click.echo(
"Adding symlinks from vendor to non-vendor and packages to vendor folders."
)
create_symlink_vendor_to_local(ctx, item_type, new_public_id)
create_symlink_packages_to_vendor(ctx)
except ValidationError:
raise click.ClickException(
f"Error when validating the {item_type} configuration file."
)
except Exception as e:
raise click.ClickException(str(e))
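# Hedged usage sketch (not part of the original module): from inside an initialised
# AEA project, the commands defined above are invoked on the command line roughly as
#     aea scaffold connection my_connection
#     aea scaffold --with-symlinks skill my_skill
#     aea scaffold protocol my_protocol -y
# where "my_connection", "my_skill" and "my_protocol" are hypothetical package names;
# each subcommand routes through scaffold_item() with the matching item type.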
def _scaffold_dm_handler(ctx: Context):
"""Scaffold the decision maker handler."""
_scaffold_non_package_item(
ctx,
"decision_maker_handler",
"decision maker handler",
"DecisionMakerHandler",
"decision_maker",
)
def _scaffold_error_handler(ctx: Context):
"""Scaffold the error handler."""
_scaffold_non_package_item(
ctx, "error_handler", "error handler", "ErrorHandler", "error_handler"
)
def _scaffold_non_package_item(
ctx: Context, item_type: str, type_name: str, class_name: str, aea_dir: str
):
"""
Scaffold a non-package item (e.g. decision maker handler, or error handler).
:param ctx: the CLI context.
:param item_type: the item type (e.g. 'decision_maker_handler')
:param type_name: the type name (e.g. "decision maker")
:param class_name: the class name (e.g. "DecisionMakerHandler")
:param aea_dir: the AEA directory that contains the scaffold module
:return: None
"""
existing_item = getattr(ctx.agent_config, item_type)
if existing_item != {}:
raise click.ClickException(
f"A {type_name} specification already exists. Aborting..."
)
dest = Path(f"{item_type}.py")
agent_name = ctx.agent_config.agent_name
click.echo(f"Adding {type_name} scaffold to the agent '{agent_name}'...")
# create the file name
dotted_path = f".{item_type}{DOTTED_PATH_MODULE_ELEMENT_SEPARATOR}{class_name}"
try:
# copy the item package into the agent project.
src = Path(os.path.join(AEA_DIR, aea_dir, "scaffold.py"))
logger.debug(f"Copying {type_name}. src={src} dst={dest}")
shutil.copyfile(src, dest)
# add the item to the configurations.
logger.debug(f"Registering the {type_name} into {DEFAULT_AEA_CONFIG_FILE}")
setattr(
ctx.agent_config,
item_type,
{
"dotted_path": str(dotted_path),
"file_path": str(os.path.join(".", dest)),
},
)
ctx.agent_loader.dump(
ctx.agent_config, open(os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE), "w")
)
except Exception as e:
os.remove(dest)
raise click.ClickException(str(e))
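# Hedged illustration (not part of the original module): for the error handler case,
# _scaffold_non_package_item() records an entry of roughly this shape in the agent
# configuration, where the separator comes from DOTTED_PATH_MODULE_ELEMENT_SEPARATOR:
#     {"dotted_path": ".error_handler<separator>ErrorHandler", "file_path": "./error_handler.py"}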
| 34.551971 | 130 | 0.67832 |
da17db7c0809ec0610e6a0ca4a4af84bc300bfa6 | 1,990 | py | Python | rooms-unified/main.py | tranhoangkhuongvn/my_unified_mahrl | d9cba06427a17a7f5feb4420412c6d8195bb0e1c | ["MIT"] | null | null | null | rooms-unified/main.py | tranhoangkhuongvn/my_unified_mahrl | d9cba06427a17a7f5feb4420412c6d8195bb0e1c | ["MIT"] | null | null | null | rooms-unified/main.py | tranhoangkhuongvn/my_unified_mahrl | d9cba06427a17a7f5feb4420412c6d8195bb0e1c | ["MIT"] | null | null | null |
import time
start_time = time.time()
# Shared replay buffer filled during the random walk and mined for subgoals
from ExperienceReplayMemory import ExperienceReplayMemory
experience_memory = ExperienceReplayMemory(memory_size=10000)
# Unsupervised subgoal discovery: clusters visited states into centroids and outliers
from SubgoalDiscovery import SubgoalDiscovery
subgoal_discovery = SubgoalDiscovery(n_clusters=8, experience_memory=experience_memory)
import gym
from gym_rooms.envs import *
environment = 'Rooms-v0'
env = gym.make(environment)
from trainer import RandomWalk
random_walk = RandomWalk(env=env, subgoal_discovery=subgoal_discovery, experience_memory=experience_memory)
# Let's do a random walk to discover subgoals such as centroids and outliers
random_walk.walk()
outliers = subgoal_discovery.outliers
centroids = subgoal_discovery.centroid_subgoals
subgoals = subgoal_discovery.G
randomwalk_USD_time = time.time()
print('Elapsed time for subgoal discovery: ', randomwalk_USD_time - start_time)
from hrl import Controller
controller = Controller(subgoal_discovery=subgoal_discovery)
env.cross_hallway = True
from trainer import PretrainController
pretainer = PretrainController( env=env,
controller=controller,
subgoal_discovery=subgoal_discovery)
pretainer.train()
# pretainer.controller.Q.save_model()
# pretainer.controller.Q.load_model()
from hrl import MetaController
meta_controller = MetaController(subgoal_discovery=subgoal_discovery)
from trainer import MetaControllerController
meta_controller_trainer = MetaControllerController( env=env,
controller=pretainer.controller,
meta_controller=meta_controller,
subgoal_discovery=subgoal_discovery)
meta_controller_trainer.train()
# from trainer import MetaControllerControllerUnified
# meta_controller_controller_trainer = MetaControllerControllerUnified( env=env,
# controller=pretainer.controller,
# meta_controller=meta_controller,
# subgoal_discovery=subgoal_discovery)
# meta_controller_controller_trainer.train()
# from trainer import VanillaRL
# vanilla_rl = VanillaRL(env=env)
# vanilla_rl.train()
| 31.09375 | 105 | 0.819095 |
c2cc434374970c86de0f8b3a55036da2ec975230 | 13,656 | py | Python | python/ray/tests/test_runtime_env_packaging.py | takeshi-yoshimura/ray | cc577c10edbfc8b4248e2776947e1e0d5dbf4585 | ["Apache-2.0"] | 1 | 2022-03-14T04:24:17.000Z | 2022-03-14T04:24:17.000Z | python/ray/tests/test_runtime_env_packaging.py | takeshi-yoshimura/ray | cc577c10edbfc8b4248e2776947e1e0d5dbf4585 | ["Apache-2.0"] | 21 | 2022-01-30T15:49:41.000Z | 2022-03-19T07:14:33.000Z | python/ray/tests/test_runtime_env_packaging.py | takeshi-yoshimura/ray | cc577c10edbfc8b4248e2776947e1e0d5dbf4585 | ["Apache-2.0"] | null | null | null |
import os
from pathlib import Path
import random
from shutil import copytree, rmtree, make_archive
import string
import sys
import tempfile
from filecmp import dircmp
import uuid
import pytest
from ray.ray_constants import KV_NAMESPACE_PACKAGE
from ray.experimental.internal_kv import _internal_kv_del, _internal_kv_exists
from ray._private.runtime_env.packaging import (
_dir_travel,
get_local_dir_from_uri,
get_uri_for_directory,
_get_excludes,
upload_package_if_needed,
parse_uri,
Protocol,
get_top_level_dir_from_compressed_package,
remove_dir_from_filepaths,
unzip_package,
)
TOP_LEVEL_DIR_NAME = "top_level"
ARCHIVE_NAME = "archive.zip"
def random_string(size: int = 10):
return "".join(random.choice(string.ascii_uppercase) for _ in range(size))
@pytest.fixture
def empty_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
@pytest.fixture
def random_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
subdir = path / "subdir"
subdir.mkdir(parents=True)
for _ in range(10):
p1 = path / random_string(10)
with p1.open("w") as f1:
f1.write(random_string(100))
p2 = path / random_string(10)
with p2.open("w") as f2:
f2.write(random_string(200))
yield tmp_dir
@pytest.fixture
def random_zip_file_without_top_level_dir(random_dir):
path = Path(random_dir)
make_archive(path / ARCHIVE_NAME[: ARCHIVE_NAME.rfind(".")], "zip", path)
yield str(path / ARCHIVE_NAME)
@pytest.fixture
def random_zip_file_with_top_level_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
top_level_dir = path / TOP_LEVEL_DIR_NAME
top_level_dir.mkdir(parents=True)
next_level_dir = top_level_dir
for _ in range(10):
p1 = next_level_dir / random_string(10)
with p1.open("w") as f1:
f1.write(random_string(100))
p2 = next_level_dir / random_string(10)
with p2.open("w") as f2:
f2.write(random_string(200))
dir1 = next_level_dir / random_string(15)
dir1.mkdir(parents=True)
dir2 = next_level_dir / random_string(15)
dir2.mkdir(parents=True)
next_level_dir = dir2
make_archive(
path / ARCHIVE_NAME[: ARCHIVE_NAME.rfind(".")],
"zip",
path,
TOP_LEVEL_DIR_NAME,
)
yield str(path / ARCHIVE_NAME)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
class TestGetURIForDirectory:
def test_invalid_directory(self):
with pytest.raises(ValueError):
get_uri_for_directory("/does/not/exist")
with pytest.raises(ValueError):
get_uri_for_directory("does/not/exist")
def test_determinism(self, random_dir):
# Check that it's deterministic for same data.
uris = {get_uri_for_directory(random_dir) for _ in range(10)}
assert len(uris) == 1
# Add one file, should be different now.
with open(Path(random_dir) / f"test_{random_string()}", "w") as f:
f.write(random_string())
assert {get_uri_for_directory(random_dir)} != uris
def test_relative_paths(self, random_dir):
# Check that relative or absolute paths result in the same URI.
p = Path(random_dir)
relative_uri = get_uri_for_directory(os.path.relpath(p))
absolute_uri = get_uri_for_directory(p.resolve())
assert relative_uri == absolute_uri
def test_excludes(self, random_dir):
# Excluding a directory should modify the URI.
included_uri = get_uri_for_directory(random_dir)
excluded_uri = get_uri_for_directory(random_dir, excludes=["subdir"])
assert included_uri != excluded_uri
# Excluding a directory should be the same as deleting it.
rmtree((Path(random_dir) / "subdir").resolve())
deleted_uri = get_uri_for_directory(random_dir)
assert deleted_uri == excluded_uri
def test_empty_directory(self):
try:
os.mkdir("d1")
os.mkdir("d2")
assert get_uri_for_directory("d1") == get_uri_for_directory("d2")
finally:
os.rmdir("d1")
os.rmdir("d2")
def test_uri_hash_length(self, random_dir):
uri = get_uri_for_directory(random_dir)
hex_hash = uri.split("_")[-1][: -len(".zip")]
assert len(hex_hash) == 16
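# Hedged usage sketch (not part of the original tests): outside of pytest, the helper
# exercised above can be called directly, e.g.
#     uri = get_uri_for_directory("/path/to/working_dir", excludes=["subdir"])
# "/path/to/working_dir" is a placeholder for any existing directory; the returned URI
# ends in ".zip" and embeds a 16-character hex digest of the directory contents, as
# checked by test_uri_hash_length above.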
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
class TestUploadPackageIfNeeded:
def test_create_upload_once(self, empty_dir, random_dir, ray_start_regular):
uri = get_uri_for_directory(random_dir)
uploaded = upload_package_if_needed(uri, empty_dir, random_dir)
assert uploaded
assert _internal_kv_exists(uri, namespace=KV_NAMESPACE_PACKAGE)
uploaded = upload_package_if_needed(uri, empty_dir, random_dir)
assert not uploaded
assert _internal_kv_exists(uri, namespace=KV_NAMESPACE_PACKAGE)
# Delete the URI from the internal_kv. This should trigger re-upload.
_internal_kv_del(uri, namespace=KV_NAMESPACE_PACKAGE)
assert not _internal_kv_exists(uri, namespace=KV_NAMESPACE_PACKAGE)
uploaded = upload_package_if_needed(uri, empty_dir, random_dir)
assert uploaded
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
class TestGetTopLevelDirFromCompressedPackage:
def test_get_top_level_valid(self, random_zip_file_with_top_level_dir):
top_level_dir_name = get_top_level_dir_from_compressed_package(
str(random_zip_file_with_top_level_dir)
)
assert top_level_dir_name == TOP_LEVEL_DIR_NAME
def test_get_top_level_invalid(self, random_zip_file_without_top_level_dir):
top_level_dir_name = get_top_level_dir_from_compressed_package(
str(random_zip_file_without_top_level_dir)
)
assert top_level_dir_name is None
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
class TestRemoveDirFromFilepaths:
def test_valid_removal(self, random_zip_file_with_top_level_dir):
# This test copies the TOP_LEVEL_DIR_NAME directory, and then it
# shifts the contents of the copied directory into the base tmp_path
# directory. Then it compares the contents of tmp_path with the
# TOP_LEVEL_DIR_NAME directory to ensure that they match.
archive_path = random_zip_file_with_top_level_dir
tmp_path = archive_path[: archive_path.rfind("/")]
original_dir_path = os.path.join(tmp_path, TOP_LEVEL_DIR_NAME)
copy_dir_path = os.path.join(tmp_path, TOP_LEVEL_DIR_NAME + "_copy")
copytree(original_dir_path, copy_dir_path)
remove_dir_from_filepaths(tmp_path, TOP_LEVEL_DIR_NAME + "_copy")
dcmp = dircmp(tmp_path, f"{tmp_path}/{TOP_LEVEL_DIR_NAME}")
# Since this test uses the tmp_path as the target directory, and since
# the tmp_path also contains the zip file and the top level directory,
# make sure that the only difference between the tmp_path's contents
# and the top level directory's contents are the zip file from the
# Pytest fixture and the top level directory itself. This implies that
# all files have been extracted from the top level directory and moved
# into the tmp_path.
assert set(dcmp.left_only) == {ARCHIVE_NAME, TOP_LEVEL_DIR_NAME}
# Make sure that all the subdirectories and files have been moved to
# the target directory
assert len(dcmp.right_only) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("remove_top_level_directory", [False, True])
@pytest.mark.parametrize("unlink_zip", [False, True])
class TestUnzipPackage:
def dcmp_helper(
self, remove_top_level_directory, unlink_zip, tmp_subdir, tmp_path, archive_path
):
dcmp = None
if remove_top_level_directory:
dcmp = dircmp(f"{tmp_subdir}", f"{tmp_path}/{TOP_LEVEL_DIR_NAME}")
else:
dcmp = dircmp(
f"{tmp_subdir}/{TOP_LEVEL_DIR_NAME}", f"{tmp_path}/{TOP_LEVEL_DIR_NAME}"
)
assert len(dcmp.left_only) == 0
assert len(dcmp.right_only) == 0
if unlink_zip:
assert not Path(archive_path).is_file()
else:
assert Path(archive_path).is_file()
def test_unzip_package(
self, random_zip_file_with_top_level_dir, remove_top_level_directory, unlink_zip
):
archive_path = random_zip_file_with_top_level_dir
tmp_path = archive_path[: archive_path.rfind("/")]
tmp_subdir = f"{tmp_path}/{TOP_LEVEL_DIR_NAME}_tmp"
unzip_package(
package_path=archive_path,
target_dir=tmp_subdir,
remove_top_level_directory=remove_top_level_directory,
unlink_zip=unlink_zip,
)
self.dcmp_helper(
remove_top_level_directory, unlink_zip, tmp_subdir, tmp_path, archive_path
)
def test_unzip_with_matching_subdirectory_names(
self, remove_top_level_directory, unlink_zip
):
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
top_level_dir = path / TOP_LEVEL_DIR_NAME
top_level_dir.mkdir(parents=True)
next_level_dir = top_level_dir
for _ in range(10):
dir1 = next_level_dir / TOP_LEVEL_DIR_NAME
dir1.mkdir(parents=True)
next_level_dir = dir1
make_archive(
path / ARCHIVE_NAME[: ARCHIVE_NAME.rfind(".")],
"zip",
path,
TOP_LEVEL_DIR_NAME,
)
archive_path = str(path / ARCHIVE_NAME)
tmp_path = archive_path[: archive_path.rfind("/")]
tmp_subdir = f"{tmp_path}/{TOP_LEVEL_DIR_NAME}_tmp"
unzip_package(
package_path=archive_path,
target_dir=tmp_subdir,
remove_top_level_directory=remove_top_level_directory,
unlink_zip=unlink_zip,
)
self.dcmp_helper(
remove_top_level_directory,
unlink_zip,
tmp_subdir,
tmp_path,
archive_path,
)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_travel():
with tempfile.TemporaryDirectory() as tmp_dir:
dir_paths = set()
file_paths = set()
item_num = 0
excludes = []
root = Path(tmp_dir) / "test"
def construct(path, excluded=False, depth=0):
nonlocal item_num
path.mkdir(parents=True)
if not excluded:
dir_paths.add(str(path))
if depth > 8:
return
if item_num > 500:
return
dir_num = random.randint(0, 10)
file_num = random.randint(0, 10)
for _ in range(dir_num):
uid = str(uuid.uuid4()).split("-")[0]
dir_path = path / uid
exclud_sub = random.randint(0, 5) == 0
if not excluded and exclud_sub:
excludes.append(str(dir_path.relative_to(root)))
if not excluded:
construct(dir_path, exclud_sub or excluded, depth + 1)
item_num += 1
if item_num > 1000:
return
for _ in range(file_num):
uid = str(uuid.uuid4()).split("-")[0]
with (path / uid).open("w") as f:
v = random.randint(0, 1000)
f.write(str(v))
if not excluded:
if random.randint(0, 5) == 0:
excludes.append(str((path / uid).relative_to(root)))
else:
file_paths.add((str(path / uid), str(v)))
item_num += 1
construct(root)
exclude_spec = _get_excludes(root, excludes)
visited_dir_paths = set()
visited_file_paths = set()
def handler(path):
if path.is_dir():
visited_dir_paths.add(str(path))
else:
with open(path) as f:
visited_file_paths.add((str(path), f.read()))
_dir_travel(root, [exclude_spec], handler)
assert file_paths == visited_file_paths
assert dir_paths == visited_dir_paths
@pytest.mark.parametrize(
"parsing_tuple",
[
("gcs://file.zip", Protocol.GCS, "file.zip"),
("s3://bucket/file.zip", Protocol.S3, "s3_bucket_file.zip"),
("https://test.com/file.zip", Protocol.HTTPS, "https_test_com_file.zip"),
("gs://bucket/file.zip", Protocol.GS, "gs_bucket_file.zip"),
],
)
def test_parsing(parsing_tuple):
uri, protocol, package_name = parsing_tuple
parsed_protocol, parsed_package_name = parse_uri(uri)
assert protocol == parsed_protocol
assert package_name == parsed_package_name
def test_get_local_dir_from_uri():
uri = "gcs://<working_dir_content_hash>.zip"
assert get_local_dir_from_uri(uri, "base_dir") == Path(
"base_dir/<working_dir_content_hash>"
)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| 36.416 | 88 | 0.634593 |
662cd7c6d6c9e30a30f7bfed2197e882005ab070 | 62,492 | py | Python | tests/mail/tests.py | qedsoftware/django | b5fc192b99ce92a7ccad08cca7b59b1a4e7ca230 | ["PSF-2.0", "BSD-3-Clause"] | null | null | null | tests/mail/tests.py | qedsoftware/django | b5fc192b99ce92a7ccad08cca7b59b1a4e7ca230 | ["PSF-2.0", "BSD-3-Clause"] | null | null | null | tests/mail/tests.py | qedsoftware/django | b5fc192b99ce92a7ccad08cca7b59b1a4e7ca230 | ["PSF-2.0", "BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import asyncore
import base64
import mimetypes
import os
import shutil
import smtpd
import socket
import sys
import tempfile
import threading
from email.header import Header
from email.mime.text import MIMEText
from smtplib import SMTP, SMTPAuthenticationError, SMTPException
from ssl import SSLError
from django.core import mail
from django.core.mail import (
EmailMessage, EmailMultiAlternatives, mail_admins, mail_managers,
send_mail, send_mass_mail,
)
from django.core.mail.backends import console, dummy, filebased, locmem, smtp
from django.core.mail.message import BadHeaderError, sanitize_address
from django.test import SimpleTestCase, override_settings
from django.test.utils import requires_tz_support
from django.utils._os import upath
from django.utils.encoding import force_bytes, force_text
from django.utils.six import PY3, StringIO, binary_type
from django.utils.translation import ugettext_lazy
if PY3:
from email.utils import parseaddr
from email import message_from_bytes, message_from_binary_file
else:
from email.Utils import parseaddr
from email import (
message_from_string as message_from_bytes,
message_from_file as message_from_binary_file,
)
class HeadersCheckMixin(object):
def assertMessageHasHeaders(self, message, headers):
"""
Check that :param message: has all :param headers: headers.
:param message: can be an instance of an email.Message subclass or a
string with the contents of an email message.
:param headers: should be a set of (header-name, header-value) tuples.
"""
if isinstance(message, binary_type):
message = message_from_bytes(message)
msg_headers = set(message.items())
self.assertTrue(headers.issubset(msg_headers), msg='Message is missing '
'the following headers: %s' % (headers - msg_headers),)
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def get_decoded_attachments(self, django_message):
"""
Encode the specified django.core.mail.message.EmailMessage, then decode
it using Python's email.parser module and, for each attachment of the
message, return a list of tuples with (filename, content, mimetype).
"""
msg_bytes = django_message.message().as_bytes()
email_message = message_from_bytes(msg_bytes)
def iter_attachments():
for i in email_message.walk():
# Once support for Python<3.5 has been dropped, we can use
# i.get_content_disposition() here instead.
content_disposition = i.get('content-disposition', '').split(';')[0].lower()
if content_disposition == 'attachment':
filename = i.get_filename()
content = i.get_payload(decode=True)
mimetype = i.get_content_type()
yield filename, content, mimetype
return list(iter_attachments())
def test_ascii(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected]')
def test_multiple_recipients(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected], [email protected]')
def test_cc(self):
"""Regression test for #7722"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
message = email.message()
self.assertEqual(message['Cc'], '[email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]'])
# Test multiple CC with multiple To
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'],
cc=['[email protected]', '[email protected]']
)
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]', '[email protected]']
)
# Testing with Bcc
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'],
cc=['[email protected]', '[email protected]'], bcc=['[email protected]']
)
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]']
)
def test_reply_to(self):
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
reply_to=['[email protected]'],
)
message = email.message()
self.assertEqual(message['Reply-To'], '[email protected]')
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
reply_to=['[email protected]', '[email protected]']
)
message = email.message()
self.assertEqual(message['Reply-To'], '[email protected], [email protected]')
def test_recipients_as_tuple(self):
email = EmailMessage(
'Subject', 'Content', '[email protected]', ('[email protected]', '[email protected]'),
cc=('[email protected]', '[email protected]'), bcc=('[email protected]',)
)
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(
email.recipients(),
['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]']
)
def test_recipients_as_string(self):
with self.assertRaisesMessage(TypeError, '"to" argument must be a list or tuple'):
EmailMessage(to='[email protected]')
with self.assertRaisesMessage(TypeError, '"cc" argument must be a list or tuple'):
EmailMessage(cc='[email protected]')
with self.assertRaisesMessage(TypeError, '"bcc" argument must be a list or tuple'):
EmailMessage(bcc='[email protected]')
with self.assertRaisesMessage(TypeError, '"reply_to" argument must be a list or tuple'):
EmailMessage(reply_to='[email protected]')
def test_header_injection(self):
email = EmailMessage('Subject\nInjection Test', 'Content', '[email protected]', ['[email protected]'])
with self.assertRaises(BadHeaderError):
email.message()
email = EmailMessage(
ugettext_lazy('Subject\nInjection Test'), 'Content', '[email protected]', ['[email protected]']
)
with self.assertRaises(BadHeaderError):
email.message()
def test_space_continuation(self):
"""
Test for space continuation character in long (ASCII) subject headers (#7747)
"""
email = EmailMessage(
'Long subject lines that get wrapped should contain a space '
'continuation character to get expected behavior in Outlook and Thunderbird',
'Content', '[email protected]', ['[email protected]']
)
message = email.message()
# Note that in Python 3, maximum line length has increased from 76 to 78
self.assertEqual(
message['Subject'].encode(),
b'Long subject lines that get wrapped should contain a space continuation\n'
b' character to get expected behavior in Outlook and Thunderbird'
)
def test_message_header_overrides(self):
"""
Specifying dates or message-ids in the extra headers overrides the
default values (#9233)
"""
headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
email = EmailMessage('subject', 'content', '[email protected]', ['[email protected]'], headers=headers)
self.assertMessageHasHeaders(email.message(), {
('Content-Transfer-Encoding', '7bit'),
('Content-Type', 'text/plain; charset="utf-8"'),
('From', '[email protected]'),
('MIME-Version', '1.0'),
('Message-ID', 'foo'),
('Subject', 'subject'),
('To', '[email protected]'),
('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
})
def test_from_header(self):
"""
Make sure we can manually set the From header (#9214)
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_to_header(self):
"""
Make sure we can manually set the To header (#17444)
"""
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'],
headers={'To': '[email protected]'})
message = email.message()
self.assertEqual(message['To'], '[email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
# If we don't set the To header manually, it should default to the `to` argument to the constructor
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['To'], '[email protected], [email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
def test_reply_to_header(self):
"""
Specifying 'Reply-To' in headers should override reply_to.
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
reply_to=['[email protected]'], headers={'Reply-To': '[email protected]'},
)
message = email.message()
self.assertEqual(message['Reply-To'], '[email protected]')
def test_multiple_message_call(self):
"""
Regression for #13259 - Make sure that headers are not changed when
calling EmailMessage.message()
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
message = email.message()
self.assertEqual(message['From'], '[email protected]')
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_unicode_address_header(self):
"""
Regression for #11144 - When a to/from/cc header contains unicode,
make sure the email addresses are parsed correctly (especially with
regards to commas)
"""
email = EmailMessage(
'Subject', 'Content', '[email protected]',
['"Firstname Sürname" <[email protected]>', '[email protected]'],
)
self.assertEqual(
email.message()['To'],
'=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>, [email protected]'
)
email = EmailMessage(
'Subject', 'Content', '[email protected]',
['"Sürname, Firstname" <[email protected]>', '[email protected]'],
)
self.assertEqual(
email.message()['To'],
'=?utf-8?q?S=C3=BCrname=2C_Firstname?= <[email protected]>, [email protected]'
)
def test_unicode_headers(self):
email = EmailMessage("Gżegżółka", "Content", "[email protected]", ["[email protected]"],
headers={"Sender": '"Firstname Sürname" <[email protected]>',
"Comments": 'My Sürname is non-ASCII'})
message = email.message()
self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>')
self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
def test_safe_mime_multipart(self):
"""
Make sure headers can be set with a different encoding than utf-8 in
SafeMIMEMultipart as well
"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
from_email, to = '[email protected]', '"Sürname, Firstname" <[email protected]>'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.encoding = 'iso-8859-1'
self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <[email protected]>')
self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
def test_encoding(self):
"""
Regression for #12791 - Encode body correctly with other encodings
than utf-8
"""
email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', '[email protected]', ['[email protected]'])
email.encoding = 'iso-8859-1'
message = email.message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]')})
self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
# Make sure MIME attachments also work correctly with encodings other than utf-8
text_content = 'Firstname Sürname is a great guy.'
html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
msg = EmailMultiAlternatives('Subject', text_content, '[email protected]', ['[email protected]'])
msg.encoding = 'iso-8859-1'
msg.attach_alternative(html_content, "text/html")
payload0 = msg.message().get_payload(0)
self.assertMessageHasHeaders(payload0, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
payload1 = msg.message().get_payload(1)
self.assertMessageHasHeaders(payload1, {
('MIME-Version', '1.0'),
('Content-Type', 'text/html; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable')})
self.assertTrue(
payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>')
)
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), 'multipart/mixed')
self.assertEqual(message.get_default_type(), 'text/plain')
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
self.assertEqual(payload[1].get_content_type(), 'application/pdf')
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
content = 'This is the message.'
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
def test_attach_file(self):
"""
Test attaching a file against different mimetypes and make sure that
a file will be attached and sent properly even if an invalid mimetype
is specified.
"""
files = (
# filename, actual mimetype
('file.txt', 'text/plain'),
('file.png', 'image/png'),
('file_txt', None),
('file_png', None),
('file_txt.png', 'image/png'),
('file_png.txt', 'text/plain'),
)
test_mimetypes = ['text/plain', 'image/png', None]
for basename, real_mimetype in files:
for mimetype in test_mimetypes:
email = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype)
self.assertEqual(email.attachments, [])
file_path = os.path.join(os.path.dirname(upath(__file__)), 'attachments', basename)
email.attach_file(file_path, mimetype=mimetype)
self.assertEqual(len(email.attachments), 1)
self.assertIn(basename, email.attachments[0])
msgs_sent_num = email.send()
self.assertEqual(msgs_sent_num, 1)
def test_attach_text_as_bytes(self):
msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
msg.attach('file.txt', b'file content')
# Check that the message would be sent at all.
sent_num = msg.send()
self.assertEqual(sent_num, 1)
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
self.assertEqual(content, b'file content')
self.assertEqual(mimetype, 'text/plain')
def test_attach_utf8_text_as_bytes(self):
"""
Non-ASCII characters encoded as valid UTF-8 are correctly transported
and decoded.
"""
msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
msg.attach('file.txt', b'\xc3\xa4') # UTF-8 encoded a umlaut.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
self.assertEqual(content, b'\xc3\xa4')
self.assertEqual(mimetype, 'text/plain')
def test_attach_non_utf8_text_as_bytes(self):
"""
Binary data that can't be decoded as UTF-8 overrides the MIME type
instead of decoding the data.
"""
msg = EmailMessage('subject', 'body', '[email protected]', ['[email protected]'])
msg.attach('file.txt', b'\xff') # Invalid UTF-8.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, 'file.txt')
# Content should be passed through unmodified.
self.assertEqual(content, b'\xff')
self.assertEqual(mimetype, 'application/octet-stream')
def test_dummy_backend(self):
"""
Make sure that the dummy backend returns the correct number of sent messages
"""
connection = dummy.EmailBackend()
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
Make sure that get_connection() accepts arbitrary keyword arguments that might be
used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.locmem.EmailBackend'),
locmem.EmailBackend
)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.console.EmailBackend'),
console.EmailBackend
)
tmp_dir = tempfile.mkdtemp()
try:
self.assertIsInstance(
mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir),
filebased.EmailBackend
)
finally:
shutil.rmtree(tmp_dir)
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_connection_arg(self):
"""Test connection argument to send_mail(), et. al."""
mail.outbox = []
# Send using non-default connection
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, 'Subject')
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mass_mail([
('Subject1', 'Content1', '[email protected]', ['[email protected]']),
('Subject2', 'Content2', '[email protected]', ['[email protected]']),
], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_admins('Admin message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_managers('Manager message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage(
'Subject', 'From the future', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
self.assertNotIn(b'>From the future', email.message().as_bytes())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage(
'Subject', 'UTF-8 encoded body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
self.assertIn(b'Content-Transfer-Encoding: 7bit', msg.message().as_bytes())
# Ticket #11212
# Shouldn't use quoted printable, should detect it can represent content with 7 bit data
msg = EmailMessage(
'Subject', 'Body with only ASCII characters.', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 7bit', s)
# Shouldn't use quoted printable, should detect it can represent content with 8 bit data
msg = EmailMessage(
'Subject', 'Body with latin characters: àáä.', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
s = msg.message().as_string()
self.assertIn(str('Content-Transfer-Encoding: 8bit'), s)
msg = EmailMessage(
'Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', '[email protected]',
['[email protected]'], headers={'From': '[email protected]'},
)
s = msg.message().as_bytes()
self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
s = msg.message().as_string()
self.assertIn(str('Content-Transfer-Encoding: 8bit'), s)
def test_dont_base64_encode_message_rfc822(self):
# Ticket #18967
# Shouldn't use base64 encoding for a child EmailMessage attachment.
# Create a child message first
child_msg = EmailMessage(
'Child Subject', 'Some body of child message', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
child_s = child_msg.message().as_string()
# Now create a parent
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
# Attach to parent as a string
parent_msg.attach(content=child_s, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertIn(str('Child Subject'), parent_s)
# Feature test: try attaching email.Message object directly to the mail.
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertIn(str('Child Subject'), parent_s)
# Feature test: try attaching Django's EmailMessage object directly to the mail.
parent_msg = EmailMessage(
'Parent Subject', 'Some parent body', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
parent_msg.attach(content=child_msg, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertIn(str('Child Subject'), parent_s)
def test_sanitize_address(self):
"""
Email addresses are properly sanitized.
"""
# Simple ASCII address - string form
self.assertEqual(sanitize_address('[email protected]', 'ascii'), '[email protected]')
self.assertEqual(sanitize_address('[email protected]', 'utf-8'), '[email protected]')
# Bytestrings are transformed to normal strings.
self.assertEqual(sanitize_address(b'[email protected]', 'utf-8'), '[email protected]')
# Simple ASCII address - tuple form
self.assertEqual(
sanitize_address(('A name', '[email protected]'), 'ascii'),
'A name <[email protected]>'
)
if PY3:
self.assertEqual(
sanitize_address(('A name', '[email protected]'), 'utf-8'),
'=?utf-8?q?A_name?= <[email protected]>'
)
else:
self.assertEqual(
sanitize_address(('A name', '[email protected]'), 'utf-8'),
'A name <[email protected]>'
)
# Unicode characters are supported in RFC 6532.
self.assertEqual(
sanitize_address('tó@example.com', 'utf-8'),
'[email protected]'
)
self.assertEqual(
sanitize_address(('Tó Example', 'tó@example.com'), 'utf-8'),
'=?utf-8?q?T=C3=B3_Example?= <[email protected]>'
)
@requires_tz_support
class MailTimeZoneTests(SimpleTestCase):
@override_settings(EMAIL_USE_LOCALTIME=False, USE_TZ=True, TIME_ZONE='Africa/Algiers')
def test_date_header_utc(self):
"""
EMAIL_USE_LOCALTIME=False creates a datetime in UTC.
"""
email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]'])
self.assertTrue(email.message()['Date'].endswith('-0000'))
@override_settings(EMAIL_USE_LOCALTIME=True, USE_TZ=True, TIME_ZONE='Africa/Algiers')
def test_date_header_localtime(self):
"""
EMAIL_USE_LOCALTIME=True creates a datetime in the local time zone.
"""
email = EmailMessage('Subject', 'Body', '[email protected]', ['[email protected]'])
self.assertTrue(email.message()['Date'].endswith('+0100')) # Africa/Algiers is UTC+1
class PythonGlobalState(SimpleTestCase):
"""
Tests for #12422 -- Django smarts (#2472/#11212) with charset of utf-8 text
parts shouldn't pollute global email Python package charset registry when
django.mail.message is imported.
"""
def test_utf8(self):
txt = MIMEText('UTF-8 encoded body', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_7bit(self):
txt = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8')
self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
def test_8bit_latin(self):
txt = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8')
self.assertIn(str('Content-Transfer-Encoding: base64'), txt.as_string())
def test_8bit_non_latin(self):
txt = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8')
self.assertIn(str('Content-Transfer-Encoding: base64'), txt.as_string())
class BaseEmailBackendTests(HeadersCheckMixin, object):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests must provide a get_mailbox_content() method')
def flush_mailbox(self):
raise NotImplementedError('subclasses of BaseEmailBackendTests may require a flush_mailbox() method')
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(
len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [m.as_string() for m in mailbox])
)
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "[email protected]")
self.assertEqual(message.get_all("to"), ["[email protected]"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(force_text(message.get_payload(decode=True)), 'Je t\'aime très fort')
def test_send_long_lines(self):
"""
Email line length is limited to 998 chars by the RFC:
https://tools.ietf.org/html/rfc5322#section-2.1.1
Message body containing longer lines are converted to Quoted-Printable
to avoid having to insert newlines, which could be hairy to do properly.
"""
email = EmailMessage('Subject', "Comment ça va? " * 100, '[email protected]', ['[email protected]'])
email.send()
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', 'quoted-printable'),
})
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', '[email protected]', ['[email protected]'])
email2 = EmailMessage('Subject', 'Content2', '[email protected]', ['[email protected]'])
# send_messages() may take a list or a generator.
emails_lists = ([email1, email2], (email for email in [email1, email2]))
for emails_list in emails_lists:
num_sent = mail.get_connection().send_messages(emails_list)
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), 'Content1')
self.assertEqual(messages[1].get_payload(), 'Content2')
self.flush_mailbox()
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <[email protected]>',
["[email protected]"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>")
def test_plaintext_send_mail(self):
"""
Test send_mail without the html_message
regression test for adding html_message parameter to send_mail()
"""
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'])
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message.get_content_type(), 'text/plain')
def test_html_send_mail(self):
"""Test html_message argument to send_mail"""
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(MANAGERS=[('nobody', '[email protected]')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', '[email protected]')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=[], MANAGERS=[])
def test_empty_admins(self):
"""
Test that mail_admins/mail_managers don't connect to the mail server
if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]'),
('Cc', '[email protected]')})
self.assertIn('\nDate: ', message.as_string())
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.flush_mailbox()
m = EmailMessage('Subject', 'Content', 'from@öäü.com', ['to@öäü.com'], cc=['cc@öäü.com'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.assertEqual(message.get('cc'), '[email protected]')
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), "tester")
self.assertEqual(message.get('to'), "django")
def test_lazy_addresses(self):
"""
Email sending should support lazy email addresses (#24416).
"""
_ = ugettext_lazy
self.assertTrue(send_mail('Subject', 'Content', _('tester'), [_('django')]))
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'django')
self.flush_mailbox()
m = EmailMessage(
'Subject', 'Content', _('tester'), [_('to1'), _('to2')],
cc=[_('cc1'), _('cc2')],
bcc=[_('bcc')],
reply_to=[_('reply')],
)
self.assertEqual(m.recipients(), ['to1', 'to2', 'cc1', 'cc2', 'bcc'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('from'), 'tester')
self.assertEqual(message.get('to'), 'to1, to2')
self.assertEqual(message.get('cc'), 'cc1, cc2')
self.assertEqual(message.get('Reply-To'), 'reply')
def test_close_connection(self):
"""
Test that connection can be closed (even when not explicitly opened)
"""
conn = mail.get_connection(username='', password='')
conn.close()
def test_use_as_contextmanager(self):
"""
Test that the connection can be used as a contextmanager.
"""
opened = [False]
closed = [False]
conn = mail.get_connection(username='', password='')
def open():
opened[0] = True
conn.open = open
def close():
closed[0] = True
conn.close = close
with conn as same_conn:
self.assertTrue(opened[0])
self.assertIs(same_conn, conn)
self.assertFalse(closed[0])
self.assertTrue(closed[0])
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.locmem.EmailBackend'
def get_mailbox_content(self):
return [m.message() for m in mail.outbox]
def flush_mailbox(self):
mail.outbox = []
def tearDown(self):
super(LocmemBackendTests, self).tearDown()
mail.outbox = []
def test_locmem_shared_messages(self):
"""
Make sure that the locmem backend populates the outbox.
"""
connection = locmem.EmailBackend()
connection2 = locmem.EmailBackend()
email = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
connection.send_messages([email])
connection2.send_messages([email])
self.assertEqual(len(mail.outbox), 2)
def test_validate_multiline_headers(self):
# Ticket #18861 - Validate emails when using the locmem backend
with self.assertRaises(BadHeaderError):
send_mail('Subject\nMultiline', 'Content', '[email protected]', ['[email protected]'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.filebased.EmailBackend'
def setUp(self):
super(FileBackendTests, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp_dir)
self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
self._settings_override.enable()
def tearDown(self):
self._settings_override.disable()
super(FileBackendTests, self).tearDown()
def flush_mailbox(self):
for filename in os.listdir(self.tmp_dir):
os.unlink(os.path.join(self.tmp_dir, filename))
def get_mailbox_content(self):
messages = []
for filename in os.listdir(self.tmp_dir):
with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
session = fp.read().split(force_bytes('\n' + ('-' * 79) + '\n', encoding='ascii'))
messages.extend(message_from_bytes(m) for m in session if m)
return messages
def test_file_sessions(self):
"""Make sure opening a connection creates a new file"""
msg = EmailMessage(
'Subject', 'Content', '[email protected]', ['[email protected]'],
headers={'From': '[email protected]'},
)
connection = mail.get_connection()
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
message = message_from_binary_file(fp)
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
connection2 = mail.get_connection()
connection2.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
msg.connection = mail.get_connection()
self.assertTrue(connection.open())
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
connection.close()
class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.console.EmailBackend'
def setUp(self):
super(ConsoleBackendTests, self).setUp()
self.__stdout = sys.stdout
self.stream = sys.stdout = StringIO()
def tearDown(self):
del self.stream
sys.stdout = self.__stdout
del self.__stdout
super(ConsoleBackendTests, self).tearDown()
def flush_mailbox(self):
self.stream = sys.stdout = StringIO()
def get_mailbox_content(self):
messages = self.stream.getvalue().split(str('\n' + ('-' * 79) + '\n'))
return [message_from_bytes(force_bytes(m)) for m in messages if m]
def test_console_stream_kwarg(self):
"""
Test that the console backend can be pointed at an arbitrary stream.
"""
s = StringIO()
connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
message = force_bytes(s.getvalue().split('\n' + ('-' * 79) + '\n')[0])
self.assertMessageHasHeaders(message, {
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]')})
self.assertIn(b'\nDate: ', message)
class FakeSMTPChannel(smtpd.SMTPChannel):
def collect_incoming_data(self, data):
try:
smtpd.SMTPChannel.collect_incoming_data(self, data)
except UnicodeDecodeError:
# ignore decode error in SSL/TLS connection tests as we only care
# whether the connection attempt was made
pass
def smtp_AUTH(self, arg):
if arg == 'CRAM-MD5':
# This is only the first part of the login process. But it's enough
# for our tests.
challenge = base64.b64encode(b'somerandomstring13579')
self.push(str('334 %s' % challenge.decode()))
else:
self.push(str('502 Error: login "%s" not implemented' % arg))
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
"""
Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
"""
channel_class = FakeSMTPChannel
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
# New kwarg added in Python 3.5; default switching to False in 3.6.
if sys.version_info >= (3, 5):
kwargs['decode_data'] = True
smtpd.SMTPServer.__init__(self, *args, **kwargs)
self._sink = []
self.active = False
self.active_lock = threading.Lock()
self.sink_lock = threading.Lock()
if not PY3:
def handle_accept(self):
# copy of Python 2.7 smtpd.SMTPServer.handle_accept with hardcoded
# SMTPChannel replaced by self.channel_class
pair = self.accept()
if pair is not None:
conn, addr = pair
self.channel_class(self, conn, addr)
def process_message(self, peer, mailfrom, rcpttos, data):
if PY3:
data = data.encode('utf-8')
m = message_from_bytes(data)
maddr = parseaddr(m.get('from'))[1]
if mailfrom != maddr:
# According to the spec, mailfrom does not necessarily match the
# From header - on Python 3 this is the case where the local part
# isn't encoded, so try to correct that.
lp, domain = mailfrom.split('@', 1)
lp = Header(lp, 'utf-8').encode()
mailfrom = '@'.join([lp, domain])
if mailfrom != maddr:
return "553 '%s' != '%s'" % (mailfrom, maddr)
with self.sink_lock:
self._sink.append(m)
def get_sink(self):
with self.sink_lock:
return self._sink[:]
def flush_sink(self):
with self.sink_lock:
self._sink[:] = []
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
asyncore.close_all()
def stop(self):
if self.active:
self.active = False
self.join()
class FakeAUTHSMTPConnection(SMTP):
"""
    An SMTP connection pretending to support the AUTH command. It does not, but
    at least this allows testing the first part of the AUTH process.
"""
def ehlo(self, name=''):
response = SMTP.ehlo(self, name=name)
self.esmtp_features.update({
'auth': 'CRAM-MD5 PLAIN LOGIN',
})
return response
class SMTPBackendTestsBase(SimpleTestCase):
@classmethod
def setUpClass(cls):
super(SMTPBackendTestsBase, cls).setUpClass()
cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
cls._settings_override = override_settings(
EMAIL_HOST="127.0.0.1",
EMAIL_PORT=cls.server.socket.getsockname()[1])
cls._settings_override.enable()
cls.server.start()
@classmethod
def tearDownClass(cls):
cls._settings_override.disable()
cls.server.stop()
super(SMTPBackendTestsBase, cls).tearDownClass()
class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase):
email_backend = 'django.core.mail.backends.smtp.EmailBackend'
def setUp(self):
super(SMTPBackendTests, self).setUp()
self.server.flush_sink()
def tearDown(self):
self.server.flush_sink()
super(SMTPBackendTests, self).tearDown()
def flush_mailbox(self):
self.server.flush_sink()
def get_mailbox_content(self):
return self.server.get_sink()
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, 'not empty username')
self.assertEqual(backend.password, 'not empty password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username='username', password='password')
self.assertEqual(backend.username, 'username')
self.assertEqual(backend.password, 'password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username='', password='')
self.assertEqual(backend.username, '')
self.assertEqual(backend.password, '')
def test_auth_attempted(self):
"""
Test that opening the backend with non empty username/password tries
to authenticate against the SMTP server.
"""
backend = smtp.EmailBackend(
username='not empty username', password='not empty password')
try:
with self.assertRaisesMessage(SMTPException, 'SMTP AUTH extension not supported by server.'):
backend.open()
finally:
backend.close()
def test_server_open(self):
"""
Test that open() tells us whether it opened a connection.
"""
backend = smtp.EmailBackend(username='', password='')
self.assertFalse(backend.connection)
opened = backend.open()
backend.close()
self.assertTrue(opened)
def test_server_login(self):
"""
Even if the Python SMTP server doesn't support authentication, the
login process starts and the appropriate exception is raised.
"""
class CustomEmailBackend(smtp.EmailBackend):
connection_class = FakeAUTHSMTPConnection
backend = CustomEmailBackend(username='username', password='password')
with self.assertRaises(SMTPAuthenticationError):
backend.open()
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_override_settings(self):
backend = smtp.EmailBackend(use_tls=False)
self.assertFalse(backend.use_tls)
def test_email_tls_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_tls)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_override_settings(self):
backend = smtp.EmailBackend(use_ssl=False)
self.assertFalse(backend.use_ssl)
def test_email_ssl_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_ssl)
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_certfile, 'foo')
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_override_settings(self):
backend = smtp.EmailBackend(ssl_certfile='bar')
self.assertEqual(backend.ssl_certfile, 'bar')
def test_email_ssl_certfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_certfile)
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_keyfile, 'foo')
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_override_settings(self):
backend = smtp.EmailBackend(ssl_keyfile='bar')
self.assertEqual(backend.ssl_keyfile, 'bar')
def test_email_ssl_keyfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_keyfile)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_attempts_starttls(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
try:
with self.assertRaisesMessage(SMTPException, 'STARTTLS extension not supported by server.'):
backend.open()
finally:
backend.close()
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_attempts_ssl_connection(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
try:
with self.assertRaises(SSLError):
backend.open()
finally:
backend.close()
def test_connection_timeout_default(self):
"""Test that the connection's timeout value is None by default."""
connection = mail.get_connection('django.core.mail.backends.smtp.EmailBackend')
self.assertIsNone(connection.timeout)
def test_connection_timeout_custom(self):
"""Test that the timeout parameter can be customized."""
class MyEmailBackend(smtp.EmailBackend):
def __init__(self, *args, **kwargs):
kwargs.setdefault('timeout', 42)
super(MyEmailBackend, self).__init__(*args, **kwargs)
myemailbackend = MyEmailBackend()
myemailbackend.open()
self.assertEqual(myemailbackend.timeout, 42)
self.assertEqual(myemailbackend.connection.timeout, 42)
myemailbackend.close()
@override_settings(EMAIL_TIMEOUT=10)
def test_email_timeout_override_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.timeout, 10)
def test_email_msg_uses_crlf(self):
"""#23063 -- Test that RFC-compliant messages are sent over SMTP."""
send = SMTP.send
try:
smtp_messages = []
def mock_send(self, s):
smtp_messages.append(s)
return send(self, s)
SMTP.send = mock_send
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
mail.get_connection().send_messages([email])
# Find the actual message
msg = None
for i, m in enumerate(smtp_messages):
if m[:4] == 'data':
msg = smtp_messages[i + 1]
break
self.assertTrue(msg)
if PY3:
msg = msg.decode('utf-8')
# Ensure that the message only contains CRLF and not combinations of CRLF, LF, and CR.
msg = msg.replace('\r\n', '')
self.assertNotIn('\r', msg)
self.assertNotIn('\n', msg)
finally:
SMTP.send = send
def test_send_messages_after_open_failed(self):
"""
send_messages() shouldn't try to send messages if open() raises an
exception after initializing the connection.
"""
backend = smtp.EmailBackend()
# Simulate connection initialization success and a subsequent
# connection exception.
backend.connection = True
backend.open = lambda: None
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
self.assertEqual(backend.send_messages([email]), None)
class SMTPBackendStoppedServerTests(SMTPBackendTestsBase):
"""
These tests require a separate class, because the FakeSMTPServer is shut
down in setUpClass(), and it cannot be restarted ("RuntimeError: threads
can only be started once").
"""
@classmethod
def setUpClass(cls):
super(SMTPBackendStoppedServerTests, cls).setUpClass()
cls.backend = smtp.EmailBackend(username='', password='')
cls.server.stop()
def test_server_stopped(self):
"""
Closing the backend while the SMTP server is stopped doesn't raise an
exception.
"""
self.backend.close()
def test_fail_silently_on_connection_error(self):
"""
A socket connection error is silenced with fail_silently=True.
"""
with self.assertRaises(socket.error):
self.backend.open()
self.backend.fail_silently = True
self.backend.open()
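# A minimal sketch of the fail_silently behaviour exercised above as used from
# application code; the addresses are illustrative and the sketch assumes the
# configured SMTP server is unreachable.
def _example_send_mail_fail_silently():
    from django.core.mail import send_mail
    # The socket error from the unreachable server is swallowed instead of raised.
    send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'],
              fail_silently=True)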
| 41.828648 | 119 | 0.626544 |
3b8163cbba806c1cf04ec6c18d98c40cd6001c65
| 14,010 |
py
|
Python
|
google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py
|
connor-mccarthy/python-aiplatform
|
184f7f327aa00b4c8d1acc24dcb1c4c4be6c5bcc
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py
|
connor-mccarthy/python-aiplatform
|
184f7f327aa00b4c8d1acc24dcb1c4c4be6c5bcc
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py
|
connor-mccarthy/python-aiplatform
|
184f7f327aa00b4c8d1acc24dcb1c4c4be6c5bcc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1beta1.types import migration_service
from google.longrunning import operations_pb2 # type: ignore
from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
class MigrationServiceGrpcTransport(MigrationServiceTransport):
"""gRPC backend transport for MigrationService.
A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def search_migratable_resources(
self,
) -> Callable[
[migration_service.SearchMigratableResourcesRequest],
migration_service.SearchMigratableResourcesResponse,
]:
r"""Return a callable for the search migratable resources method over gRPC.
Searches all of the resources in
automl.googleapis.com, datalabeling.googleapis.com and
ml.googleapis.com that can be migrated to Vertex AI's
given location.
Returns:
Callable[[~.SearchMigratableResourcesRequest],
~.SearchMigratableResourcesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_migratable_resources" not in self._stubs:
self._stubs["search_migratable_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources",
request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
)
return self._stubs["search_migratable_resources"]
@property
def batch_migrate_resources(
self,
) -> Callable[
[migration_service.BatchMigrateResourcesRequest], operations_pb2.Operation
]:
r"""Return a callable for the batch migrate resources method over gRPC.
Batch migrates resources from ml.googleapis.com,
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
Returns:
Callable[[~.BatchMigrateResourcesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_migrate_resources" not in self._stubs:
self._stubs["batch_migrate_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources",
request_serializer=migration_service.BatchMigrateResourcesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_migrate_resources"]
def close(self):
self.grpc_channel.close()
__all__ = ("MigrationServiceGrpcTransport",)
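# A minimal sketch of wiring this transport into the generated client by hand; it
# assumes Application Default Credentials are available and uses the standard GAPIC
# client for this service. Most callers simply instantiate the client and let it
# build the transport itself.
def _example_client_with_explicit_transport():
    from google.cloud.aiplatform_v1beta1.services.migration_service import (
        MigrationServiceClient,
    )
    transport = MigrationServiceGrpcTransport(host="aiplatform.googleapis.com")
    return MigrationServiceClient(transport=transport)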
| 44.47619 | 102 | 0.645967 |
199df702c3bf952ad21511a7c1775b148fc538f8
| 1,041 |
py
|
Python
|
salt/pillar/cmd_yaml.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12 |
2015-01-21T00:18:25.000Z
|
2021-07-11T07:35:26.000Z
|
salt/pillar/cmd_yaml.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 86 |
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
salt/pillar/cmd_yaml.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12 |
2015-01-05T09:50:42.000Z
|
2019-08-19T01:43:40.000Z
|
# -*- coding: utf-8 -*-
'''
Execute a command and read the output as YAML. The YAML data is then directly overlaid onto the minion's Pillar data
'''
from __future__ import absolute_import, print_function, unicode_literals
# Don't "fix" the above docstring to put it on two lines, as the sphinx
# autosummary pulls only the first line for its description.
# Import Python libs
import logging
# Import Salt libs
import salt.utils.yaml
# Set up logging
log = logging.getLogger(__name__)
def ext_pillar(minion_id, # pylint: disable=W0613
pillar, # pylint: disable=W0613
command):
'''
Execute a command and read the output as YAML
'''
    output = None
    try:
        command = command.replace('%s', minion_id)
        output = __salt__['cmd.run_stdout'](command, python_shell=True)
        return salt.utils.yaml.safe_load(output)
    except Exception:
        # ``output`` stays None if the command itself failed before producing any output.
        log.critical(
            'YAML data from \'%s\' failed to parse. Command output:\n%s',
            command, output
        )
        return {}
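# A minimal sketch of what the pillar module above produces, with ``__salt__`` stubbed
# out; the command template and YAML payload are illustrative. In a real deployment the
# module is enabled through the master's ``ext_pillar`` configuration instead.
def _example_ext_pillar_call():
    global __salt__  # normally injected by the Salt loader
    __salt__ = {'cmd.run_stdout': lambda cmd, python_shell=True: 'role: web\ndatacenter: eu-1\n'}
    # '%s' is replaced by the minion id, so the command run is 'describe-minion minion-01'
    return ext_pillar('minion-01', {}, 'describe-minion %s')  # -> {'role': 'web', 'datacenter': 'eu-1'}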
| 28.916667 | 116 | 0.660903 |
eda3677d2dd559dcad5bbf2b52b53c942d2a527d
| 65 |
py
|
Python
|
tinynn/converter/operators/__init__.py
|
www516717402/TinyNeuralNetwork
|
23e7931b4377462fad94a9ab0651b6d9a346252d
|
[
"MIT"
] | 1 |
2022-01-11T06:40:13.000Z
|
2022-01-11T06:40:13.000Z
|
tinynn/converter/operators/__init__.py
|
www516717402/TinyNeuralNetwork
|
23e7931b4377462fad94a9ab0651b6d9a346252d
|
[
"MIT"
] | null | null | null |
tinynn/converter/operators/__init__.py
|
www516717402/TinyNeuralNetwork
|
23e7931b4377462fad94a9ab0651b6d9a346252d
|
[
"MIT"
] | 1 |
2021-12-20T07:21:37.000Z
|
2021-12-20T07:21:37.000Z
|
from .base import *
from .graph import *
from .optimize import *
| 16.25 | 23 | 0.723077 |
2d8abe6b9b57d08427d340d3b708d73c682b43e3
| 7,182 |
py
|
Python
|
ppdet/modeling/architectures/faceboxes.py
|
heavengate/PaddleDetection
|
84e79e8760ba2ef7fbc3972d865316af9aade014
|
[
"Apache-2.0"
] | 10 |
2020-11-24T12:32:37.000Z
|
2021-09-06T08:41:04.000Z
|
ppdet/modeling/architectures/faceboxes.py
|
heavengate/PaddleDetection
|
84e79e8760ba2ef7fbc3972d865316af9aade014
|
[
"Apache-2.0"
] | null | null | null |
ppdet/modeling/architectures/faceboxes.py
|
heavengate/PaddleDetection
|
84e79e8760ba2ef7fbc3972d865316af9aade014
|
[
"Apache-2.0"
] | 2 |
2021-01-25T06:02:48.000Z
|
2021-11-10T10:14:25.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from collections import OrderedDict
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from ppdet.core.workspace import register
from ppdet.modeling.ops import SSDOutputDecoder
__all__ = ['FaceBoxes']
@register
class FaceBoxes(object):
"""
FaceBoxes: A CPU Real-time Face Detector with High Accuracy.
see https://arxiv.org/abs/1708.05234
Args:
backbone (object): backbone instance
output_decoder (object): `SSDOutputDecoder` instance
densities (list|None): the densities of generated density prior boxes,
this attribute should be a list or tuple of integers.
        fixed_sizes (list|None): the fixed sizes of generated density prior boxes,
            this attribute should be a list or tuple of the same length as `densities`.
num_classes (int): number of output classes.
steps (list|None): step size of adjacent prior boxes on each feature map.
"""
__category__ = 'architecture'
__inject__ = ['backbone', 'output_decoder']
__shared__ = ['num_classes']
def __init__(self,
backbone="FaceBoxNet",
output_decoder=SSDOutputDecoder().__dict__,
densities=[[4, 2, 1], [1], [1]],
fixed_sizes=[[32., 64., 128.], [256.], [512.]],
num_classes=2,
steps=[8., 16., 32.]):
super(FaceBoxes, self).__init__()
self.backbone = backbone
self.num_classes = num_classes
self.output_decoder = output_decoder
if isinstance(output_decoder, dict):
self.output_decoder = SSDOutputDecoder(**output_decoder)
self.densities = densities
self.fixed_sizes = fixed_sizes
self.steps = steps
def build(self, feed_vars, mode='train'):
im = feed_vars['image']
if mode == 'train':
gt_bbox = feed_vars['gt_bbox']
gt_class = feed_vars['gt_class']
body_feats = self.backbone(im)
locs, confs, box, box_var = self._multi_box_head(
inputs=body_feats, image=im, num_classes=self.num_classes)
if mode == 'train':
loss = fluid.layers.ssd_loss(
locs,
confs,
gt_bbox,
gt_class,
box,
box_var,
overlap_threshold=0.35,
neg_overlap=0.35)
loss = fluid.layers.reduce_sum(loss)
return {'loss': loss}
else:
pred = self.output_decoder(locs, confs, box, box_var)
return {'bbox': pred}
def _multi_box_head(self, inputs, image, num_classes=2):
def permute_and_reshape(input, last_dim):
trans = fluid.layers.transpose(input, perm=[0, 2, 3, 1])
compile_shape = [0, -1, last_dim]
return fluid.layers.reshape(trans, shape=compile_shape)
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
locs, confs = [], []
boxes, vars = [], []
b_attr = ParamAttr(learning_rate=2., regularizer=L2Decay(0.))
for i, input in enumerate(inputs):
densities = self.densities[i]
fixed_sizes = self.fixed_sizes[i]
box, var = fluid.layers.density_prior_box(
input,
image,
densities=densities,
fixed_sizes=fixed_sizes,
fixed_ratios=[1.],
clip=False,
offset=0.5,
steps=[self.steps[i]] * 2)
num_boxes = box.shape[2]
box = fluid.layers.reshape(box, shape=[-1, 4])
var = fluid.layers.reshape(var, shape=[-1, 4])
num_loc_output = num_boxes * 4
num_conf_output = num_boxes * num_classes
# get loc
mbox_loc = fluid.layers.conv2d(
input, num_loc_output, 3, 1, 1, bias_attr=b_attr)
loc = permute_and_reshape(mbox_loc, 4)
# get conf
mbox_conf = fluid.layers.conv2d(
input, num_conf_output, 3, 1, 1, bias_attr=b_attr)
conf = permute_and_reshape(mbox_conf, 2)
locs.append(loc)
confs.append(conf)
boxes.append(box)
vars.append(var)
face_mbox_loc = fluid.layers.concat(locs, axis=1)
face_mbox_conf = fluid.layers.concat(confs, axis=1)
prior_boxes = fluid.layers.concat(boxes)
box_vars = fluid.layers.concat(vars)
return face_mbox_loc, face_mbox_conf, prior_boxes, box_vars
def _inputs_def(self, image_shape):
im_shape = [None] + image_shape
# yapf: disable
inputs_def = {
'image': {'shape': im_shape, 'dtype': 'float32', 'lod_level': 0},
'im_id': {'shape': [None, 1], 'dtype': 'int64', 'lod_level': 0},
'gt_bbox': {'shape': [None, 4], 'dtype': 'float32', 'lod_level': 1},
'gt_class': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1},
'im_shape': {'shape': [None, 3], 'dtype': 'int32', 'lod_level': 0},
}
# yapf: enable
return inputs_def
def build_inputs(
self,
image_shape=[3, None, None],
fields=['image', 'im_id', 'gt_bbox', 'gt_class'], # for train
use_dataloader=True,
iterable=False):
inputs_def = self._inputs_def(image_shape)
feed_vars = OrderedDict([(key, fluid.data(
name=key,
shape=inputs_def[key]['shape'],
dtype=inputs_def[key]['dtype'],
lod_level=inputs_def[key]['lod_level'])) for key in fields])
loader = fluid.io.DataLoader.from_generator(
feed_list=list(feed_vars.values()),
capacity=16,
use_double_buffer=True,
iterable=iterable) if use_dataloader else None
return feed_vars, loader
def train(self, feed_vars):
return self.build(feed_vars, 'train')
def eval(self, feed_vars):
return self.build(feed_vars, 'eval')
def test(self, feed_vars, exclude_nms=False):
assert not exclude_nms, "exclude_nms for {} is not support currently".format(
self.__class__.__name__)
return self.build(feed_vars, 'test')
def is_bbox_normalized(self):
return True
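# A minimal sketch of the prior-box arithmetic implied by the defaults above (purely
# illustrative): with fixed_ratios hard-coded to [1.] in _multi_box_head,
# density_prior_box lays out density**2 boxes per fixed size, so the first feature map
# gets 4*4 + 2*2 + 1*1 = 21 priors per location and the other two get 1 each, which in
# turn sizes the 3x3 conv heads (num_boxes * 4 and num_boxes * num_classes channels).
def _example_priors_per_location(densities=([4, 2, 1], [1], [1])):
    return [sum(d * d for d in ds) for ds in densities]  # -> [21, 1, 1]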
| 37.212435 | 85 | 0.597048 |
802d0339e515355925f14f9a157e072690af177a
| 15,571 |
py
|
Python
|
unstable_baselines/algo/td3/run.py
|
Ending2015a/unstable_baselines
|
1d304115406f6e29186cedb0160811d4139e2733
|
[
"MIT"
] | 10 |
2021-04-26T17:48:27.000Z
|
2022-03-10T14:32:26.000Z
|
unstable_baselines/algo/td3/run.py
|
Ending2015a/unstable_baselines
|
1d304115406f6e29186cedb0160811d4139e2733
|
[
"MIT"
] | null | null | null |
unstable_baselines/algo/td3/run.py
|
Ending2015a/unstable_baselines
|
1d304115406f6e29186cedb0160811d4139e2733
|
[
"MIT"
] | null | null | null |
__copyright__ = '''
The MIT License (MIT)
Copyright (c) 2021 Joe Hsiao
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
'''
__license__ = 'MIT'
# --- built in ---
import os
import re
import sys
import time
import logging
import argparse
import datetime
# --- 3rd party ---
import gym
import numpy as np
import tensorflow as tf
from gym.wrappers import TimeLimit
# --- my module ---
from unstable_baselines import logger
from unstable_baselines.envs import *
from unstable_baselines.utils import (NormalActionNoise,
set_global_seeds)
from unstable_baselines.td3.model import TD3
from unstable_baselines.td3.model import Agent as TD3Agent
def parse_args():
parser = argparse.ArgumentParser(description='Twin-Delayed Deep Deterministic Policy Gradient (TD3)')
parser.add_argument('--logdir', type=str, default='log/{env_id}/td3/{rank}',help='Root dir (args: {env_id}, {rank})')
parser.add_argument('--logging', type=str, default='train.log', help='Log path (args: {env_id}, {rank})')
parser.add_argument('--log_level', type=str, default='INFO', help='Log level')
parser.add_argument('--monitor_dir', type=str, default='monitor', help='Monitor dir (args: {env_id}, {rank})')
parser.add_argument('--tb_logdir', type=str, default='', help='Tensorboard log name (args: {env_id}, {rank})')
parser.add_argument('--model_dir', type=str, default='model', help='Model save path (args: {env_id}, {rank})')
parser.add_argument('--env_id', type=str, default='HalfCheetahBulletEnv-v0',help='Environment ID')
parser.add_argument('--num_envs', type=int, default=1, help='Number of environments')
parser.add_argument('--num_epochs', type=int, default=10000, help='Number of training epochs')
parser.add_argument('--num_steps', type=int, default=100, help='Number of timesteps per epoch (interact with envs)')
parser.add_argument('--num_gradsteps', type=int, default=100, help='Number of gradient steps')
parser.add_argument('--batch_size', type=int, default=100, help='Training batch size')
parser.add_argument('--buffer_size', type=int, default=1000000, help='Maximum size of replay buffer')
parser.add_argument('--min_buffer', type=int, default=10000, help='Minimum number of samples in replay buffer')
parser.add_argument('--policy_update', type=int, default=2, help='Delayed update to policy network (gradsteps)')
parser.add_argument('--target_update', type=int, default=2, help='Target network update frequency (gradsteps)')
parser.add_argument('--verbose', type=int, default=1, help='Print more message, 0=less, 1=more train log, 2=more eval log')
parser.add_argument('--rank', type=int, default=0, help='Optional arguments for parallel training')
parser.add_argument('--seed', type=int, default=0, help='Random seed')
parser.add_argument('--log_interval', type=int, default=10, help='Logging interval (epochs)')
parser.add_argument('--eval_interval', type=int, default=1000, help='Evaluation interval (epochs)')
parser.add_argument('--eval_episodes', type=int, default=5, help='Number of episodes each evaluation')
parser.add_argument('--eval_max_steps', type=int, default=1000, help='Maximum timesteps in each evaluation episode')
parser.add_argument('--eval_seed', type=int, default=0, help='Environment seed for evaluation')
parser.add_argument('--save_interval', type=int, default=1000, help='Model checkpoint interval (epochs)')
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
parser.add_argument('--gamma', type=float, default=0.99, help='Discount factor')
parser.add_argument('--tau', type=float, default=0.005, help='Polyak update coefficient (tau in original paper)')
parser.add_argument('--max_grad_norm', type=float, default=None, help='Gradient norm clip range')
parser.add_argument('--action_noise', type=float, default=0.2, help='Noise scale added to target actions')
parser.add_argument('--action_noise_clip', type=float, default=0.5, help='Noise range added to target actions')
parser.add_argument('--explore_noise_mean', type=float, default=0, help='Mean of normal action noise')
parser.add_argument('--explore_noise_scale', type=float, default=0.1, help='Scale of normal action noise')
parser.add_argument('--explore_noise', action='store_true', help='Enable exploration noise')
parser.add_argument('--force_mlp', action='store_true', help='Use MLP network')
parser.add_argument('--record_video', action='store_true', help='Enable video recording')
a = parser.parse_args()
a.logdir = a.logdir.format(env_id=a.env_id, rank=a.rank)
a.logging = os.path.join(a.logdir, a.logging).format(env_id=a.env_id, rank=a.rank)
a.monitor_dir = os.path.join(a.logdir, a.monitor_dir).format(env_id=a.env_id, rank=a.rank)
a.tb_logdir = os.path.join(a.logdir, a.tb_logdir).format(env_id=a.env_id, rank=a.rank)
a.model_dir = os.path.join(a.logdir, a.model_dir).format(env_id=a.env_id, rank=a.rank)
return a
def make_env(a, eval=False):
'''
Make non-Atari env (Pybullet)
'''
import pybullet_envs
if not eval:
def _make_env(rank, a):
def _init():
logger.Config.use(filename=a.logging, level=a.log_level,
colored=True, reset=True)
set_global_seeds(a.seed)
import pybullet_envs
env = gym.make(a.env_id)
env = TimeFeatureWrapper(env)
env = SeedEnv(env, seed=a.seed+rank)
if a.record_video:
env = VideoRecorder(env, os.path.join(a.monitor_dir, 'video/'),
prefix='train.{}'.format(rank), force=True)
env = Monitor(env, a.monitor_dir, prefix=str(rank), force=True)
return env
return _init
env = SubprocVecEnv([_make_env(i, a) for i in range(a.num_envs)])
else:
env = gym.make(a.env_id)
env = TimeFeatureWrapper(env, test_mode=True)
env = SeedEnv(env, seed=a.eval_seed)
if a.record_video:
env = VideoRecorder(env, os.path.join(a.monitor_dir, 'video/'),
prefix='eval', callback=True, force=True)
env = Monitor(env, a.monitor_dir, prefix='eval',force=True)
return env
if __name__ == '__main__':
a = parse_args()
# === Reset logger ===
logger.Config.use(filename=a.logging, level=a.log_level, colored=True, reset=True)
LOG = logger.getLogger('TD3')
# === Print welcome message ===
LOG.add_row('')
LOG.add_rows('TD3', fmt='{:@f:ANSI_Shadow}', align='center')
LOG.add_line()
LOG.add_rows('{}'.format(__copyright__))
LOG.flush('INFO')
time.sleep(1)
# === Print arguments ===
LOG.set_header('Arguments')
LOG.add_row('Log dir', a.logdir)
LOG.add_row('Logging path', a.logging)
LOG.add_row('Monitor path', a.monitor_dir)
LOG.add_row('Tensorboard path', a.tb_logdir)
LOG.add_row('Model path', a.model_dir)
LOG.add_row('Env ID', a.env_id)
LOG.add_row('Seed', a.seed)
LOG.add_row('Eval seed', a.eval_seed)
LOG.add_row('Record video', a.record_video)
LOG.add_line()
LOG.add_row('Num of envs', a.num_envs)
LOG.add_row('Num of steps/epoch', a.num_steps)
LOG.add_row('Num of gradient steps', a.num_gradsteps)
LOG.add_row('Num of epochs', a.num_epochs)
LOG.add_row('Target update freq', a.target_update)
LOG.add_row('Log interval', a.log_interval)
LOG.add_row('Eval interval', a.eval_interval)
LOG.add_row('Eval episodes', a.eval_episodes)
LOG.add_row('Eval max steps', a.eval_max_steps)
LOG.add_row('Save interval', a.save_interval)
LOG.add_row('Batch size', a.batch_size)
LOG.add_row('Buffer size', a.buffer_size)
LOG.add_row('Min buffer size', a.min_buffer)
LOG.add_row('Verbose', a.verbose)
LOG.add_line()
LOG.add_row('Force MLP', a.force_mlp)
LOG.add_row('Learning rate', a.lr)
LOG.add_row('Gamma', a.gamma)
LOG.add_row('Tau (Polyak)', a.tau)
LOG.add_row('Policy update freq', a.policy_update)
LOG.add_row('Target update freq', a.target_update)
LOG.add_row('Action noise', a.action_noise)
LOG.add_row('Action noise clip', a.action_noise_clip)
LOG.add_row('Max gradient norm', a.max_grad_norm)
LOG.add_row('Explore noise', a.explore_noise)
LOG.add_row('Explore noise mean', a.explore_noise_mean)
LOG.add_row('Explore noise scale', a.explore_noise_scale)
LOG.flush('WARNING')
set_global_seeds(a.seed)
# === Make envs ===
env = make_env(a, eval=False)
eval_env = make_env(a, eval=True)
LOG.debug('Action space: {}'.format(env.action_space))
LOG.debug('Observation space: {}'.format(env.observation_space))
# === Create action noise ===
if a.explore_noise:
explore_noise = NormalActionNoise(a.explore_noise_mean,
a.explore_noise_scale)
else:
explore_noise = None
# === Create model ===
try:
model = TD3(env, learning_rate = a.lr,
buffer_size = a.buffer_size,
min_buffer = a.min_buffer,
n_steps = a.num_steps,
n_gradsteps = a.num_gradsteps,
batch_size = a.batch_size,
policy_update = a.policy_update,
gamma = a.gamma,
tau = a.tau,
max_grad_norm = a.max_grad_norm,
action_noise = a.action_noise,
action_noise_clip = a.action_noise_clip,
explore_noise = explore_noise,
force_mlp = a.force_mlp,
verbose = a.verbose)
        # Total timesteps = num_steps * num_envs * num_epochs (default ~ 1M)
model.learn(a.num_steps * a.num_envs * a.num_epochs,
tb_logdir = a.tb_logdir,
log_interval = a.log_interval,
eval_env = eval_env,
eval_interval = a.eval_interval,
eval_episodes = a.eval_episodes,
eval_max_steps = a.eval_max_steps,
save_interval = a.save_interval,
save_path = a.model_dir,
target_update = a.target_update)
LOG.info('DONE')
# Save complete model (continue training)
saved_path = model.save(a.model_dir)
LOG.info('Saving model to: {}'.format(saved_path))
# load the "latest" checkpoint
loaded_model = TD3.load(a.model_dir)
# or you can directly load from saved_path
# loaded_model = TD3.load(saved_path)
# set env to continue training
# loaded_model.set_env(env)
        # loaded_model.learn(a.num_steps * a.num_envs * a.num_epochs * 2,
# tb_logdir = a.tb_logdir,
# log_interval = a.log_interval,
# eval_env = eval_env,
# eval_interval = a.eval_interval,
# eval_episodes = a.eval_episodes,
# eval_max_steps = a.eval_max_steps)
# Save agent only
# saved_path = model.agent.save(a.model_dir)
# LOG.info('Saving model to: {}'.format(saved_path))
# loaded_model = TD3Agent.load(saved_path)
# Evaluation
LOG.info('Evaluating model (Latest checkpoint)')
eps_rews, eps_steps = loaded_model.eval(eval_env, n_episodes=20)
max_idx = np.argmax(eps_rews)
max_rews = eps_rews[max_idx]
max_steps = eps_steps[max_idx]
mean_rews = np.mean(eps_rews)
std_rews = np.std(eps_rews)
mean_steps = np.mean(eps_steps)
# === Print evaluation results ===
LOG.set_header('Final Evaluation Results')
LOG.add_line()
LOG.add_row('Max rewards', max_rews)
LOG.add_row(' Length', max_steps)
LOG.add_line()
LOG.add_row('Mean rewards', mean_rews.round(3))
LOG.add_row('Std rewards', std_rews, fmt='{}: {:.6f}')
LOG.add_row('Mean length', mean_steps)
LOG.add_line()
LOG.flush('INFO')
# load the "best" checkpoints
loaded_model = TD3.load(a.model_dir, best=True)
LOG.info('Evaluating model (Best checkpoint)')
eps_rews, eps_steps = loaded_model.eval(eval_env, n_episodes=20)
max_idx = np.argmax(eps_rews)
max_rews = eps_rews[max_idx]
max_steps = eps_steps[max_idx]
mean_rews = np.mean(eps_rews)
std_rews = np.std(eps_rews)
mean_steps = np.mean(eps_steps)
# === Print evaluation results ===
LOG.set_header('Final Evaluation Results')
LOG.add_line()
LOG.add_row('Max rewards', max_rews)
LOG.add_row(' Length', max_steps)
LOG.add_line()
LOG.add_row('Mean rewards', mean_rews.round(3))
LOG.add_row('Std rewards', std_rews, fmt='{}: {:.6f}')
LOG.add_row('Mean length', mean_steps)
LOG.add_line()
LOG.flush('INFO')
except:
LOG.exception('Exception occurred')
env.close()
eval_env.close()
exit(1)
env.close()
eval_env.close()
| 48.058642 | 148 | 0.594759 |
f08e9fef94779e8de47ef9a93ff00a1b7906bd4b
| 2,901 |
py
|
Python
|
APIs/samples/ScanerAPI/EVAL_LONGITUDINAL_CTRL/python/eval_longitudinal_ctrl_pedalPos.py
|
AVSGuillaume/SCANeR-Samples-Pack
|
fb872ebb77d2faeae25e74ad11a2e947cd0e0ff5
|
[
"MIT"
] | null | null | null |
APIs/samples/ScanerAPI/EVAL_LONGITUDINAL_CTRL/python/eval_longitudinal_ctrl_pedalPos.py
|
AVSGuillaume/SCANeR-Samples-Pack
|
fb872ebb77d2faeae25e74ad11a2e947cd0e0ff5
|
[
"MIT"
] | null | null | null |
APIs/samples/ScanerAPI/EVAL_LONGITUDINAL_CTRL/python/eval_longitudinal_ctrl_pedalPos.py
|
AVSGuillaume/SCANeR-Samples-Pack
|
fb872ebb77d2faeae25e74ad11a2e947cd0e0ff5
|
[
"MIT"
] | 5 |
2022-02-01T06:27:22.000Z
|
2022-03-16T13:19:49.000Z
|
#!*****************************************************************************
#* \project : SCANeR_API *
#* \file : EVAL_LONGITUDINAL_CTRL.py *
#* \Brief : Longitudinal control (pedal positions) with SCANeR API. *
#* \Copyright: OKTAL S.A. all rights reserved *
# *****************************************************************************/
#!/usr/bin/python
import os
import inspect
from time import sleep
this_file = inspect.currentframe().f_code.co_filename
this_dir = os.path.dirname(this_file)
# to find scaner_api dll
if (os.name == 'nt'):
os.chdir(os.path.abspath(os.environ['STUDIO_PATH']+'./SCANeRstudio_2022/APIs/bin/x64/vs2019'))
from scaner import *
parser = ScanerApiOption()
(options, args) = parser.parse_args()
Process_InitParams(options.process_name, options.configuration, ctypes.c_float(options.frequency))
status = PS_DAEMON
try:
# read access to radar and ExportChannel
radar_300000 = Com_declareInputData('Network/ISensor/SensorTargets', 300000);
EC_1000 = Com_declareInputData('Network/IUser/ExportChannel', 1000);
# write access to Shared Memory for longitudinal control
CabToModelCorrective_0 = Com_declareOutputData('Shm/ModelCabin/CabToModelCorrective', 0);
distanceToCollision = -1;
while status != PS_DEAD:
# Process manager Run
Process_Wait()
Process_Run()
#Process manager State
old_status = status
status = Process_GetState()
TimeOfUpdate = Process_GetTime()
if status == PS_RUNNING:
time = Process_GetTime();
if Com_updateInputs(UT_NetworkData) == 0:
print('Update Network inputs failed...')
targetsCount = Com_getShortData(radar_300000, "targetsArrayCount")
throttle = 0;
brakePedal = 0;
if time < 10:
throttle = .4;
brakePedal = 0;
else:
if time < 20:
throttle = .08;
brakePedal = 0;
else:
throttle = 0;
brakePedal = 50;
Com_setDoubleData(CabToModelCorrective_0, "AcceleratorAdditive", throttle);
Com_setDoubleData(CabToModelCorrective_0, "AcceleratorMultiplicative", 0);
Com_setDoubleData(CabToModelCorrective_0, "BrakeAdditive", brakePedal);
Com_setDoubleData(CabToModelCorrective_0, "BrakeMultiplicative", 0);
Com_setDoubleData(CabToModelCorrective_0, "TimeOfUpdate", TimeOfUpdate);
if Com_updateOutputs(UT_ShmData) == 0: #flush the corrective message
print('Update Shm outputs failed...')
except KeyboardInterrupt:
print('Bye bye')
Process_Close()
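# A plain-Python restatement of the open-loop pedal schedule applied in the loop above,
# kept here for clarity only; times are seconds of simulation time.
def pedal_schedule(t):
    if t < 10:
        return 0.4, 0   # accelerate
    if t < 20:
        return 0.08, 0  # ease off the throttle
    return 0, 50        # brake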
| 40.291667 | 98 | 0.574629 |
f61f9c1205e0e6a18102b4f63bafbab82b71903c
| 37,120 |
py
|
Python
|
.v/lib/python3.6/site-packages/ansible/modules/monitoring/zabbix/zabbix_host.py
|
binRick/ansible-callback-concise
|
fd7b05596b30872af3f79a32f223a0458bffbedd
|
[
"MIT"
] | 1 |
2020-03-22T01:04:39.000Z
|
2020-03-22T01:04:39.000Z
|
.v/lib/python3.6/site-packages/ansible/modules/monitoring/zabbix/zabbix_host.py
|
binRick/ansible-callback-concise
|
fd7b05596b30872af3f79a32f223a0458bffbedd
|
[
"MIT"
] | null | null | null |
.v/lib/python3.6/site-packages/ansible/modules/monitoring/zabbix/zabbix_host.py
|
binRick/ansible-callback-concise
|
fd7b05596b30872af3f79a32f223a0458bffbedd
|
[
"MIT"
] | 1 |
2020-03-22T01:04:48.000Z
|
2020-03-22T01:04:48.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_host
short_description: Create/update/delete Zabbix hosts
description:
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
version_added: "2.0"
author:
- "Cove (@cove)"
- Tony Minfei Ding (!UNKNOWN)
- Harrison Gu (@harrisongu)
- Werner Dijkerman (@dj-wasabi)
- Eike Frost (@eikef)
requirements:
- "python >= 2.6"
- "zabbix-api >= 0.5.3"
options:
host_name:
description:
- Name of the host in Zabbix.
- host_name is the unique identifier used and cannot be updated using this module.
required: true
visible_name:
description:
- Visible name of the host in Zabbix.
version_added: '2.3'
description:
description:
- Description of the host in Zabbix.
version_added: '2.5'
host_groups:
description:
- List of host groups the host is part of.
link_templates:
description:
- List of templates linked to the host.
inventory_mode:
description:
- Configure the inventory mode.
choices: ['automatic', 'manual', 'disabled']
version_added: '2.1'
inventory_zabbix:
description:
- Add Facts for a zabbix inventory (e.g. Tag) (see example below).
- Please review the interface documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/3.2/manual/api/reference/host/object#host_inventory'
version_added: '2.5'
status:
description:
- Monitoring status of the host.
choices: ['enabled', 'disabled']
default: 'enabled'
state:
description:
- State of the host.
- On C(present), it will create if host does not exist or update the host if the associated data is different.
- On C(absent) will remove a host if it exists.
choices: ['present', 'absent']
default: 'present'
proxy:
description:
- The name of the Zabbix proxy to be used.
interfaces:
description:
- List of interfaces to be created for the host (see example below).
- 'Available keys are: I(dns), I(ip), I(main), I(port), I(type), I(useip), and I(bulk).'
- Please review the interface documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
- If an interface definition is incomplete, this module will attempt to fill in sensible values.
- I(type) can also be C(agent), C(snmp), C(ipmi), or C(jmx) instead of its numerical value.
default: []
tls_connect:
description:
- Specifies what encryption to use for outgoing connections.
- Possible values, 1 (no encryption), 2 (PSK), 4 (certificate).
- Works only with >= Zabbix 3.0
default: 1
version_added: '2.5'
tls_accept:
description:
- Specifies what types of connections are allowed for incoming connections.
- The tls_accept parameter accepts values of 1 to 7
- Possible values, 1 (no encryption), 2 (PSK), 4 (certificate).
- Values can be combined.
- Works only with >= Zabbix 3.0
default: 1
version_added: '2.5'
tls_psk_identity:
description:
- It is a unique name by which this specific PSK is referred to by Zabbix components
- Do not put sensitive information in the PSK identity string, it is transmitted over the network unencrypted.
- Works only with >= Zabbix 3.0
version_added: '2.5'
tls_psk:
description:
- PSK value is a hard to guess string of hexadecimal digits.
- The preshared key, at least 32 hex digits. Required if either tls_connect or tls_accept has PSK enabled.
- Works only with >= Zabbix 3.0
version_added: '2.5'
ca_cert:
description:
- Required certificate issuer.
- Works only with >= Zabbix 3.0
version_added: '2.5'
aliases: [ tls_issuer ]
tls_subject:
description:
- Required certificate subject.
- Works only with >= Zabbix 3.0
version_added: '2.5'
ipmi_authtype:
description:
- IPMI authentication algorithm.
- Please review the Host object documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/3.4/manual/api/reference/host/object'
- Possible values are, C(0) (none), C(1) (MD2), C(2) (MD5), C(4) (straight), C(5) (OEM), C(6) (RMCP+),
with -1 being the API default.
- Please note that the Zabbix API will treat absent settings as default when updating
any of the I(ipmi_)-options; this means that if you attempt to set any of the four
options individually, the rest will be reset to default values.
version_added: '2.5'
ipmi_privilege:
description:
- IPMI privilege level.
- Please review the Host object documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/3.4/manual/api/reference/host/object'
- Possible values are C(1) (callback), C(2) (user), C(3) (operator), C(4) (admin), C(5) (OEM), with C(2)
being the API default.
- also see the last note in the I(ipmi_authtype) documentation
version_added: '2.5'
ipmi_username:
description:
- IPMI username.
- also see the last note in the I(ipmi_authtype) documentation
version_added: '2.5'
ipmi_password:
description:
- IPMI password.
- also see the last note in the I(ipmi_authtype) documentation
version_added: '2.5'
force:
description:
- Overwrite the host configuration, even if already present.
type: bool
default: 'yes'
version_added: '2.0'
extends_documentation_fragment:
- zabbix
'''
EXAMPLES = '''
- name: Create a new host or update an existing host's info
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
visible_name: ExampleName
description: My ExampleHost Description
host_groups:
- Example group1
- Example group2
link_templates:
- Example template1
- Example template2
status: enabled
state: present
inventory_mode: manual
inventory_zabbix:
tag: "{{ your_tag }}"
alias: "{{ your_alias }}"
notes: "Special Informations: {{ your_informations | default('None') }}"
location: "{{ your_location }}"
site_rack: "{{ your_site_rack }}"
os: "{{ your_os }}"
hardware: "{{ your_hardware }}"
ipmi_authtype: 2
ipmi_privilege: 4
ipmi_username: username
ipmi_password: password
interfaces:
- type: 1
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
- type: 4
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 12345
proxy: a.zabbix.proxy
- name: Update an existing host's TLS settings
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
visible_name: ExampleName
host_groups:
- Example group1
tls_psk_identity: test
tls_connect: 2
tls_psk: 123456789abcdef123456789abcdef12
'''
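# A minimal sketch of how the bit-combinable tls_accept values documented above add up
# before being passed to the Zabbix API; the keyword names here are illustrative, only
# the resulting integer (1-7) is what the module sends.
def _example_tls_accept_value(unencrypted=False, psk=True, certificate=True):
    # 1 = no encryption, 2 = PSK, 4 = certificate; e.g. PSK + certificate -> 6
    return (1 if unencrypted else 0) | (2 if psk else 0) | (4 if certificate else 0)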
import atexit
import copy
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
# Extend the ZabbixAPI
    # Since the zabbix-api python module is too old (version 1.0, no higher version so far),
    # it does not support the 'hostinterface' API calls,
    # so we have to inherit the ZabbixAPI class to add 'hostinterface' support.
class ZabbixAPIExtends(ZabbixAPI):
hostinterface = None
def __init__(self, server, timeout, user, passwd, validate_certs, **kwargs):
ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd, validate_certs=validate_certs)
self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
from ansible.module_utils.basic import AnsibleModule
class Host(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
    # check if the host exists
def is_host_exist(self, host_name):
result = self._zapi.host.get({'filter': {'host': host_name}})
return result
# check if host group exists
def check_host_group_exist(self, group_names):
for group_name in group_names:
result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
if not result:
self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
return True
def get_template_ids(self, template_list):
template_ids = []
if template_list is None or len(template_list) == 0:
return template_ids
for template in template_list:
template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
if len(template_list) < 1:
self._module.fail_json(msg="Template not found: %s" % template)
else:
template_id = template_list[0]['templateid']
template_ids.append(template_id)
return template_ids
def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name, description, tls_connect,
tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege,
ipmi_username, ipmi_password):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status,
'tls_connect': tls_connect, 'tls_accept': tls_accept}
if proxy_id:
parameters['proxy_hostid'] = proxy_id
if visible_name:
parameters['name'] = visible_name
if tls_psk_identity is not None:
parameters['tls_psk_identity'] = tls_psk_identity
if tls_psk is not None:
parameters['tls_psk'] = tls_psk
if tls_issuer is not None:
parameters['tls_issuer'] = tls_issuer
if tls_subject is not None:
parameters['tls_subject'] = tls_subject
if description:
parameters['description'] = description
if ipmi_authtype is not None:
parameters['ipmi_authtype'] = ipmi_authtype
if ipmi_privilege is not None:
parameters['ipmi_privilege'] = ipmi_privilege
if ipmi_username is not None:
parameters['ipmi_username'] = ipmi_username
if ipmi_password is not None:
parameters['ipmi_password'] = ipmi_password
host_list = self._zapi.host.create(parameters)
if len(host_list) >= 1:
return host_list['hostids'][0]
except Exception as e:
self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id,
visible_name, description, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype,
ipmi_privilege, ipmi_username, ipmi_password):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'tls_connect': tls_connect,
'tls_accept': tls_accept}
if proxy_id >= 0:
parameters['proxy_hostid'] = proxy_id
if visible_name:
parameters['name'] = visible_name
if tls_psk_identity:
parameters['tls_psk_identity'] = tls_psk_identity
if tls_psk:
parameters['tls_psk'] = tls_psk
if tls_issuer:
parameters['tls_issuer'] = tls_issuer
if tls_subject:
parameters['tls_subject'] = tls_subject
if description:
parameters['description'] = description
if ipmi_authtype:
parameters['ipmi_authtype'] = ipmi_authtype
if ipmi_privilege:
parameters['ipmi_privilege'] = ipmi_privilege
if ipmi_username:
parameters['ipmi_username'] = ipmi_username
if ipmi_password:
parameters['ipmi_password'] = ipmi_password
self._zapi.host.update(parameters)
interface_list_copy = exist_interface_list
if interfaces:
for interface in interfaces:
flag = False
interface_str = interface
for exist_interface in exist_interface_list:
interface_type = int(interface['type'])
exist_interface_type = int(exist_interface['type'])
if interface_type == exist_interface_type:
# update
interface_str['interfaceid'] = exist_interface['interfaceid']
self._zapi.hostinterface.update(interface_str)
flag = True
interface_list_copy.remove(exist_interface)
break
if not flag:
# add
interface_str['hostid'] = host_id
self._zapi.hostinterface.create(interface_str)
# remove
remove_interface_ids = []
for remove_interface in interface_list_copy:
interface_id = remove_interface['interfaceid']
remove_interface_ids.append(interface_id)
if len(remove_interface_ids) > 0:
self._zapi.hostinterface.delete(remove_interface_ids)
except Exception as e:
self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))
def delete_host(self, host_id, host_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.delete([host_id])
except Exception as e:
self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))
# get host by host name
def get_host_by_host_name(self, host_name):
host_list = self._zapi.host.get({'output': 'extend', 'selectInventory': 'extend', 'filter': {'host': [host_name]}})
if len(host_list) < 1:
self._module.fail_json(msg="Host not found: %s" % host_name)
else:
return host_list[0]
# get proxyid by proxy name
def get_proxyid_by_proxy_name(self, proxy_name):
proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
if len(proxy_list) < 1:
self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
else:
return int(proxy_list[0]['proxyid'])
# get group ids by group names
def get_group_ids_by_group_names(self, group_names):
group_ids = []
if self.check_host_group_exist(group_names):
group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
for group in group_list:
group_id = group['groupid']
group_ids.append({'groupid': group_id})
return group_ids
# get host templates by host id
def get_host_templates_by_host_id(self, host_id):
template_ids = []
template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
for template in template_list:
template_ids.append(template['templateid'])
return template_ids
# get host groups by host id
def get_host_groups_by_host_id(self, host_id):
exist_host_groups = []
host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
if len(host_groups_list) >= 1:
for host_groups_name in host_groups_list:
exist_host_groups.append(host_groups_name['name'])
return exist_host_groups
    # check whether the existing interfaces match the given interfaces
def check_interface_properties(self, exist_interface_list, interfaces):
interfaces_port_list = []
if interfaces is not None:
if len(interfaces) >= 1:
for interface in interfaces:
interfaces_port_list.append(int(interface['port']))
exist_interface_ports = []
if len(exist_interface_list) >= 1:
for exist_interface in exist_interface_list:
exist_interface_ports.append(int(exist_interface['port']))
if set(interfaces_port_list) != set(exist_interface_ports):
return True
for exist_interface in exist_interface_list:
            exist_interface_port = int(exist_interface['port'])
            for interface in interfaces:
                interface_port = int(interface['port'])
                if interface_port == exist_interface_port:
for key in interface.keys():
if str(exist_interface[key]) != str(interface[key]):
return True
return False
# get the status of host by host
def get_host_status_by_host(self, host):
return host['status']
    # check all the properties before linking or clearing templates
def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
exist_interfaces, host, proxy_id, visible_name, description, host_name,
inventory_mode, inventory_zabbix, tls_accept, tls_psk_identity, tls_psk,
tls_issuer, tls_subject, tls_connect, ipmi_authtype, ipmi_privilege,
ipmi_username, ipmi_password):
# get the existing host's groups
exist_host_groups = self.get_host_groups_by_host_id(host_id)
if set(host_groups) != set(exist_host_groups):
return True
# get the existing status
exist_status = self.get_host_status_by_host(host)
if int(status) != int(exist_status):
return True
        # check whether the existing interfaces match the given interfaces
if self.check_interface_properties(exist_interfaces, interfaces):
return True
# get the existing templates
exist_template_ids = self.get_host_templates_by_host_id(host_id)
if set(list(template_ids)) != set(exist_template_ids):
return True
if int(host['proxy_hostid']) != int(proxy_id):
return True
# Check whether the visible_name has changed; Zabbix defaults to the technical hostname if not set.
if visible_name:
if host['name'] != visible_name and host['name'] != host_name:
return True
# Only compare description if it is given as a module parameter
if description:
if host['description'] != description:
return True
if inventory_mode:
if host['inventory']:
if int(host['inventory']['inventory_mode']) != self.inventory_mode_numeric(inventory_mode):
return True
elif inventory_mode != 'disabled':
return True
if inventory_zabbix:
proposed_inventory = copy.deepcopy(host['inventory'])
proposed_inventory.update(inventory_zabbix)
if proposed_inventory != host['inventory']:
return True
if tls_accept is not None and 'tls_accept' in host:
if int(host['tls_accept']) != tls_accept:
return True
if tls_psk_identity is not None and 'tls_psk_identity' in host:
if host['tls_psk_identity'] != tls_psk_identity:
return True
if tls_psk is not None and 'tls_psk' in host:
if host['tls_psk'] != tls_psk:
return True
if tls_issuer is not None and 'tls_issuer' in host:
if host['tls_issuer'] != tls_issuer:
return True
if tls_subject is not None and 'tls_subject' in host:
if host['tls_subject'] != tls_subject:
return True
if tls_connect is not None and 'tls_connect' in host:
if int(host['tls_connect']) != tls_connect:
return True
if ipmi_authtype is not None:
if int(host['ipmi_authtype']) != ipmi_authtype:
return True
if ipmi_privilege is not None:
if int(host['ipmi_privilege']) != ipmi_privilege:
return True
if ipmi_username is not None:
if host['ipmi_username'] != ipmi_username:
return True
if ipmi_password is not None:
if host['ipmi_password'] != ipmi_password:
return True
return False
# link or clear template of the host
def link_or_clear_template(self, host_id, template_id_list, tls_connect, tls_accept, tls_psk_identity, tls_psk,
tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password):
# get host's exist template ids
exist_template_id_list = self.get_host_templates_by_host_id(host_id)
exist_template_ids = set(exist_template_id_list)
template_ids = set(template_id_list)
template_id_list = list(template_ids)
        # get templates to unlink and clear
templates_clear = exist_template_ids.difference(template_ids)
templates_clear_list = list(templates_clear)
request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list,
'tls_connect': tls_connect, 'tls_accept': tls_accept, 'ipmi_authtype': ipmi_authtype,
'ipmi_privilege': ipmi_privilege, 'ipmi_username': ipmi_username, 'ipmi_password': ipmi_password}
if tls_psk_identity is not None:
request_str['tls_psk_identity'] = tls_psk_identity
if tls_psk is not None:
request_str['tls_psk'] = tls_psk
if tls_issuer is not None:
request_str['tls_issuer'] = tls_issuer
if tls_subject is not None:
request_str['tls_subject'] = tls_subject
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.update(request_str)
except Exception as e:
self._module.fail_json(msg="Failed to link template to host: %s" % e)
def inventory_mode_numeric(self, inventory_mode):
if inventory_mode == "automatic":
return int(1)
elif inventory_mode == "manual":
return int(0)
elif inventory_mode == "disabled":
return int(-1)
return inventory_mode
# Update the host inventory_mode
def update_inventory_mode(self, host_id, inventory_mode):
# nothing was set, do nothing
if not inventory_mode:
return
inventory_mode = self.inventory_mode_numeric(inventory_mode)
# watch for - https://support.zabbix.com/browse/ZBX-6033
request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.update(request_str)
except Exception as e:
self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)
def update_inventory_zabbix(self, host_id, inventory):
if not inventory:
return
request_str = {'hostid': host_id, 'inventory': inventory}
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.update(request_str)
except Exception as e:
self._module.fail_json(msg="Failed to set inventory to host: %s" % e)
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
host_name=dict(type='str', required=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
host_groups=dict(type='list', required=False),
link_templates=dict(type='list', required=False),
status=dict(default="enabled", choices=['enabled', 'disabled']),
state=dict(default="present", choices=['present', 'absent']),
inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
ipmi_authtype=dict(type='int', default=None),
ipmi_privilege=dict(type='int', default=None),
ipmi_username=dict(type='str', required=False, default=None),
ipmi_password=dict(type='str', required=False, default=None, no_log=True),
tls_connect=dict(type='int', default=1),
tls_accept=dict(type='int', default=1),
tls_psk_identity=dict(type='str', required=False),
tls_psk=dict(type='str', required=False),
ca_cert=dict(type='str', required=False, aliases=['tls_issuer']),
tls_subject=dict(type='str', required=False),
inventory_zabbix=dict(required=False, type='dict'),
timeout=dict(type='int', default=10),
interfaces=dict(type='list', required=False),
force=dict(type='bool', default=True),
proxy=dict(type='str', required=False),
visible_name=dict(type='str', required=False),
description=dict(type='str', required=False)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
validate_certs = module.params['validate_certs']
host_name = module.params['host_name']
visible_name = module.params['visible_name']
description = module.params['description']
host_groups = module.params['host_groups']
link_templates = module.params['link_templates']
inventory_mode = module.params['inventory_mode']
ipmi_authtype = module.params['ipmi_authtype']
ipmi_privilege = module.params['ipmi_privilege']
ipmi_username = module.params['ipmi_username']
ipmi_password = module.params['ipmi_password']
tls_connect = module.params['tls_connect']
tls_accept = module.params['tls_accept']
tls_psk_identity = module.params['tls_psk_identity']
tls_psk = module.params['tls_psk']
tls_issuer = module.params['ca_cert']
tls_subject = module.params['tls_subject']
inventory_zabbix = module.params['inventory_zabbix']
status = module.params['status']
state = module.params['state']
timeout = module.params['timeout']
interfaces = module.params['interfaces']
force = module.params['force']
proxy = module.params['proxy']
# convert enabled to 0; disabled to 1
status = 1 if status == "disabled" else 0
zbx = None
# login to zabbix
try:
zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
validate_certs=validate_certs)
zbx.login(login_user, login_password)
atexit.register(zbx.logout)
except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
host = Host(module, zbx)
template_ids = []
if link_templates:
template_ids = host.get_template_ids(link_templates)
group_ids = []
if host_groups:
group_ids = host.get_group_ids_by_group_names(host_groups)
ip = ""
if interfaces:
# ensure interfaces are well-formed
for interface in interfaces:
if 'type' not in interface:
module.fail_json(msg="(interface) type needs to be specified for interface '%s'." % interface)
interfacetypes = {'agent': 1, 'snmp': 2, 'ipmi': 3, 'jmx': 4}
if interface['type'] in interfacetypes.keys():
interface['type'] = interfacetypes[interface['type']]
if interface['type'] < 1 or interface['type'] > 4:
module.fail_json(msg="Interface type can only be 1-4 for interface '%s'." % interface)
if 'useip' not in interface:
interface['useip'] = 0
if 'dns' not in interface:
if interface['useip'] == 0:
module.fail_json(msg="dns needs to be set if useip is 0 on interface '%s'." % interface)
interface['dns'] = ''
if 'ip' not in interface:
if interface['useip'] == 1:
module.fail_json(msg="ip needs to be set if useip is 1 on interface '%s'." % interface)
interface['ip'] = ''
if 'main' not in interface:
interface['main'] = 0
if 'port' not in interface:
if interface['type'] == 1:
interface['port'] = "10050"
elif interface['type'] == 2:
interface['port'] = "161"
elif interface['type'] == 3:
interface['port'] = "623"
elif interface['type'] == 4:
interface['port'] = "12345"
if interface['type'] == 1:
ip = interface['ip']
# Use proxy specified, or set to 0
if proxy:
proxy_id = host.get_proxyid_by_proxy_name(proxy)
else:
proxy_id = 0
    # check if the host exists
is_host_exist = host.is_host_exist(host_name)
if is_host_exist:
# get host id by host name
zabbix_host_obj = host.get_host_by_host_name(host_name)
host_id = zabbix_host_obj['hostid']
# If proxy is not specified as a module parameter, use the existing setting
if proxy is None:
proxy_id = int(zabbix_host_obj['proxy_hostid'])
if state == "absent":
# remove host
host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully deleted host %s" % host_name)
else:
if not host_groups:
# if host_groups have not been specified when updating an existing host, just
# get the group_ids from the existing host without updating them.
host_groups = host.get_host_groups_by_host_id(host_id)
group_ids = host.get_group_ids_by_group_names(host_groups)
# get existing host's interfaces
exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
# if no interfaces were specified with the module, start with an empty list
if not interfaces:
interfaces = []
# When force=no is specified, append existing interfaces to interfaces to update. When
# no interfaces have been specified, copy existing interfaces as specified from the API.
# Do the same with templates and host groups.
if not force or not interfaces:
for interface in copy.deepcopy(exist_interfaces):
# remove values not used during hostinterface.add/update calls
for key in tuple(interface.keys()):
if key in ['interfaceid', 'hostid', 'bulk']:
interface.pop(key, None)
for index in interface.keys():
if index in ['useip', 'main', 'type', 'port']:
interface[index] = int(interface[index])
if interface not in interfaces:
interfaces.append(interface)
if not force or link_templates is None:
template_ids = list(set(template_ids + host.get_host_templates_by_host_id(host_id)))
if not force:
for group_id in host.get_group_ids_by_group_names(host.get_host_groups_by_host_id(host_id)):
if group_id not in group_ids:
group_ids.append(group_id)
# update host
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
exist_interfaces, zabbix_host_obj, proxy_id, visible_name,
description, host_name, inventory_mode, inventory_zabbix,
tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, tls_connect,
ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password):
host.update_host(host_name, group_ids, status, host_id,
interfaces, exist_interfaces, proxy_id, visible_name, description, tls_connect, tls_accept,
tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password)
host.link_or_clear_template(host_id, template_ids, tls_connect, tls_accept, tls_psk_identity,
tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege,
ipmi_username, ipmi_password)
host.update_inventory_mode(host_id, inventory_mode)
host.update_inventory_zabbix(host_id, inventory_zabbix)
module.exit_json(changed=True,
                                 result="Successfully updated host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
if state == "absent":
# the host is already deleted.
module.exit_json(changed=False)
if not group_ids:
module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
        if not interfaces:
module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
# create host
host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id, visible_name, description, tls_connect,
tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege,
ipmi_username, ipmi_password)
host.link_or_clear_template(host_id, template_ids, tls_connect, tls_accept, tls_psk_identity,
tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password)
host.update_inventory_mode(host_id, inventory_mode)
host.update_inventory_zabbix(host_id, inventory_zabbix)
module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
host_name, ip, link_templates))
if __name__ == '__main__':
main()
| 43.11266 | 145 | 0.607462 |
0602c4ff576225a31ae3c98b25777941238ad55d
| 15,215 |
py
|
Python
|
django/apps/registry.py
|
jedie/django
|
09f2cdbe1a43e79e31f5ea509b59d4c87db29832
|
[
"BSD-3-Clause"
] | null | null | null |
django/apps/registry.py
|
jedie/django
|
09f2cdbe1a43e79e31f5ea509b59d4c87db29832
|
[
"BSD-3-Clause"
] | null | null | null |
django/apps/registry.py
|
jedie/django
|
09f2cdbe1a43e79e31f5ea509b59d4c87db29832
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import threading
import warnings
from collections import Counter, OrderedDict, defaultdict
from functools import partial
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils import lru_cache
from .config import AppConfig
class Apps(object):
"""
A registry that stores the configuration of installed applications.
It also keeps track of models eg. to provide reverse-relations.
"""
def __init__(self, installed_apps=()):
# installed_apps is set to None when creating the master registry
# because it cannot be populated at that point. Other registries must
# provide a list of installed apps and are populated immediately.
if installed_apps is None and hasattr(sys.modules[__name__], 'apps'):
raise RuntimeError("You must supply an installed_apps argument.")
# Mapping of app labels => model names => model classes. Every time a
# model is imported, ModelBase.__new__ calls apps.register_model which
# creates an entry in all_models. All imported models are registered,
# regardless of whether they're defined in an installed application
# and whether the registry has been populated. Since it isn't possible
# to reimport a module safely (it could reexecute initialization code)
# all_models is never overridden or reset.
self.all_models = defaultdict(OrderedDict)
# Mapping of labels to AppConfig instances for installed apps.
self.app_configs = OrderedDict()
# Stack of app_configs. Used to store the current state in
# set_available_apps and set_installed_apps.
self.stored_app_configs = []
# Whether the registry is populated.
self.apps_ready = self.models_ready = self.ready = False
# Lock for thread-safe population.
self._lock = threading.Lock()
# Maps ("app_label", "modelname") tuples to lists of functions to be
# called when the corresponding model is ready. Used by this class's
# `lazy_model_operation()` and `do_pending_operations()` methods.
self._pending_operations = defaultdict(list)
# Populate apps and models, unless it's the master registry.
if installed_apps is not None:
self.populate(installed_apps)
def populate(self, installed_apps=None):
"""
Loads application configurations and models.
This method imports each application module and then each model module.
It is thread safe and idempotent, but not reentrant.
"""
if self.ready:
return
# populate() might be called by two threads in parallel on servers
# that create threads before initializing the WSGI callable.
with self._lock:
if self.ready:
return
# app_config should be pristine, otherwise the code below won't
# guarantee that the order matches the order in INSTALLED_APPS.
if self.app_configs:
raise RuntimeError("populate() isn't reentrant")
# Load app configs and app modules.
for entry in installed_apps:
if isinstance(entry, AppConfig):
app_config = entry
else:
app_config = AppConfig.create(entry)
if app_config.label in self.app_configs:
raise ImproperlyConfigured(
"Application labels aren't unique, "
"duplicates: %s" % app_config.label)
self.app_configs[app_config.label] = app_config
# Check for duplicate app names.
counts = Counter(
app_config.name for app_config in self.app_configs.values())
duplicates = [
name for name, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Application names aren't unique, "
"duplicates: %s" % ", ".join(duplicates))
self.apps_ready = True
# Load models.
for app_config in self.app_configs.values():
all_models = self.all_models[app_config.label]
app_config.import_models(all_models)
self.clear_cache()
self.models_ready = True
for app_config in self.get_app_configs():
app_config.ready()
self.ready = True
def check_apps_ready(self):
"""
Raises an exception if all apps haven't been imported yet.
"""
if not self.apps_ready:
raise AppRegistryNotReady("Apps aren't loaded yet.")
def check_models_ready(self):
"""
Raises an exception if all models haven't been imported yet.
"""
if not self.models_ready:
raise AppRegistryNotReady("Models aren't loaded yet.")
def get_app_configs(self):
"""
Imports applications and returns an iterable of app configs.
"""
self.check_apps_ready()
return self.app_configs.values()
def get_app_config(self, app_label):
"""
Imports applications and returns an app config for the given label.
Raises LookupError if no application exists with this label.
"""
self.check_apps_ready()
try:
return self.app_configs[app_label]
except KeyError:
message = "No installed app with label '%s'." % app_label
for app_config in self.get_app_configs():
if app_config.name == app_label:
message += " Did you mean '%s'?" % app_config.label
break
raise LookupError(message)
# This method is performance-critical at least for Django's test suite.
@lru_cache.lru_cache(maxsize=None)
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns a list of all installed models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
"""
self.check_models_ready()
result = []
for app_config in self.app_configs.values():
result.extend(list(app_config.get_models(
include_auto_created, include_deferred, include_swapped)))
return result
def get_model(self, app_label, model_name=None):
"""
Returns the model matching the given app_label and model_name.
As a shortcut, this function also accepts a single argument in the
form <app_label>.<model_name>.
model_name is case-insensitive.
Raises LookupError if no application exists with this label, or no
model exists with this name in the application. Raises ValueError if
called with a single argument that doesn't contain exactly one dot.
"""
self.check_models_ready()
if model_name is None:
app_label, model_name = app_label.split('.')
return self.get_app_config(app_label).get_model(model_name.lower())
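    # Illustrative usage sketch (not part of the original source), assuming an
    # installed app with label 'auth' that provides a 'User' model; both calls
    # below return the same model class:
    #
    #     apps.get_model('auth', 'User')
    #     apps.get_model('auth.User')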
def register_model(self, app_label, model):
# Since this method is called when models are imported, it cannot
# perform imports because of the risk of import loops. It mustn't
# call get_app_config().
model_name = model._meta.model_name
app_models = self.all_models[app_label]
if model_name in app_models:
if (model.__name__ == app_models[model_name].__name__ and
model.__module__ == app_models[model_name].__module__):
warnings.warn(
"Model '%s.%s' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models." % (model_name, app_label),
RuntimeWarning, stacklevel=2)
else:
raise RuntimeError(
"Conflicting '%s' models in application '%s': %s and %s." %
(model_name, app_label, app_models[model_name], model))
app_models[model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def is_installed(self, app_name):
"""
Checks whether an application with this name exists in the registry.
app_name is the full name of the app eg. 'django.contrib.admin'.
"""
self.check_apps_ready()
return any(ac.name == app_name for ac in self.app_configs.values())
def get_containing_app_config(self, object_name):
"""
Look for an app config containing a given object.
object_name is the dotted Python path to the object.
Returns the app config for the inner application in case of nesting.
Returns None if the object isn't in any registered app config.
"""
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name[len(app_config.name):]
if subpath == '' or subpath[0] == '.':
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambda ac: -len(ac.name))[0]
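    # Illustrative usage sketch (not part of the original source), assuming
    # 'django.contrib.admin' is installed; the dotted path of one of its objects
    # resolves to that app's config:
    #
    #     apps.get_containing_app_config('django.contrib.admin.models.LogEntry')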
def get_registered_model(self, app_label, model_name):
"""
Similar to get_model(), but doesn't require that an app exists with
the given app_label.
It's safe to call this method at import time, even while the registry
is being populated.
"""
model = self.all_models[app_label].get(model_name.lower())
if model is None:
raise LookupError(
"Model '%s.%s' not registered." % (app_label, model_name))
return model
def set_available_apps(self, available):
"""
Restricts the set of installed apps used by get_app_config[s].
available must be an iterable of application names.
set_available_apps() must be balanced with unset_available_apps().
Primarily used for performance optimization in TransactionTestCase.
        This method is safe in the sense that it doesn't trigger any imports.
"""
available = set(available)
installed = set(app_config.name for app_config in self.get_app_configs())
if not available.issubset(installed):
raise ValueError("Available apps isn't a subset of installed "
"apps, extra apps: %s" % ", ".join(available - installed))
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict(
(label, app_config)
for label, app_config in self.app_configs.items()
if app_config.name in available)
self.clear_cache()
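    # Illustrative usage sketch (not part of the original source); the app names
    # are placeholders and must already be installed. Because the calls must be
    # balanced, a try/finally block is a natural fit:
    #
    #     apps.set_available_apps(['django.contrib.auth', 'django.contrib.contenttypes'])
    #     try:
    #         ...  # work against the restricted registry
    #     finally:
    #         apps.unset_available_apps()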
def unset_available_apps(self):
"""
Cancels a previous call to set_available_apps().
"""
self.app_configs = self.stored_app_configs.pop()
self.clear_cache()
def set_installed_apps(self, installed):
"""
Enables a different set of installed apps for get_app_config[s].
installed must be an iterable in the same format as INSTALLED_APPS.
set_installed_apps() must be balanced with unset_installed_apps(),
even if it exits with an exception.
Primarily used as a receiver of the setting_changed signal in tests.
This method may trigger new imports, which may add new models to the
registry of all imported models. They will stay in the registry even
after unset_installed_apps(). Since it isn't possible to replay
imports safely (eg. that could lead to registering listeners twice),
models are registered when they're imported and never removed.
"""
if not self.ready:
raise AppRegistryNotReady("App registry isn't ready yet.")
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict()
self.apps_ready = self.models_ready = self.ready = False
self.clear_cache()
self.populate(installed)
def unset_installed_apps(self):
"""
Cancels a previous call to set_installed_apps().
"""
self.app_configs = self.stored_app_configs.pop()
self.apps_ready = self.models_ready = self.ready = True
self.clear_cache()
def clear_cache(self):
"""
Clears all internal caches, for methods that alter the app registry.
This is mostly used in tests.
"""
# Call expire cache on each model. This will purge
# the relation tree and the fields cache.
self.get_models.cache_clear()
if self.ready:
            # Bypass self.get_models() so that the cache is not refilled.
            # In particular, this prevents an empty value from being cached while cloning.
for app_config in self.app_configs.values():
for model in app_config.get_models(include_auto_created=True):
model._meta._expire_cache()
def lazy_model_operation(self, function, *model_keys):
"""
Take a function and a number of ("app_label", "modelname") tuples, and
when all the corresponding models have been imported and registered,
call the function with the model classes as its arguments.
The function passed to this method must accept exactly n models as
arguments, where n=len(model_keys).
"""
# If this function depends on more than one model, we recursively turn
# it into a chain of functions that accept a single model argument and
# pass each in turn to lazy_model_operation.
model_key, more_models = model_keys[0], model_keys[1:]
if more_models:
supplied_fn = function
def function(model):
next_function = partial(supplied_fn, model)
self.lazy_model_operation(next_function, *more_models)
# If the model is already loaded, pass it to the function immediately.
# Otherwise, delay execution until the class is prepared.
try:
model_class = self.get_registered_model(*model_key)
except LookupError:
self._pending_operations[model_key].append(function)
else:
function(model_class)
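    # Illustrative usage sketch (not part of the original source); connect_models
    # is a hypothetical callback and the ('library', ...) keys are placeholder
    # ("app_label", "modelname") tuples. The callback runs once both models are
    # registered:
    #
    #     def connect_models(author_model, book_model):
    #         ...  # e.g. wire the two models together
    #
    #     apps.lazy_model_operation(connect_models, ('library', 'author'), ('library', 'book'))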
def do_pending_operations(self, model):
"""
Take a newly-prepared model and pass it to each function waiting for
it. This is called at the very end of `Apps.register_model()`.
"""
key = model._meta.app_label, model._meta.model_name
for function in self._pending_operations.pop(key, []):
function(model)
apps = Apps(installed_apps=None)
| 39.725849 | 89 | 0.634045 |
b6fbe51cbaf5ede275748c926a942b625abfd7fb
| 4,523 |
py
|
Python
|
integration-tests/integration/write_pyarrow.py
|
youngsofun/parquet2
|
e8a0c3576d5b43636fd16a942bc392d450344416
|
[
"Apache-2.0"
] | 127 |
2021-03-30T14:18:38.000Z
|
2022-03-28T09:47:39.000Z
|
integration-tests/integration/write_pyarrow.py
|
youngsofun/parquet2
|
e8a0c3576d5b43636fd16a942bc392d450344416
|
[
"Apache-2.0"
] | 90 |
2021-04-02T19:31:39.000Z
|
2022-03-30T20:53:30.000Z
|
integration-tests/integration/write_pyarrow.py
|
youngsofun/parquet2
|
e8a0c3576d5b43636fd16a942bc392d450344416
|
[
"Apache-2.0"
] | 28 |
2021-04-03T07:41:36.000Z
|
2022-03-12T11:18:31.000Z
|
import pyarrow as pa
import pyarrow.parquet
import os
PYARROW_PATH = "fixtures/pyarrow3"
def case_basic_nullable(size=1):
int64 = [0, 1, None, 3, None, 5, 6, 7, None, 9]
float64 = [0.0, 1.0, None, 3.0, None, 5.0, 6.0, 7.0, None, 9.0]
string = ["Hello", None, "aa", "", None, "abc", None, None, "def", "aaa"]
boolean = [True, None, False, False, None, True, None, None, True, True]
fields = [
pa.field("int64", pa.int64()),
pa.field("float64", pa.float64()),
pa.field("string", pa.utf8()),
pa.field("bool", pa.bool_()),
pa.field("date", pa.timestamp("ms")),
pa.field("uint32", pa.uint32()),
]
schema = pa.schema(fields)
return (
{
"int64": int64 * size,
"float64": float64 * size,
"string": string * size,
"bool": boolean * size,
"date": int64 * size,
"uint32": int64 * size,
},
schema,
f"basic_nullable_{size*10}.parquet",
)
def case_basic_required(size=1):
int64 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
float64 = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
string = ["Hello", "bbb", "aa", "", "bbb", "abc", "bbb", "bbb", "def", "aaa"]
boolean = [True, True, False, False, False, True, True, True, True, True]
fields = [
pa.field("int64", pa.int64(), nullable=False),
pa.field("float64", pa.float64(), nullable=False),
pa.field("string", pa.utf8(), nullable=False),
pa.field("bool", pa.bool_(), nullable=False),
pa.field("date", pa.timestamp("ms"), nullable=False),
pa.field("uint32", pa.uint32(), nullable=False),
]
schema = pa.schema(fields)
return (
{
"int64": int64 * size,
"float64": float64 * size,
"string": string * size,
"bool": boolean * size,
"date": int64 * size,
"uint32": int64 * size,
},
schema,
f"basic_required_{size*10}.parquet",
)
def case_nested(size):
items = [[0, 1], None, [2, None, 3], [4, 5, 6], [], [7, 8, 9], None, [10]]
fields = [
pa.field("list_int64", pa.list_(pa.int64())),
]
schema = pa.schema(fields)
return (
{
"list_int64": items * size,
},
schema,
f"nested_nullable_{size*10}.parquet",
)
def case_struct(size):
string = ["Hello", None, "aa", "", None, "abc", None, None, "def", "aaa"]
boolean = [True, None, False, False, None, True, None, None, True, True]
validity = [True, False, False, False, False, False, False, False, False, False]
struct_fields = [
("f1", pa.utf8()),
("f2", pa.bool_()),
]
fields = [
pa.field(
"struct_nullable",
pa.struct(struct_fields),
),
pa.field(
"struct_required",
pa.struct(struct_fields),
),
]
schema = pa.schema(fields)
return (
{
"struct_nullable": pa.StructArray.from_arrays(
[pa.array(string * size), pa.array(boolean * size)],
fields=struct_fields,
mask=pa.array(validity * size),
),
"struct_required": pa.StructArray.from_arrays(
[pa.array(string * size), pa.array(boolean * size)],
fields=struct_fields,
),
},
schema,
f"struct_nullable_{size*10}.parquet",
)
def write_pyarrow(
case, size=1, page_version=1, use_dictionary=False, use_compression=False
):
data, schema, path = case(size)
compression_path = "/snappy" if use_compression else ""
if use_dictionary:
base_path = f"{PYARROW_PATH}/v{page_version}/dict{compression_path}"
else:
base_path = f"{PYARROW_PATH}/v{page_version}/non_dict{compression_path}"
t = pa.table(data, schema=schema)
os.makedirs(base_path, exist_ok=True)
pa.parquet.write_table(
t,
f"{base_path}/{path}",
version=f"{page_version}.0",
data_page_version=f"{page_version}.0",
write_statistics=True,
compression="snappy" if use_compression else None,
use_dictionary=use_dictionary,
)
for case in [case_basic_nullable, case_basic_required, case_nested, case_struct]:
for version in [1, 2]:
for use_dict in [False, True]:
for compression in [False, True]:
write_pyarrow(case, 1, version, use_dict, compression)
| 30.355705 | 84 | 0.539686 |
755d33b0e526e6ec02325d4c386976b925a6813a
| 3,727 |
py
|
Python
|
PlatformAgents/com/cognizant/devops/platformagents/agents/ci/spinnaker/SpinnakerAgent3.py
|
tamilselvansellamuthu/Insights
|
fb75d06df8238fbc8604e4dd7a10775dcb92ff5e
|
[
"Apache-2.0"
] | 49 |
2017-09-05T15:04:00.000Z
|
2022-03-01T18:58:48.000Z
|
PlatformAgents/com/cognizant/devops/platformagents/agents/ci/spinnaker/SpinnakerAgent3.py
|
tamilselvansellamuthu/Insights
|
fb75d06df8238fbc8604e4dd7a10775dcb92ff5e
|
[
"Apache-2.0"
] | 153 |
2017-11-20T09:07:31.000Z
|
2022-03-22T05:36:52.000Z
|
PlatformAgents/com/cognizant/devops/platformagents/agents/ci/spinnaker/SpinnakerAgent3.py
|
tamilselvansellamuthu/Insights
|
fb75d06df8238fbc8604e4dd7a10775dcb92ff5e
|
[
"Apache-2.0"
] | 85 |
2017-09-04T10:20:16.000Z
|
2022-03-28T14:49:39.000Z
|
#-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on Jul 15, 2021
@author: 658723
'''
from dateutil import parser
from datetime import datetime
from datetime import timedelta
from ....core.BaseAgent3 import BaseAgent
import json
class SpinnakerAgent(BaseAgent):
@BaseAgent.timed
def process(self):
baseUrl = self.config.get("baseUrl", '')
applicationsUrl = baseUrl + 'applications'
accessToken = self.getCredential("accessToken")
headers = {"Authorization": "Bearer " + accessToken}
startFrom = self.config.get("startFrom", '')
spinnakerApplications = self.getResponse(applicationsUrl, 'GET', None, None, None, reqHeaders=headers)
responseTemplate = self.getResponseTemplate()
dynamicTemplate = self.config.get('dynamicTemplate', {})
stagesTemplate = dynamicTemplate.get('stages', {})
stageMetadata = dynamicTemplate.get('extensions', {}).get('relationMetadata', None)
executionMetadata = dynamicTemplate.get('metadata', {}).get('executions', None)
for application in spinnakerApplications:
applicationName = application["name"]
data = []
stageData = []
timestamp = self.tracking.get(applicationName, startFrom)
lastUpdatedDate = None
executionsUrl = applicationsUrl + '/' + applicationName + '/executions/search?triggerTimeStartBoundary=' + str(timestamp)
executions = self.getResponse(executionsUrl, 'GET', None, None, None, reqHeaders=headers)
pagenum = 0
fetchNextPage = True
while fetchNextPage:
if len(executions) == 0:
fetchNextPage = False
break
for execution in executions:
data += self.parseResponse(responseTemplate, execution)
stages = execution.get("stages", {})
stageData += self.getStageDetails(stages, stagesTemplate, execution["id"])
if lastUpdatedDate is None:
lastUpdatedDate = execution.get("buildTime")
                    self.tracking[applicationName] = str(lastUpdatedDate + 1)
self.publishToolsData(data, executionMetadata, "buildTime", None, True)
self.publishToolsData(stageData, stageMetadata, "stageStartTime", None, True)
pagenum = pagenum + 10
executionsPageUrl = executionsUrl + '&startIndex=' + str(pagenum)
executions = self.getResponse(executionsPageUrl, 'GET', None, None, None, reqHeaders=headers)
self.updateTrackingJson(self.tracking)
def getStageDetails(self, stages, template, executionId):
data = []
for stage in stages:
stageData = self.parseResponse(template, stage)
stageData[0]['pipelineExecutionId'] = executionId
data += stageData
return data
if __name__ == "__main__":
SpinnakerAgent()
| 46.012346 | 133 | 0.618997 |
e926086348465b3d926ac9a184c73101af639180
| 10,047 |
py
|
Python
|
test/functional/p2p-acceptblock.py
|
yasirmx/Megacoin
|
f5066e2af768f1d8a4db84e47e1d095a0324570a
|
[
"MIT"
] | null | null | null |
test/functional/p2p-acceptblock.py
|
yasirmx/Megacoin
|
f5066e2af768f1d8a4db84e47e1d095a0324570a
|
[
"MIT"
] | null | null | null |
test/functional/p2p-acceptblock.py
|
yasirmx/Megacoin
|
f5066e2af768f1d8a4db84e47e1d095a0324570a
|
[
"MIT"
] | 1 |
2019-09-01T11:20:29.000Z
|
2019-09-01T11:20:29.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from that node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
   Node1 should process it because it is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b. Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
   getdata for it.
7. Send Node0 the missing block again.
   Node0 should process it and the tip should advance.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("MEGACOIND", "megacoind"),
help="megacoind binary to test")
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-whitelist=127.0.0.1"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = NodeConnCB() # connects to node0 (not whitelisted)
white_node = NodeConnCB() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
self.log.info("First height 2 block accepted by both nodes")
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in range(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
self.log.info("Second height 2 block accepted only from whitelisted peer")
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in range(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
self.nodes[0].getblock(blocks_h3[0].hash)
self.log.info("Unrequested more-work block accepted from non-whitelisted peer")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
self.log.info("Successfully reorged to length 3 chain from whitelisted peer")
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in range(2):
for i in range(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| 44.653333 | 107 | 0.667264 |
5d946846a9879af55034593e127d7f20ac0608e3
| 1,378 |
py
|
Python
|
python-new-trunk/sfapi2/sflib/profiler-stats.py
|
raychorn/svn_molten-magma
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
[
"CC0-1.0"
] | null | null | null |
python-new-trunk/sfapi2/sflib/profiler-stats.py
|
raychorn/svn_molten-magma
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
[
"CC0-1.0"
] | null | null | null |
python-new-trunk/sfapi2/sflib/profiler-stats.py
|
raychorn/svn_molten-magma
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
[
"CC0-1.0"
] | null | null | null |
import os, sys, pstats
from stat import *
from vyperlogix.misc import _utils
from vyperlogix.daemon.daemon import Log
cmp_dates = lambda x,y:x > y
theKey = lambda x:x[-1]
_fpath = os.path.abspath('logs/profiler.txt')
fpath = _fpath if (len(sys.argv) == 1) else sys.argv[1] if (os.path.exists(sys.argv[1])) else _fpath
_root_ = os.path.dirname(fpath) if (os.path.isfile(fpath)) else fpath
fname = os.sep.join([_root_,'profiler.txt'])
if (not os.path.exists(fname)):
dname = os.path.dirname(fname)
d_list = [os.sep.join([dname,f]) for f in os.listdir(dname) if (os.path.isdir(os.sep.join([dname,f])))]
d_list = [(f,_utils.dateFromSeconds(os.stat(f)[ST_MTIME],useLocalTime=_utils.isUsingLocalTimeConversions)) for f in d_list]
d_list.sort(cmp_dates,theKey)
while (len(d_list) > 0):
t = d_list.pop()
dlogs = os.sep.join([t[0],'logs'])
dplogs = os.sep.join([dlogs,'profiler.txt'])
if (os.path.exists(dlogs)) and (os.path.exists(dplogs)):
fname = dplogs
break
pass
_stdOut = open(os.sep.join([os.path.dirname(fname),'profiler_report.txt']),'w')
_sys_stdout = sys.stdout
sys.stdout = Log(_stdOut)
try:
p = pstats.Stats(fname)
print >>sys.stdout, p.strip_dirs().sort_stats(-1).print_stats()
finally:
sys.stdout.close()
sys.stdout = _sys_stdout
| 34.45 | 128 | 0.648766 |
b5ffdcf8c2ae28ab3ba428bb3e089f1edf15c1db
| 25 |
py
|
Python
|
src/__init__.py
|
bowdbeg/brat_loader
|
2fe594e59e420d30436636700c6532b9291acc2f
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
bowdbeg/brat_loader
|
2fe594e59e420d30436636700c6532b9291acc2f
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
bowdbeg/brat_loader
|
2fe594e59e420d30436636700c6532b9291acc2f
|
[
"MIT"
] | null | null | null |
from brat_loader import *
| 25 | 25 | 0.84 |
bc869d7156fff130d55607a5bcc3edac0b3261bf
| 5,531 |
py
|
Python
|
Result_Window_Final.py
|
Sawera557/PhotoChamp-Image-Forensic-Tool
|
e7550a97d33cdf58a66ea0efcc451178bfd88a8d
|
[
"MIT"
] | null | null | null |
Result_Window_Final.py
|
Sawera557/PhotoChamp-Image-Forensic-Tool
|
e7550a97d33cdf58a66ea0efcc451178bfd88a8d
|
[
"MIT"
] | null | null | null |
Result_Window_Final.py
|
Sawera557/PhotoChamp-Image-Forensic-Tool
|
e7550a97d33cdf58a66ea0efcc451178bfd88a8d
|
[
"MIT"
] | null | null | null |
from PyQt5.QtWidgets import QApplication, QVBoxLayout, QMessageBox, QPushButton, QDialog, QHBoxLayout
import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
from PyQt5 import QtCore
from PyQt5 import QtGui
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
def resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath("D:\\FYP-3\\PhotoChampEXE\\media\\Icons")
return os.path.join(base_path, relative_path) #resource_path('icon.png')))
def resource_remodel_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath("D:\\FYP-3\\PhotoChampEXE\\Re_Traind_Models")
return os.path.join(base_path, relative_path)
class ResultWindow(QDialog):
def __init__(self , label , prob ):
super().__init__()
self.title = "Result"
self.top = 200
self.left = 500
self.width = 400
self.height = 400
self.button_Test_again = QPushButton("Test Again", self)
self.button_quit = QPushButton("Quit", self)
self.init_window(label , prob)
def init_window(self,label , prob):
self.setWindowTitle(self.title)
self.setWindowIcon(QtGui.QIcon(resource_path('icon.png'))) #"D:\\fyp\\PhotoChamp_FYP-03\\PhotoChamp\\Icons\\icons8-cbs-512.ico"))
self.setGeometry(self.left, self.top, self.width, self.height)
self.setFixedSize(self.width, self.height)
hbox = QHBoxLayout()
hbox.addStretch(1)
m = PlotCanvas(self, width=5, height=4, dpi=80, label=label, prob=prob)
m.move(0, 0)
self.button_Test_again.setToolTip("<h5>to test Another image just Click Test button<h5>")
self.button_Test_again.setIcon(QtGui.QIcon(resource_path('698827-icon-101-folder-search-512.png'))) #"D:\\fyp\\PhotoChamp_FYP-03\\PhotoChamp\\Icons\\698827-icon-101-folder-search-512.png"))
self.button_Test_again.setIconSize(QtCore.QSize(15, 15))
self.button_Test_again.clicked.connect(self.test_again)
hbox.addWidget(self.button_Test_again)
self.button_quit.setToolTip("<h5>Close the program<h5>")
self.button_quit.setIcon(QtGui.QIcon(resource_path('cancel-symbol-transparent-9.png'))) #"D:\\fyp\\PhotoChamp_FYP-03\\PhotoChamp\\Icons\\cancel-symbol-transparent-9.png"))
self.button_quit.setIconSize(QtCore.QSize(15, 15))
self.button_quit.clicked.connect(self.close_main_window)
hbox.addWidget(self.button_quit)
vbox = QVBoxLayout()
vbox.addStretch(1)
vbox.addLayout(hbox)
self.setLayout(vbox)
self.show()
def test_again(self):
from Test_window_Final import Test_window
self.Main_window = Test_window()
self.Main_window.show()
self.close()
def close_main_window(self):
"""
Generate 'question' dialog on clicking 'X' button in title bar.
Reimplement the closeEvent() event handler to include a 'Question'
dialog with options on how to proceed - Save, Close, Cancel buttons
"""
reply = QMessageBox.question(self, "Quit", "Are you sure you want to quit?",
QMessageBox.Cancel | QMessageBox.Close)
if reply == QMessageBox.Close:
self.close()
class PlotCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=80 , label = "Forged" , prob = 0.1):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
self.label = label
self.prob = prob
self.plotpie(self.label, self.prob)
def plotpie(self , label , prob):
ax = self.figure.add_subplot(111)
if label == "Forged":
labels = [label, "Not Forged"]
probs = [np.abs(prob * 100), np.abs(prob - 1) * 100]
print(np.abs(prob * 100), np.abs(prob - 1) * 100)
colors = ['Red', 'Blue']
ax.text(0.25, 0.95, 'Decision ' + "Forged", transform=ax.transAxes)
ax.axis("equal")
ax.pie(probs, autopct='%1.1f%%', shadow=True, colors=colors, radius=1.5, counterclock=True)
ax.legend(labels, loc=3)
self.draw()
elif label == "Not_Forged":
labels = [label, "Forged"]
probs = [np.abs(prob * 100), np.abs((prob - 1) * 100)]
print(np.abs(prob * 100), np.abs(prob - 1) * 100)
colors = ['Blue', 'Red']
ax.text(0.25, 0.95, 'Decision ' + " Not Forged", transform=ax.transAxes)
ax.axis("equal")
ax.pie(probs, autopct='%1.1f%%', shadow=True, colors=colors, radius=1.5, counterclock=True)
ax.legend(labels, loc=3)
self.draw()
if __name__ == "__main__":
App = QApplication(sys.argv)
App.setStyle('Fusion')
window = ResultWindow(label = "Test" , prob = 50)
sys.exit(App.exec())
| 40.97037 | 218 | 0.613813 |
5fa6a694cb6c5e6d0d1c1a8e3ebfc5441a0ad0cf
| 152 |
py
|
Python
|
FitNesseRoot/files/sikuliScripts/EclipseStuff.sikuli/OpenEclipseHelp.py
|
xebia/FitnesseSikuli
|
47730bdd59e61f3462b0c40e00e9ce47fe3d1d64
|
[
"Apache-2.0"
] | 1 |
2018-08-09T10:55:49.000Z
|
2018-08-09T10:55:49.000Z
|
FitNesseRoot/files/sikuliScripts/EclipseStuff.sikuli/OpenEclipseHelp.py
|
xebia/FitnesseSikuli
|
47730bdd59e61f3462b0c40e00e9ce47fe3d1d64
|
[
"Apache-2.0"
] | 1 |
2015-03-30T07:49:48.000Z
|
2015-03-30T07:49:48.000Z
|
FitNesseRoot/files/sikuliScripts/EclipseStuff.sikuli/OpenEclipseHelp.py
|
xebia/FitnesseSikuli
|
47730bdd59e61f3462b0c40e00e9ce47fe3d1d64
|
[
"Apache-2.0"
] | 3 |
2015-03-26T14:11:21.000Z
|
2018-10-30T22:15:37.000Z
|
App.focus("Eclipse")
wait("Helo.png")
click("Helo.png")
hover("Eclipse")
hover("File")
click("Open file")
type("g",KeyModifier.CMD+KeyModifier.SHIFT)
| 15.2 | 43 | 0.703947 |
932915c6b08ed34966daf9e97aa663011f7ecbb2
| 10,452 |
py
|
Python
|
config/settings/base.py
|
badri/django-sample
|
c8e544e79e81827c91d009ac4d73c127845597b3
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
badri/django-sample
|
c8e544e79e81827c91d009ac4d73c127845597b3
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
badri/django-sample
|
c8e544e79e81827c91d009ac4d73c127845597b3
|
[
"MIT"
] | null | null | null |
"""
Base settings for Django Gitlab CI project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (django_gitlab_ci/config/settings/base.py - 3 = django_gitlab_ci/)
APPS_DIR = ROOT_DIR.path('django_gitlab_ci')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in the .env file,
# that is to say variables from the .env files will only be used if not defined
# as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
]
# Apps specific for this project go here.
LOCAL_APPS = [
# custom users app
'django_gitlab_ci.users.apps.UsersConfig',
# Your stuff: custom apps go here
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'django_gitlab_ci.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Lakshmi Narasimhan""", '[email protected]'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Uses django-environ to accept uri format
# See: https://django-environ.readthedocs.io/en/latest/#supported-types
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///django_gitlab_ci'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD STORAGE SETTINGS
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'django_gitlab_ci.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'django_gitlab_ci.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# django-compressor
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['compressor']
STATICFILES_FINDERS += ['compressor.finders.CompressorFinder']
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| 37.462366 | 107 | 0.6261 |
9f216390cebbe24dcda27f8e5ba118b5f07c0a74
| 2,848 |
py
|
Python
|
mmf/models/transformers/heads/wra.py
|
facebookresearch/pythia
|
079740bee4b357a7b1b866d35e2f1fad6edba8a4
|
[
"BSD-3-Clause"
] | 3,252 |
2018-07-27T02:32:24.000Z
|
2020-05-07T17:54:46.000Z
|
mmf/models/transformers/heads/wra.py
|
facebookresearch/pythia
|
079740bee4b357a7b1b866d35e2f1fad6edba8a4
|
[
"BSD-3-Clause"
] | 209 |
2018-07-30T06:39:59.000Z
|
2020-05-04T22:03:48.000Z
|
mmf/models/transformers/heads/wra.py
|
facebookresearch/pythia
|
079740bee4b357a7b1b866d35e2f1fad6edba8a4
|
[
"BSD-3-Clause"
] | 431 |
2018-07-27T04:17:37.000Z
|
2020-05-05T13:58:02.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Initial version was taken from https://github.com/ChenRocks/UNITER/
# and adapted for MMF.
from typing import Dict
from mmf.common.registry import registry
from mmf.modules.ot import optimal_transport_dist
from torch import nn, Tensor
@registry.register_transformer_head("wra")
class WRA(nn.Module):
"""
Word Region Alignment from UNITER.
Optimal Transport (OT) distance between text and image
features is used to optimize for WRA.
OT transport plan (T) is approximated through IPOT.
"""
def __init__(
self,
loss_name: str = "wra_loss",
ot_inputs_key: str = "wra_info",
wra_label_key: str = "is_correct",
*args,
**kwargs,
):
super().__init__()
self.loss_name = loss_name
self.ot_inputs_key = ot_inputs_key
self.wra_label_key = wra_label_key
def forward(
self,
sequence_output: Tensor,
processed_sample_list: Dict[str, Dict[str, Tensor]],
) -> Dict[str, Dict[str, Tensor]]:
output_dict = {}
assert (
self.ot_inputs_key in processed_sample_list
and processed_sample_list[self.ot_inputs_key] is not None
), (
f"WRA pretraining requires {self.ot_inputs_key} to be in sample "
+ "list with value not None."
)
ot_inputs = processed_sample_list[self.ot_inputs_key]
assert (
ot_inputs.get("txt_pad") is not None
and ot_inputs.get("img_pad") is not None
), (
"WRA pretraining requires 'txt_pad', and 'img_pad' to be in "
+ f"'processed_sample_list[{self.ot_inputs_key}]' with"
+ " values not None."
)
assert processed_sample_list.get(self.wra_label_key) is not None, (
f"WRA pretraining requires {self.wra_label_key} to be in sample "
+ "list with value not None."
)
ctx_emb = sequence_output
tl = processed_sample_list["input_ids"].size(1)
il = processed_sample_list["image_feat"].size(1)
txt_emb = ctx_emb[:, :tl, :]
img_emb = ctx_emb[:, tl : tl + il, :]
txt_pad = ot_inputs["txt_pad"].bool()
img_pad = ot_inputs["img_pad"].bool()
itm_labels = processed_sample_list[self.wra_label_key]
# NOTE: run in fp32 for stability
ot_dist = optimal_transport_dist(
txt_emb.float(), img_emb.float(), txt_pad, img_pad
).to(txt_emb)
ot_pos = ot_dist.masked_select(itm_labels == 1)
ot_neg = ot_dist.masked_select(itm_labels == 0)
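        # Signed average of OT distances over the batch: minimizing this loss
        # pulls matched (label 1) text-image pairs closer under OT and pushes
        # mismatched (label 0) pairs apart.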
ot_loss = (ot_pos.sum() - ot_neg.sum()) / (ot_pos.size(0) + ot_neg.size(0))
output_dict["losses"] = {}
output_dict["losses"][self.loss_name] = ot_loss
return output_dict
| 33.505882 | 83 | 0.618329 |
5f6831cc8e079b6b4688376122fb5e8d7fd6d8c8
| 2,804 |
py
|
Python
|
cartoon/cartoon/spiders/comic_spider.py
|
lhuibin/Spider
|
7dfebf2f77fe1bd4ec70963f0b30e717682f5aa9
|
[
"MIT"
] | 2 |
2018-08-07T16:51:30.000Z
|
2018-08-09T17:52:06.000Z
|
cartoon/cartoon/spiders/comic_spider.py
|
lhuibin/Spider
|
7dfebf2f77fe1bd4ec70963f0b30e717682f5aa9
|
[
"MIT"
] | null | null | null |
cartoon/cartoon/spiders/comic_spider.py
|
lhuibin/Spider
|
7dfebf2f77fe1bd4ec70963f0b30e717682f5aa9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
import scrapy
from scrapy import Selector
from cartoon.items import ComicItem
class ComicSpider(scrapy.Spider):
name = 'comic'
def __init__(self):
        # Domain of the server hosting the images
self.server_img = 'http://n.1whour.com/'
        # Domain of the server hosting the chapter pages
self.server_link = 'http://comic.kukudm.com'
self.allowed_domains = ['comic.kukudm.com']
self.start_urls = ['http://comic.kukudm.com/comiclist/3/']
        # Regular expression for extracting the image URL
self.pattern_img = re.compile(r'\+"(.+)\'><span')
    # Send the initial request from start_requests
def start_requests(self):
yield scrapy.Request(url = self.start_urls[0], callback = self.parse1)
    # Parse the response to obtain the chapter link addresses
def parse1(self, response):
hxs = Selector(response)
items = []
        # Chapter link addresses
urls = hxs.xpath('//dd/a[1]/@href').extract()
        # Chapter names
dir_names = hxs.xpath('//dd/a[1]/text()').extract()
        # Save the chapter links and chapter names
for index in range(len(urls)):
item = ComicItem()
item['link_url'] = self.server_link + urls[index]
item['dir_name'] = dir_names[index]
items.append(item)
        # Send a Request for each chapter link and pass the item along
for item in items[-13:-1]:
yield scrapy.Request(url = item['link_url'], meta = {'item':item}, callback = self.parse2)
    # Parse the page count and image link of the chapter's first page
def parse2(self, response):
        # Receive the item passed along
item = response.meta['item']
        # Get the link of the chapter's first page
item['link_url'] = response.url
hxs = Selector(response)
        # Get the image link of the chapter's first page
pre_img_url = hxs.xpath('//script/text()').extract()
        # Note: the image URL returned here must be a list, otherwise an error is raised
img_url = [self.server_img + re.findall(self.pattern_img, pre_img_url[0])[0]]
        # Save the image link of the chapter's first page into img_url
item['img_url'] = img_url
        # Return the item so the item pipeline downloads the image
yield item
        # Get the number of pages in the chapter
page_num = hxs.xpath('//td[@valign="top"]/text()').re(u'共(\d+)页')[0]
        # Build the links for the chapter's remaining pages from the page count
pre_link = item['link_url'][:-5]
for each_link in range(2, int(page_num) + 1):
new_link = pre_link + str(each_link) + '.htm'
            # Send a Request for each remaining page to parse its image link, passing the item along
yield scrapy.Request(url = new_link, meta = {'item':item}, callback = self.parse3)
    # Parse the image links of the chapter's other pages
def parse3(self, response):
        # Receive the item passed along
item = response.meta['item']
        # Get the link of this page
item['link_url'] = response.url
hxs = Selector(response)
pre_img_url = hxs.xpath('//script/text()').extract()
        # Note: the image URL returned here must be a list, otherwise an error is raised
img_url = [self.server_img + re.findall(self.pattern_img, pre_img_url[0])[0]]
        # Save the obtained image link into img_url
item['img_url'] = img_url
        # Return the item so the item pipeline downloads the image
yield item
| 34.617284 | 102 | 0.595934 |
6d936738a4c3224df6ced2dde55bae75a6406807
| 1,676 |
py
|
Python
|
abc185/d.py
|
nishio/atcoder
|
8db36537b5d8580745d5f98312162506ad7d7ab4
|
[
"MIT"
] | 1 |
2021-03-09T04:28:13.000Z
|
2021-03-09T04:28:13.000Z
|
abc185/d.py
|
nishio/atcoder
|
8db36537b5d8580745d5f98312162506ad7d7ab4
|
[
"MIT"
] | null | null | null |
abc185/d.py
|
nishio/atcoder
|
8db36537b5d8580745d5f98312162506ad7d7ab4
|
[
"MIT"
] | null | null | null |
# included from snippets/main.py
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def solve(SOLVE_PARAMS):
pass
def main():
# parse input
N, M = map(int, input().split())
AS = list(map(int, input().split()))
AS.append(0)
AS.append(N + 1)
AS.sort()
# debug(AS, msg=":AS")
DS = []
for i in range(M + 1):
d = AS[i + 1] - AS[i]
if d > 1:
DS.append(d - 1)
if not DS:
print(0)
return
# debug(DS, msg=":DS")
k = min(DS)
# debug(k, msg=":k")
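    # k is the smallest gap, so every stamp has width k; a gap of size d then
    # needs ceil(d / k) = (d - 1) // k + 1 stamps.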
ret = 0
for d in DS:
ret += (d - 1) // k + 1
print(ret)
# tests
T1 = """
5 2
1 3
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
3
"""
T2 = """
13 3
13 3 9
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
6
"""
T3 = """
5 5
5 2 1 4 3
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
0
"""
T4 = """
1 0
"""
TEST_T4 = """
>>> as_input(T4)
>>> main()
1
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
sys.exit()
# end of snippets/main.py
| 15.099099 | 59 | 0.498807 |
57e9426964e5b1a78cfefc38977d4a620f521ade
| 183 |
py
|
Python
|
nbpawspublic/__init__.py
|
toolforge/nbpawspublic
|
a1938ca6de0bb4087e5a47fece653f1cf4364efc
|
[
"BSD-2-Clause"
] | null | null | null |
nbpawspublic/__init__.py
|
toolforge/nbpawspublic
|
a1938ca6de0bb4087e5a47fece653f1cf4364efc
|
[
"BSD-2-Clause"
] | null | null | null |
nbpawspublic/__init__.py
|
toolforge/nbpawspublic
|
a1938ca6de0bb4087e5a47fece653f1cf4364efc
|
[
"BSD-2-Clause"
] | null | null | null |
def _jupyter_nbextension_paths():
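    # Standard notebook-extension hook: `jupyter nbextension install --py
    # nbpawspublic` reads this to locate the static assets and the AMD module
    # ("nbpawspublic/main") to require on each notebook page.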
return [{
"section": "notebook",
"dest": "nbpawspublic",
"src": "static",
"require": "nbpawspublic/main"
}]
| 20.333333 | 38 | 0.535519 |
72213fcc78c70a55e3e4e6655ff6dbb0d02d51d4
| 10,295 |
py
|
Python
|
procare_python_package/procare/convert.py
|
dominiquesydow/ProCare
|
f01487c07a5b5de9b7aca2cba7f6315fc7275bc7
|
[
"MIT"
] | 1 |
2021-06-04T17:46:36.000Z
|
2021-06-04T17:46:36.000Z
|
procare_python_package/procare/convert.py
|
dominiquesydow/ProCare
|
f01487c07a5b5de9b7aca2cba7f6315fc7275bc7
|
[
"MIT"
] | null | null | null |
procare_python_package/procare/convert.py
|
dominiquesydow/ProCare
|
f01487c07a5b5de9b7aca2cba7f6315fc7275bc7
|
[
"MIT"
] | null | null | null |
# ----------------------------------------------------------------------------
# < ProCare >
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2020 Merveille Eguida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
"""Conversion between mol2 and pcd format of protein IChem VolSite cavities"""
class _mol2_:
def _mol2_to_pcd(self, ifile_, color_):
""" Extracts coordinates from mol2 files and convert into pcd format """
if ifile_[-5:] != '.mol2':
print("incorrect file extension")
print("file format may be wrong --> no output")
import os
try:
with open(ifile_, "r") as f:
mol2 = f.read().split("\n")
del mol2[-1]
except IOError:
print("Cannot read {}".format(ifile_))
return -1, None, None
try:
start = mol2.index("@<TRIPOS>ATOM")+1
#print("Coordinates start at: {}".format(start))
except ValueError:
print("Cannot index @<TRIPOS>ATOM in mol2 file")
return -1, None, None
try:
end = mol2.index("@<TRIPOS>BOND")-1
#print("Coordinates end at: {}".format(end))
except ValueError:
print("Cannot index @<TRIPOS>BOND in mol2 file")
return -1, None, None
ofilename = os.path.basename(ifile_).replace("mol2", "pcd")
try:
ofile = open(ofilename, "w")
except IOError:
print("mol2_to_pcd: Cannot write to current directory: "
"{}. Please check for access rights.".format(os.getcwd()))
return -1, None, None
properties = []
colors = []
ofile.write("VERSION .7\nFIELDS x y z rgb\nSIZE 4 4 4 4\n"
"TYPE F F F F\nCOUNT 1 1 1 1\n")
ofile.write("WIDTH {}\nHEIGHT 1\nVIEWPOINT 0 0 0 1 0 0 0\n"
"POINTS {}\nDATA ascii".format(end-start+1, end-start+1))
for i in range(start, end+1):
mol2lines = mol2[i].split()
index = int(mol2lines[0])
atom = str(mol2lines[1])
x = float(mol2lines[2])
y = float(mol2lines[3])
z = float(mol2lines[4])
properties.append([index, atom])
colors.append(color_[atom])
ofile.write("\n{} {} {} {}".format(x, y, z, color_[atom]))
ofile.close()
#print(ofilename)
return ofilename, properties, colors
class _pcd_:
def _get_coordinates(self, ifile_):
if ifile_[-4:] != '.pcd':
print("incorrect file extension")
print("file format may be wrong --> no output")
if "DATA ascii" not in open(ifile_, 'r').read():
return -1
coordinates = []
errors = []
with open(ifile_, 'r') as f:
data = f.read().split('\n')
data = [l for l in data[10:] if l != '']
for line_ in data:
x, y, z, rgb = line_.split()
try:
x = float(x)
except ValueError:
errors.append(-1)
try:
y = float(y)
except ValueError:
errors.append(-1)
try:
z = float(z)
except ValueError:
errors.append(-1)
try:
rgb = int(rgb)
except ValueError:
errors.append(-1)
if -1 in errors:
print("Errors while parsing PCD")
break
coordinates.append([x, y, z, rgb])
if -1 not in errors:
self.coordinates = coordinates
return coordinates
else:
return -1
def _write_mol2(self, ofile_, coordinates_, atom_, atom_type_, residue_,
macromol_="PROTEIN"):
import os
from time import strftime, localtime
if ofile_[-5:] != '.mol2':
ofile_ += '.mol2'
name = os.path.splitext(ofile_)[0]
of_string = ""
of_string += "# Modified by ProCare\n"
of_string += "# Modification time: {}\n".format(
strftime("%a %d %b %Y %H:%M:%S", localtime()))
of_string += "# Name: {}.mol2\n\n".format(name)
of_string += "@<TRIPOS>MOLECULE\n"
of_string += "{}\n".format(name)
of_string += "{:>5}{:>6}{:>6}{:>6}{:>6}\n".format(
len(coordinates_), 0, 0, 0, 0)
of_string += "{}\n".format(macromol_)
of_string += "NO_CHARGES\n"
of_string += "@<TRIPOS>ATOM"
for i, point in enumerate(coordinates_):
x, y, z, rgb = [*point]
of_string += ("\n{:>7} {:<8} {:>9.4f} {:>9.4f} {:>9.4f} "
"{:<5} {:>5} {:<8} {:>9}".format(i+1,
atom_[rgb],
x,
y,
z,
atom_type_[rgb],
i+1,
residue_[rgb]+str(i+1),
0.0000
))
of_string += "\n@<TRIPOS>BOND"
with open(ofile_, 'w') as of:
of.write(of_string)
print("written mol2 to {}".format(ofile_))
self.type = "pcd"
return ofile_
def _pcd_to_mol2(self, ifile_, atom_, atom_type_, residue_,
macromol_="PROTEIN"):
import os
coordinates = self._get_coordinates(ifile_)
if coordinates != -1:
ofile = os.path.basename(ifile_).replace('pcd', 'mol2')
if self._write_mol2(ofile, coordinates, atom_, atom_type_,
residue_, macromol_) == ofile:
self.ifile = ifile_
return ofile
else:
return -1
class _volsite_cavity_(_mol2_, _pcd_):
def __init__(self):
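        # Each VolSite probe atom type gets a fixed integer colour code, stored
        # in the "rgb" field of the PCD file; __ATOM inverts the mapping for
        # the pcd -> mol2 direction.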
__COLOR = {"OG":8204959,
"N":30894,
"O":15219528,
"NZ":15231913,
"CZ":4646984,
"CA":16741671,
"DU":7566712,
"OD1":0,}
__ATOM = {val:key for key, val in __COLOR.items()}
__ATOM_TYPE = {"OG":"O.3",
"N":"N.am",
"O":"O.2",
"NZ":"N.4",
"CZ":"C.ar",
"CA":"C.3",
"DU":"H",
"OD1":"O.co2",}
__RESIDUE = {"OG":"SER",
"N":"ALA",
"O":"ALA",
"NZ":"LYS",
"CZ":"PHE",
"CA":"GLY",
"DU":"CUB",
"OD1":"ASP",}
self.COLOR = __COLOR
self.ATOM = __ATOM
self.ATOM_TYPE = {key:__ATOM_TYPE[val]
for key, val in __ATOM.items()}
self.RESIDUE = {key:__RESIDUE[val]
for key, val in __ATOM.items()}
def mol2_to_pcd(self, ifile_):
return self._mol2_to_pcd(ifile_, self.COLOR)
def pcd_to_mol2(self, ifile_):
return self._pcd_to_mol2(ifile_, self.ATOM,
self.ATOM_TYPE,
self.RESIDUE)
def write_mol2(self, ofile_, coordinates_):
return self._write_mol2(ofile_, coordinates_, self.ATOM, self.ATOM_TYPE,
self.RESIDUE)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str,
help="input file",
required=True)
parser.add_argument('-t', '--itype', type=str,
help="input file type: mol2 or pcd",
choices=["mol2", "pcd"],
required=True)
parser.add_argument('-m', '--macromol', type=str,
help="macromolecule type: cavity, ...",
choices=["cav"],
required=True,
default="cav")
args = parser.parse_args()
if args.macromol == "cav":
molecule = _volsite_cavity_()
if args.itype == "pcd":
molecule.pcd_to_mol2(args.input)
elif args.itype == "mol2":
molecule.mol2_to_pcd(args.input)
#coords = [[1, 2, 3, 8204959], [4, 5, 6, 8204959]]
#molecule.write_mol2('test.mol2', coords)
| 33.865132 | 80 | 0.45323 |
c6236482dfcda36adae0d7fe79a291a4c3cc040b
| 17,487 |
py
|
Python
|
main.py
|
zyyhhxx/convNet.pytorch
|
85f65f80b6d75810077c54bd3a8c9094cc2a26f9
|
[
"MIT"
] | null | null | null |
main.py
|
zyyhhxx/convNet.pytorch
|
85f65f80b6d75810077c54bd3a8c9094cc2a26f9
|
[
"MIT"
] | null | null | null |
main.py
|
zyyhhxx/convNet.pytorch
|
85f65f80b6d75810077c54bd3a8c9094cc2a26f9
|
[
"MIT"
] | null | null | null |
import argparse
import time
import logging
import json
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import models
import torch.distributed as dist
from os import path, makedirs
from data import DataRegime, SampledDataRegime
from utils.log import setup_logging, ResultsLog, save_checkpoint, export_args_namespace
from utils.optim import OptimRegime
from utils.cross_entropy import CrossEntropyLoss
from utils.misc import torch_dtypes
from utils.param_filter import FilterModules, is_bn
from datetime import datetime
from ast import literal_eval
from trainer import Trainer
import time
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ConvNet Training')
parser.add_argument('--config-file', default=None,
help='json configuration file')
parser.add_argument('--results-dir', metavar='RESULTS_DIR', default='./results',
help='results dir')
parser.add_argument('--save', metavar='SAVE', default='',
help='saved folder')
parser.add_argument('--datasets-dir', metavar='DATASETS_DIR', default='~/Datasets',
help='datasets dir')
parser.add_argument('--dataset', metavar='DATASET', default='imagenet',
help='dataset name or folder')
parser.add_argument('--model', '-a', metavar='MODEL', default='alexnet',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: alexnet)')
parser.add_argument('--input-size', type=int, default=None,
help='image input size')
parser.add_argument('--model-config', default='',
help='additional architecture configuration')
parser.add_argument('--dtype', default='float',
help='type of tensor: ' +
' | '.join(torch_dtypes.keys()) +
' (default: float)')
parser.add_argument('--device', default='cuda',
help='device assignment ("cpu" or "cuda")')
parser.add_argument('--device-ids', default=[0], type=int, nargs='+',
help='device ids assignment (e.g 0 1 2 3')
parser.add_argument('--world-size', default=-1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int,
help='rank of distributed processes')
parser.add_argument('--dist-init', default='env://', type=str,
help='init used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=-1, type=int, metavar='N',
help='manual epoch number (useful on restarts). -1 for unset (will start at 0)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--eval-batch-size', default=-1, type=int,
help='mini-batch size (default: same as training)')
parser.add_argument('--optimizer', default='SGD', type=str, metavar='OPT',
help='optimizer function used')
parser.add_argument('--drop-optim-state', action='store_true', default=False,
help='do not save optimizer state for resume')
parser.add_argument('--save-all', action='store_true', default=False,
help='save checkpoint for every epoch')
parser.add_argument('--label-smoothing', default=0, type=float,
help='label smoothing coefficient - default 0')
parser.add_argument('--mixup', default=None, type=float,
help='mixup alpha coefficient - default None')
parser.add_argument('--cutmix', default=None, type=float,
help='cutmix alpha coefficient - default None')
parser.add_argument('--duplicates', default=1, type=int,
help='number of augmentations over singel example')
parser.add_argument('--chunk-batch', default=1, type=int,
help='chunk batch size for multiple passes (training)')
parser.add_argument('--cutout', action='store_true', default=False,
help='cutout augmentations')
parser.add_argument('--autoaugment', action='store_true', default=False,
help='use autoaugment policies')
parser.add_argument('--grad-clip', default=-1, type=float,
help='maximum grad norm value, -1 for none')
parser.add_argument('--loss-scale', default=1, type=float,
help='loss scale for mixed precision training.')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=0, type=float,
metavar='W', help='weight decay (default: 0)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--adapt-grad-norm', default=None, type=int,
help='adapt gradient scale frequency (default: None)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', type=str, metavar='FILE',
help='evaluate model FILE on validation set')
parser.add_argument('--seed', default=123, type=int,
help='random seed (default: 123)')
parser.add_argument('--tensorwatch', action='store_true', default=False,
help='set tensorwatch logging')
parser.add_argument('--tensorwatch-port', default=0, type=int,
help='set tensorwatch port')
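# Example invocation (model and dataset names are only illustrative; any
# architecture exposed by the models package and any dataset under
# --datasets-dir can be used):
#   python main.py --dataset cifar10 --model resnet --epochs 90 -b 256 --device cuda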
def main():
args = parser.parse_args()
if args.config_file is not None:
with open(args.config_file) as f:
config_dict = json.loads(f.read())
parser.set_defaults(**config_dict)
args = parser.parse_args()
main_worker(args)
def main_worker(args):
global best_prec1, dtype
best_prec1 = 0
dtype = torch_dtypes.get(args.dtype)
torch.manual_seed(args.seed)
time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if args.evaluate:
args.results_dir = '/tmp'
    if args.save == '':
args.save = time_stamp
save_path = path.join(args.results_dir, args.save)
args.distributed = args.local_rank >= 0 or args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_init,
world_size=args.world_size, rank=args.local_rank)
args.local_rank = dist.get_rank()
args.world_size = dist.get_world_size()
if args.dist_backend == 'mpi':
# If using MPI, select all visible devices
args.device_ids = list(range(torch.cuda.device_count()))
else:
args.device_ids = [args.local_rank]
if not (args.distributed and args.local_rank > 0):
if not path.exists(save_path):
makedirs(save_path)
export_args_namespace(args, path.join(save_path, 'config.json'))
setup_logging(path.join(save_path, 'log.txt'),
                  resume=args.resume != '',
dummy=args.distributed and args.local_rank > 0)
results_path = path.join(save_path)
results = ResultsLog(results_path,
title='Training Results - %s' % args.save)
logging.info("saving to %s", save_path)
logging.debug("run arguments: %s", args)
logging.info("creating model %s", args.model)
if 'cuda' in args.device and torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
torch.cuda.set_device(args.device_ids[0])
cudnn.benchmark = True
else:
args.device_ids = None
# create model
model = models.__dict__[args.model]
model_config = {'dataset': args.dataset}
    if args.model_config != '':
model_config = dict(model_config, **literal_eval(args.model_config))
model = model(**model_config)
logging.info("created model with configuration: %s", model_config)
num_parameters = sum([l.nelement() for l in model.parameters()])
logging.info("number of parameters: %d", num_parameters)
# optionally resume from a checkpoint
if args.evaluate:
if not path.isfile(args.evaluate):
parser.error('invalid checkpoint: {}'.format(args.evaluate))
checkpoint = torch.load(args.evaluate, map_location="cpu")
# Overrride configuration with checkpoint info
args.model = checkpoint.get('model', args.model)
args.model_config = checkpoint.get('config', args.model_config)
# load checkpoint
model.load_state_dict(checkpoint['state_dict'])
logging.info("loaded checkpoint '%s' (epoch %s)",
args.evaluate, checkpoint['epoch'])
if args.resume:
checkpoint_file = args.resume
if path.isdir(checkpoint_file):
results.load(path.join(checkpoint_file, 'results.csv'))
checkpoint_file = path.join(
checkpoint_file, 'model_best.pth.tar')
if path.isfile(checkpoint_file):
logging.info("loading checkpoint '%s'", args.resume)
checkpoint = torch.load(checkpoint_file, map_location="cpu")
if args.start_epoch < 0: # not explicitly set
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optim_state_dict = checkpoint.get('optim_state_dict', None)
logging.info("loaded checkpoint '%s' (epoch %s)",
checkpoint_file, checkpoint['epoch'])
else:
logging.error("no checkpoint found at '%s'", args.resume)
else:
optim_state_dict = None
# define loss function (criterion) and optimizer
loss_params = {}
if args.label_smoothing > 0:
loss_params['smooth_eps'] = args.label_smoothing
criterion = getattr(model, 'criterion', CrossEntropyLoss)(**loss_params)
criterion.to(args.device, dtype)
model.to(args.device, dtype)
# Batch-norm should always be done in float
if 'half' in args.dtype:
FilterModules(model, module=is_bn).to(dtype=torch.float)
# optimizer configuration
optim_regime = getattr(model, 'regime', [{'epoch': 0,
'optimizer': args.optimizer,
'lr': args.lr,
'momentum': args.momentum,
'weight_decay': args.weight_decay}])
optimizer = optim_regime if isinstance(optim_regime, OptimRegime) \
else OptimRegime(model, optim_regime, use_float_copy='half' in args.dtype)
if optim_state_dict is not None:
optimizer.load_state_dict(optim_state_dict)
trainer = Trainer(model, criterion, optimizer,
device_ids=args.device_ids, device=args.device, dtype=dtype, print_freq=args.print_freq,
distributed=args.distributed, local_rank=args.local_rank, mixup=args.mixup, cutmix=args.cutmix,
loss_scale=args.loss_scale, grad_clip=args.grad_clip, adapt_grad_norm=args.adapt_grad_norm)
if args.tensorwatch:
trainer.set_watcher(filename=path.abspath(path.join(save_path, 'tensorwatch.log')),
port=args.tensorwatch_port)
# Evaluation Data loading code
args.eval_batch_size = args.eval_batch_size if args.eval_batch_size > 0 else args.batch_size
val_data = DataRegime(getattr(model, 'data_eval_regime', None),
defaults={'datasets_path': args.datasets_dir, 'name': args.dataset, 'split': 'val', 'augment': False,
'input_size': args.input_size, 'batch_size': args.eval_batch_size, 'shuffle': False,
'num_workers': args.workers, 'pin_memory': True, 'drop_last': False})
if args.evaluate:
results = trainer.validate(val_data.get_loader())
logging.info(results)
return
# Training Data loading code
train_data_defaults = {'datasets_path': args.datasets_dir, 'name': args.dataset, 'split': 'train', 'augment': True,
'input_size': args.input_size, 'batch_size': args.batch_size, 'shuffle': True,
'num_workers': args.workers, 'pin_memory': True, 'drop_last': True,
'distributed': args.distributed, 'duplicates': args.duplicates, 'autoaugment': args.autoaugment,
'cutout': {'holes': 1, 'length': 16} if args.cutout else None}
if hasattr(model, 'sampled_data_regime'):
sampled_data_regime = model.sampled_data_regime
probs, regime_configs = zip(*sampled_data_regime)
regimes = []
for config in regime_configs:
defaults = {**train_data_defaults}
defaults.update(config)
regimes.append(DataRegime(None, defaults=defaults))
train_data = SampledDataRegime(regimes, probs)
else:
train_data = DataRegime(
getattr(model, 'data_regime', None), defaults=train_data_defaults)
logging.info('optimization regime: %s', optim_regime)
logging.info('data regime: %s', train_data)
args.start_epoch = max(args.start_epoch, 0)
trainer.training_steps = args.start_epoch * len(train_data)
start_time = time.time()
end_time = None
end_epoch = None
found = False
for epoch in range(args.start_epoch, args.epochs):
trainer.epoch = epoch
train_data.set_epoch(epoch)
val_data.set_epoch(epoch)
logging.info('\nStarting Epoch: {0}\n'.format(epoch + 1))
# train for one epoch
train_results = trainer.train(train_data.get_loader(),
chunk_batch=args.chunk_batch)
# evaluate on validation set
val_results = trainer.validate(val_data.get_loader())
if args.distributed and args.local_rank > 0:
continue
# remember best prec@1 and save checkpoint
is_best = val_results['prec1'] > best_prec1
best_prec1 = max(val_results['prec1'], best_prec1)
if args.drop_optim_state:
optim_state_dict = None
else:
optim_state_dict = optimizer.state_dict()
save_checkpoint({
'epoch': epoch + 1,
'model': args.model,
'config': args.model_config,
'state_dict': model.state_dict(),
'optim_state_dict': optim_state_dict,
'best_prec1': best_prec1
}, is_best, path=save_path, save_all=args.save_all)
logging.info('\nResults - Epoch: {0}\n'
'Training Loss {train[loss]:.4f} \t'
'Training Prec@1 {train[prec1]:.3f} \t'
'Training Prec@5 {train[prec5]:.3f} \t'
'Validation Loss {val[loss]:.4f} \t'
'Validation Prec@1 {val[prec1]:.3f} \t'
'Validation Prec@5 {val[prec5]:.3f} \t\n'
.format(epoch + 1, train=train_results, val=val_results))
values = dict(epoch=epoch + 1, steps=trainer.training_steps)
values.update({'training ' + k: v for k, v in train_results.items()})
values.update({'validation ' + k: v for k, v in val_results.items()})
results.add(**values)
results.plot(x='epoch', y=['training loss', 'validation loss'],
legend=['training', 'validation'],
title='Loss', ylabel='loss')
results.plot(x='epoch', y=['training error1', 'validation error1'],
legend=['training', 'validation'],
title='Error@1', ylabel='error %')
results.plot(x='epoch', y=['training error5', 'validation error5'],
legend=['training', 'validation'],
title='Error@5', ylabel='error %')
if 'grad' in train_results.keys():
results.plot(x='epoch', y=['training grad'],
legend=['gradient L2 norm'],
title='Gradient Norm', ylabel='value')
results.save()
if not found and val_results['prec1'] > 94:
found = True
end_time = time.time() - start_time
end_epoch = epoch + 1
if not found:
end_time = time.time() - start_time
end_epoch = epoch + 1
print("Target reached: {}, minutes: {}, epochs: {}".format(found, round(end_time / 60, 3), end_epoch))
if __name__ == '__main__':
main()
| 46.261905 | 127 | 0.614171 |
7bfef390355062218bfa38c55710315a4f7fc63f
| 1,281 |
py
|
Python
|
baya/templatetags/baya_tags.py
|
hrichards/baya
|
f319cef5e95cd6a166265d51ae0ea236b6f65be3
|
[
"MIT"
] | null | null | null |
baya/templatetags/baya_tags.py
|
hrichards/baya
|
f319cef5e95cd6a166265d51ae0ea236b6f65be3
|
[
"MIT"
] | 1 |
2018-12-28T16:53:42.000Z
|
2018-12-28T16:53:42.000Z
|
baya/templatetags/baya_tags.py
|
hrichards/baya
|
f319cef5e95cd6a166265d51ae0ea236b6f65be3
|
[
"MIT"
] | null | null | null |
from baya.utils import has_permission
from django import template
from django.core.urlresolvers import resolve
from django.core.urlresolvers import reverse
register = template.Library()
@register.assignment_tag(takes_context=True)
def can_user_perform_action(context, action, *args, **kwargs):
"""
Assignment tag to check user permission within a template.
Example:
{% can_user_perform_action "home" as can_view_homepage %}
Args:
context: The template context (implicitly passed in because
takes_context=True)
action: The name of the url
args/kwargs: The args/kwargs required by reverse
Returns:
bool: True if user has permission, False otherwise.
Caveats:
If there is no Gate (no requires function wrapping the viewfunc),
has_permission returns False.
action, args, and kwargs are fed directly into reverse. If they aren't
given correctly, exceptions will be thrown. e.g. You supply both
args and kwargs. For details please see django docs:
https://docs.djangoproject.com/en/1.8/ref/urlresolvers/#reverse
"""
view_func = resolve(reverse(action, args=args, kwargs=kwargs)).func
return has_permission(view_func, context['user'], 'any')
| 33.710526 | 78 | 0.708821 |
3749c9f4dff13c02363fe070f4a2155a56f424da
| 4,284 |
py
|
Python
|
test/functional/p2p_add_connections.py
|
fujicoin/fujicoin-22.0
|
acdf52ee4b54ba24e904fb2ed0cb578b2d755e48
|
[
"MIT"
] | 17 |
2017-03-21T11:33:12.000Z
|
2021-08-10T04:11:30.000Z
|
test/functional/p2p_add_connections.py
|
fujicoin/fujicoin-22.0
|
acdf52ee4b54ba24e904fb2ed0cb578b2d755e48
|
[
"MIT"
] | 2 |
2018-01-20T04:45:53.000Z
|
2020-01-06T19:52:13.000Z
|
test/functional/p2p_add_connections.py
|
fujicoin/fujicoin
|
acdf52ee4b54ba24e904fb2ed0cb578b2d755e48
|
[
"MIT"
] | 7 |
2017-02-12T08:49:39.000Z
|
2021-07-18T11:33:59.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test add_outbound_p2p_connection test framework functionality"""
from test_framework.p2p import P2PInterface
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import assert_equal
def check_node_connections(*, node, num_in, num_out):
info = node.getnetworkinfo()
assert_equal(info["connections_in"], num_in)
assert_equal(info["connections_out"], num_out)
class P2PAddConnections(FujicoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_network(self):
self.setup_nodes()
# Don't connect the nodes
def run_test(self):
self.log.info("Add 8 outbounds to node 0")
for i in range(8):
self.log.info(f"outbound: {i}")
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="outbound-full-relay")
self.log.info("Add 2 block-relay-only connections to node 0")
for i in range(2):
self.log.info(f"block-relay-only: {i}")
# set p2p_idx based on the outbound connections already open to the
# node, so add 8 to account for the previous full-relay connections
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 8, connection_type="block-relay-only")
self.log.info("Add 2 block-relay-only connections to node 1")
for i in range(2):
self.log.info(f"block-relay-only: {i}")
self.nodes[1].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="block-relay-only")
self.log.info("Add 5 inbound connections to node 1")
for i in range(5):
self.log.info(f"inbound: {i}")
self.nodes[1].add_p2p_connection(P2PInterface())
self.log.info("Add 8 outbounds to node 1")
for i in range(8):
self.log.info(f"outbound: {i}")
# bump p2p_idx to account for the 2 existing outbounds on node 1
self.nodes[1].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 2)
self.log.info("Check the connections opened as expected")
check_node_connections(node=self.nodes[0], num_in=0, num_out=10)
check_node_connections(node=self.nodes[1], num_in=5, num_out=10)
self.log.info("Disconnect p2p connections & try to re-open")
self.nodes[0].disconnect_p2ps()
check_node_connections(node=self.nodes[0], num_in=0, num_out=0)
self.log.info("Add 8 outbounds to node 0")
for i in range(8):
self.log.info(f"outbound: {i}")
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i)
check_node_connections(node=self.nodes[0], num_in=0, num_out=8)
self.log.info("Add 2 block-relay-only connections to node 0")
for i in range(2):
self.log.info(f"block-relay-only: {i}")
# bump p2p_idx to account for the 8 existing outbounds on node 0
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 8, connection_type="block-relay-only")
check_node_connections(node=self.nodes[0], num_in=0, num_out=10)
self.log.info("Restart node 0 and try to reconnect to p2ps")
self.restart_node(0)
self.log.info("Add 4 outbounds to node 0")
for i in range(4):
self.log.info(f"outbound: {i}")
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i)
check_node_connections(node=self.nodes[0], num_in=0, num_out=4)
self.log.info("Add 2 block-relay-only connections to node 0")
for i in range(2):
self.log.info(f"block-relay-only: {i}")
# bump p2p_idx to account for the 4 existing outbounds on node 0
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 4, connection_type="block-relay-only")
check_node_connections(node=self.nodes[0], num_in=0, num_out=6)
check_node_connections(node=self.nodes[1], num_in=5, num_out=10)
if __name__ == '__main__':
P2PAddConnections().main()
| 44.164948 | 120 | 0.6669 |
94c10f9c5f0aec9265d46b7273f0b45d36e03b8e
| 4,218 |
py
|
Python
|
src/iron_throne/pretenders.py
|
BernardFW/iron-throne
|
23130dfdb033f12c6fce443447ee2cdb46cbdea1
|
[
"Apache-2.0"
] | 1 |
2018-02-26T15:16:19.000Z
|
2018-02-26T15:16:19.000Z
|
src/iron_throne/pretenders.py
|
BernardFW/iron-throne
|
23130dfdb033f12c6fce443447ee2cdb46cbdea1
|
[
"Apache-2.0"
] | 2 |
2018-02-08T09:08:23.000Z
|
2018-02-08T09:20:22.000Z
|
src/iron_throne/pretenders.py
|
BernardFW/iron-throne
|
23130dfdb033f12c6fce443447ee2cdb46cbdea1
|
[
"Apache-2.0"
] | null | null | null |
from collections import (
defaultdict,
)
from typing import (
Any,
Dict,
Iterator,
List,
NamedTuple,
Optional,
Text,
Tuple,
)
from iron_throne.claim import (
Proof,
)
from .claim import (
Claim,
)
from .words import (
Word,
tokenize,
)
class Pretender(object):
"""
What if I say I'm not like the others?
A pretender is an object capable of claiming words within a list of words.
"""
def claim(self, words: List['Word']) -> None:
"""
Iterate the list of words in order to claim them. Claimed words will
get a claim appended to their claims list.
"""
raise NotImplementedError
class Expression(object):
"""
Several words that come together. Like a wine name or multi-word color
name.
"""
def __init__(self, text: Text, entity: Text, value: Any):
self.text = text
self.entity = entity
self.value = value
self._words = list(tokenize(text))
def __hash__(self):
return hash(self.text) ^ hash(self.entity) ^ hash(self.value)
def __eq__(self, other):
return self.text == other.text and \
self.entity == other.entity and \
self.value == other.value
def __repr__(self):
return f'Expression<{self.entity}={self.value} "{self.text}">'
@property
def words(self) -> List['Word']:
"""
Provide a read-only access to the words list, because it is generated
automatically at init.
"""
return self._words
class ExpressionMatch(NamedTuple):
expression: Expression
word: Word
seq: int
order: int
TrigramIndex = Dict[
Tuple[Optional[Text], Optional[Text], Optional[Text]],
List[ExpressionMatch]
]
class ExpressionPretender(Pretender):
MIN_SCORE = .6
def __init__(self, expressions: List[Expression], seq: int = 0):
self.expressions = expressions
self.seq = seq
self.index: TrigramIndex = self.build_index()
def build_index(self) -> TrigramIndex:
index: TrigramIndex = defaultdict(lambda: [])
for seq, expression in enumerate(self.expressions):
for order, word in enumerate(expression.words):
for t in word.trigrams:
index[t].append(ExpressionMatch(
expression,
word,
self.seq + seq,
order,
))
return index
def claim_word(self, word: Word, claims: Dict[Expression, Claim]) -> None:
matches: Dict[ExpressionMatch, int] = defaultdict(lambda: 0)
len2 = float(len(word.trigrams))
for t in word.trigrams:
for match in self.index[t]:
matches[match] += 1
def compute_scores() -> Iterator[Tuple[ExpressionMatch, float]]:
for m, count in matches.items():
count = float(count)
len1 = float(len(m.word.trigrams))
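                # count is (approximately) the number of trigrams shared by the
                # two words, so s is their Jaccard similarity:
                # |A & B| / (|A| + |B| - |A & B|)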
s = count / (len1 + len2 - count)
if s > self.MIN_SCORE:
yield m, s
for match, score in compute_scores():
claim = self.get_claim(claims, match)
Proof.attach(
order=match.order,
claim=claim,
word=word,
score=score,
)
def get_claim(self,
claims: Dict[Expression, Claim],
match: ExpressionMatch) -> Claim:
if match.expression not in claims:
claims[match.expression] = Claim(
entity=match.expression.entity,
value=match.expression.value,
score=0,
length=len(match.expression.words),
seq=match.seq,
)
return claims[match.expression]
def claim(self, words: List[Word]) -> None:
claims: Dict[Expression, Claim] = {}
for word in words:
self.claim_word(word, claims)
for claim in claims.values():
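            # A claim's overall score is the mean of its proofs' scores
            # (one Proof per word that matched the expression).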
total = sum(p.score for p in claim.proofs)
claim.score = float(total) / float(len(claim.proofs))
| 25.877301 | 78 | 0.556899 |
a888f4a309d27bd72220edb167f006812d611001
| 4,008 |
py
|
Python
|
awacs/route53.py
|
mprince/awacs
|
f6a16af326ac7fd11e2e2be3a48180475f150611
|
[
"BSD-2-Clause"
] | null | null | null |
awacs/route53.py
|
mprince/awacs
|
f6a16af326ac7fd11e2e2be3a48180475f150611
|
[
"BSD-2-Clause"
] | null | null | null |
awacs/route53.py
|
mprince/awacs
|
f6a16af326ac7fd11e2e2be3a48180475f150611
|
[
"BSD-2-Clause"
] | 1 |
2020-04-03T06:37:42.000Z
|
2020-04-03T06:37:42.000Z
|
# Copyright (c) 2012-2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'Amazon Route 53'
prefix = 'route53'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
AssociateVPCWithHostedZone = Action('AssociateVPCWithHostedZone')
ChangeResourceRecordSets = Action('ChangeResourceRecordSets')
ChangeTagsForResource = Action('ChangeTagsForResource')
CreateHealthCheck = Action('CreateHealthCheck')
CreateHostedZone = Action('CreateHostedZone')
CreateQueryLoggingConfig = Action('CreateQueryLoggingConfig')
CreateReusableDelegationSet = Action('CreateReusableDelegationSet')
CreateTrafficPolicy = Action('CreateTrafficPolicy')
CreateTrafficPolicyInstance = Action('CreateTrafficPolicyInstance')
CreateTrafficPolicyVersion = Action('CreateTrafficPolicyVersion')
CreateVPCAssociationAuthorization = \
Action('CreateVPCAssociationAuthorization')
DeleteHealthCheck = Action('DeleteHealthCheck')
DeleteHostedZone = Action('DeleteHostedZone')
DeleteQueryLoggingConfig = Action('DeleteQueryLoggingConfig')
DeleteReusableDelegationSet = Action('DeleteReusableDelegationSet')
DeleteTrafficPolicy = Action('DeleteTrafficPolicy')
DeleteTrafficPolicyInstance = Action('DeleteTrafficPolicyInstance')
DeleteVPCAssociationAuthorization = \
Action('DeleteVPCAssociationAuthorization')
DisableDomainAutoRenew = Action('DisableDomainAutoRenew')
DisassociateVPCFromHostedZone = Action('DisassociateVPCFromHostedZone')
EnableDomainAutoRenew = Action('EnableDomainAutoRenew')
GetAccountLimit = Action('GetAccountLimit')
GetChange = Action('GetChange')
GetCheckerIpRanges = Action('GetCheckerIpRanges')
GetGeoLocation = Action('GetGeoLocation')
GetHealthCheck = Action('GetHealthCheck')
GetHealthCheckCount = Action('GetHealthCheckCount')
GetHealthCheckLastFailureReason = \
Action('GetHealthCheckLastFailureReason')
GetHealthCheckStatus = Action('GetHealthCheckStatus')
GetHostedZone = Action('GetHostedZone')
GetHostedZoneCount = Action('GetHostedZoneCount')
GetHostedZoneLimit = Action('GetHostedZoneLimit')
GetQueryLoggingConfig = Action('GetQueryLoggingConfig')
GetReusableDelegationSet = Action('GetReusableDelegationSet')
GetReusableDelegationSetLimit = Action('GetReusableDelegationSetLimit')
GetTrafficPolicy = Action('GetTrafficPolicy')
GetTrafficPolicyInstance = Action('GetTrafficPolicyInstance')
GetTrafficPolicyInstanceCount = Action('GetTrafficPolicyInstanceCount')
ListGeoLocations = Action('ListGeoLocations')
ListHealthChecks = Action('ListHealthChecks')
ListHostedZones = Action('ListHostedZones')
ListHostedZonesByName = Action('ListHostedZonesByName')
ListQueryLoggingConfigs = Action('ListQueryLoggingConfigs')
ListResourceRecordSets = Action('ListResourceRecordSets')
ListReusableDelegationSets = Action('ListReusableDelegationSets')
ListTagsForResource = Action('ListTagsForResource')
ListTagsForResources = Action('ListTagsForResources')
ListTrafficPolicies = Action('ListTrafficPolicies')
ListTrafficPolicyInstances = Action('ListTrafficPolicyInstances')
ListTrafficPolicyInstancesByHostedZone = \
Action('ListTrafficPolicyInstancesByHostedZone')
ListTrafficPolicyInstancesByPolicy = \
Action('ListTrafficPolicyInstancesByPolicy')
ListTrafficPolicyVersions = Action('ListTrafficPolicyVersions')
ListVPCAssociationAuthorizations = \
Action('ListVPCAssociationAuthorizations')
TestDNSAnswer = Action('TestDNSAnswer')
UpdateHealthCheck = Action('UpdateHealthCheck')
UpdateHostedZoneComment = Action('UpdateHostedZoneComment')
UpdateTrafficPolicyComment = Action('UpdateTrafficPolicyComment')
UpdateTrafficPolicyInstance = Action('UpdateTrafficPolicyInstance')
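
# Hypothetical usage sketch (not part of this module): the constants above are awacs
# Action instances, and a common way to use them is to build an IAM policy with the
# helpers from awacs.aws, as shown in the project README. The resource ARN below is
# invented for illustration, and to_json()/JSONrepr() may differ between awacs versions.
#
#     from awacs.aws import Allow, Policy, Statement
#     import awacs.route53 as route53
#
#     policy = Policy(
#         Statement=[
#             Statement(
#                 Effect=Allow,
#                 Action=[route53.GetHostedZone, route53.ListResourceRecordSets],
#                 Resource=["arn:aws:route53:::hostedzone/EXAMPLEZONEID"],
#             )
#         ]
#     )
#     print(policy.to_json())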
| 44.533333 | 71 | 0.825349 |
3e9cf51fcf3213638dbbdca77a07c5fbe70b80b4
| 687 |
py
|
Python
|
FaceRecognitionWebsite/codeDesign/myDjango02/app01/migrations/0022_teacherregister.py
|
ChunjunHu/FaceRecognitionLibraryWebsite
|
d979d410dfab52d8bda7a5328242b66d6a6b752d
|
[
"MIT"
] | 1 |
2021-11-05T21:04:47.000Z
|
2021-11-05T21:04:47.000Z
|
FaceRecognitionWebsite/codeDesign/myDjango02/app01/migrations/0022_teacherregister.py
|
ChunjunHu/FaceRecognitionLibraryWebsite
|
d979d410dfab52d8bda7a5328242b66d6a6b752d
|
[
"MIT"
] | null | null | null |
FaceRecognitionWebsite/codeDesign/myDjango02/app01/migrations/0022_teacherregister.py
|
ChunjunHu/FaceRecognitionLibraryWebsite
|
d979d410dfab52d8bda7a5328242b66d6a6b752d
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2019-01-11 15:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app01', '0021_auto_20190111_1533'),
]
operations = [
migrations.CreateModel(
name='teacherRegister',
fields=[
('did', models.AutoField(primary_key=True, serialize=False)),
('dname', models.CharField(max_length=30)),
('dkey', models.CharField(max_length=30)),
('demail', models.EmailField(max_length=50)),
('dsex', models.CharField(max_length=30)),
],
),
]
| 28.625 | 78 | 0.541485 |
e16a79d02b7aa53439810709203e7f5be7491805
| 19,141 |
py
|
Python
|
models/loss.py
|
Co1lin/RfDNet
|
9a6910a0d3f8ab3bffbba9d992757d29a1d96bea
|
[
"MIT"
] | 143 |
2021-04-09T12:28:47.000Z
|
2022-03-25T13:57:16.000Z
|
models/loss.py
|
Co1lin/RfDNet
|
9a6910a0d3f8ab3bffbba9d992757d29a1d96bea
|
[
"MIT"
] | 10 |
2021-04-12T08:24:17.000Z
|
2022-01-02T22:33:01.000Z
|
models/loss.py
|
Co1lin/RfDNet
|
9a6910a0d3f8ab3bffbba9d992757d29a1d96bea
|
[
"MIT"
] | 22 |
2021-04-10T06:05:44.000Z
|
2022-03-31T09:08:50.000Z
|
# loss function library.
# author: ynie
# date: Feb, 2020
import numpy as np
import torch
import torch.nn as nn
from external.pyTorchChamferDistance.chamfer_distance import ChamferDistance
from models.registers import LOSSES
from net_utils.nn_distance import nn_distance, huber_loss
chamfer_func = ChamferDistance()
FAR_THRESHOLD = 0.6
NEAR_THRESHOLD = 0.3
GT_VOTE_FACTOR = 3 # number of GT votes per point
OBJECTNESS_CLS_WEIGHTS = [0.2,0.8] # put larger weights on positive objectness
criterion_heading_class = nn.CrossEntropyLoss(reduction='none')
objectness_criterion = nn.CrossEntropyLoss(torch.Tensor(OBJECTNESS_CLS_WEIGHTS).cuda(), reduction='none')
criterion_size_class = nn.CrossEntropyLoss(reduction='none')
criterion_sem_cls = nn.CrossEntropyLoss(reduction='none')
class BaseLoss(object):
'''base loss class'''
def __init__(self, weight=1):
'''initialize loss module'''
self.weight = weight
@LOSSES.register_module
class Null(BaseLoss):
'''This loss function is for modules where a loss preliminary calculated.'''
def __call__(self, loss):
return self.weight * torch.mean(loss)
def compute_vote_loss(est_data, gt_data):
""" Compute vote loss: Match predicted votes to GT votes.
Args:
est_data, gt_data: dict (read-only)
Returns:
vote_loss: scalar Tensor
Overall idea:
If the seed point belongs to an object (votes_label_mask == 1),
then we require it to vote for the object center.
Each seed point may vote for multiple translations v1,v2,v3
A seed point may also be in the boxes of multiple objects:
o1,o2,o3 with corresponding GT votes c1,c2,c3
Then the loss for this seed point is:
min(d(v_i,c_j)) for i=1,2,3 and j=1,2,3
"""
# Load ground truth votes and assign them to seed points
batch_size = est_data['seed_xyz'].shape[0]
num_seed = est_data['seed_xyz'].shape[1] # B,num_seed,3
vote_xyz = est_data['vote_xyz'] # B,num_seed*vote_factor,3
seed_inds = est_data['seed_inds'].long() # B,num_seed in [0,num_points-1]
# Get groundtruth votes for the seed points
# vote_label_mask: Use gather to select B,num_seed from B,num_point
# non-object point has no GT vote mask = 0, object point has mask = 1
# vote_label: Use gather to select B,num_seed,9 from B,num_point,9
# with inds in shape B,num_seed,9 and 9 = GT_VOTE_FACTOR * 3
seed_gt_votes_mask = torch.gather(gt_data['vote_label_mask'], 1, seed_inds)
seed_inds_expand = seed_inds.view(batch_size, num_seed, 1).repeat(1, 1, 3 * GT_VOTE_FACTOR)
seed_gt_votes = torch.gather(gt_data['vote_label'], 1, seed_inds_expand)
seed_gt_votes += est_data['seed_xyz'].repeat(1, 1, 3)
# Compute the min of min of distance
vote_xyz_reshape = vote_xyz.view(batch_size * num_seed, -1,
3) # from B,num_seed*vote_factor,3 to B*num_seed,vote_factor,3
seed_gt_votes_reshape = seed_gt_votes.view(batch_size * num_seed, GT_VOTE_FACTOR,
3) # from B,num_seed,3*GT_VOTE_FACTOR to B*num_seed,GT_VOTE_FACTOR,3
# A predicted vote to no where is not penalized as long as there is a good vote near the GT vote.
dist1, _, dist2, _ = nn_distance(vote_xyz_reshape, seed_gt_votes_reshape, l1=True)
votes_dist, _ = torch.min(dist2, dim=1) # (B*num_seed,vote_factor) to (B*num_seed,)
votes_dist = votes_dist.view(batch_size, num_seed)
vote_loss = torch.sum(votes_dist * seed_gt_votes_mask.float()) / (torch.sum(seed_gt_votes_mask.float()) + 1e-6)
return vote_loss
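
# Hypothetical shape sketch for compute_vote_loss (the sizes and random tensors below are
# assumptions chosen only to illustrate the input layout described in the docstring above;
# vote_factor is taken to be 1):
#
#     import torch
#     B, num_points, num_seed = 2, 1024, 256
#     est_data = {
#         'seed_xyz': torch.rand(B, num_seed, 3),
#         'vote_xyz': torch.rand(B, num_seed, 3),
#         'seed_inds': torch.randint(0, num_points, (B, num_seed)),
#     }
#     gt_data = {
#         'vote_label_mask': torch.randint(0, 2, (B, num_points)),
#         'vote_label': torch.rand(B, num_points, 3 * GT_VOTE_FACTOR),
#     }
#     vote_loss = compute_vote_loss(est_data, gt_data)  # scalar tensor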
def compute_objectness_loss(est_data, gt_data):
""" Compute objectness loss for the proposals.
Args:
end_points: dict (read-only)
Returns:
objectness_loss: scalar Tensor
objectness_label: (batch_size, num_seed) Tensor with value 0 or 1
objectness_mask: (batch_size, num_seed) Tensor with value 0 or 1
object_assignment: (batch_size, num_seed) Tensor with long int
within [0,num_gt_object-1]
"""
# Associate proposal and GT objects by point-to-point distances
aggregated_vote_xyz = est_data['aggregated_vote_xyz']
gt_center = gt_data['center_label'][:,:,0:3]
B = gt_center.shape[0]
K = aggregated_vote_xyz.shape[1]
K2 = gt_center.shape[1]
dist1, ind1, dist2, _ = nn_distance(aggregated_vote_xyz, gt_center) # dist1: BxK, dist2: BxK2
# Generate objectness label and mask
# objectness_label: 1 if pred object center is within NEAR_THRESHOLD of any GT object
# objectness_mask: 0 if pred object center is in gray zone (DONOTCARE), 1 otherwise
euclidean_dist1 = torch.sqrt(dist1+1e-6)
objectness_label = torch.zeros((B,K), dtype=torch.long).cuda()
objectness_mask = torch.zeros((B,K)).cuda()
objectness_label[euclidean_dist1<NEAR_THRESHOLD] = 1
objectness_mask[euclidean_dist1<NEAR_THRESHOLD] = 1
objectness_mask[euclidean_dist1>FAR_THRESHOLD] = 1
# Compute objectness loss
objectness_scores = est_data['objectness_scores']
objectness_loss = objectness_criterion(objectness_scores.transpose(2,1), objectness_label)
objectness_loss = torch.sum(objectness_loss * objectness_mask)/(torch.sum(objectness_mask)+1e-6)
# Set assignment
object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1
return objectness_loss, objectness_label, objectness_mask, object_assignment
def compute_box_and_sem_cls_loss(est_data, gt_data, meta_data, config):
""" Compute 3D bounding box and semantic classification loss.
Args:
est_data, gt_data, meta_data: dict (read-only)
Returns:
center_loss
heading_cls_loss
heading_reg_loss
size_cls_loss
size_reg_loss
sem_cls_loss
"""
num_heading_bin = config.num_heading_bin
num_size_cluster = config.num_size_cluster
num_class = config.num_class
mean_size_arr = config.mean_size_arr
object_assignment = meta_data['object_assignment']
batch_size = object_assignment.shape[0]
# Compute center loss
pred_center = est_data['center']
gt_center = gt_data['center_label'][:,:,0:3]
dist1, ind1, dist2, _ = nn_distance(pred_center, gt_center) # dist1: BxK, dist2: BxK2
box_label_mask = gt_data['box_label_mask']
objectness_label = meta_data['objectness_label'].float()
centroid_reg_loss1 = \
torch.sum(dist1*objectness_label)/(torch.sum(objectness_label)+1e-6)
centroid_reg_loss2 = \
torch.sum(dist2*box_label_mask)/(torch.sum(box_label_mask)+1e-6)
center_loss = centroid_reg_loss1 + centroid_reg_loss2
# Compute heading loss
heading_class_label = torch.gather(gt_data['heading_class_label'], 1, object_assignment) # select (B,K) from (B,K2)
heading_class_loss = criterion_heading_class(est_data['heading_scores'].transpose(2,1), heading_class_label) # (B,K)
heading_class_loss = torch.sum(heading_class_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
heading_residual_label = torch.gather(gt_data['heading_residual_label'], 1, object_assignment) # select (B,K) from (B,K2)
heading_residual_normalized_label = heading_residual_label / (np.pi/num_heading_bin)
# Ref: https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/3
heading_label_one_hot = torch.cuda.FloatTensor(batch_size, heading_class_label.shape[1], num_heading_bin).zero_()
heading_label_one_hot.scatter_(2, heading_class_label.unsqueeze(-1), 1) # src==1 so it's *one-hot* (B,K,num_heading_bin)
heading_residual_normalized_loss = huber_loss(torch.sum(est_data['heading_residuals_normalized']*heading_label_one_hot, -1) - heading_residual_normalized_label, delta=1.0) # (B,K)
heading_residual_normalized_loss = torch.sum(heading_residual_normalized_loss*objectness_label)/(torch.sum(objectness_label)+1e-6)
# Compute size loss
size_class_label = torch.gather(gt_data['size_class_label'], 1, object_assignment) # select (B,K) from (B,K2)
size_class_loss = criterion_size_class(est_data['size_scores'].transpose(2,1), size_class_label) # (B,K)
size_class_loss = torch.sum(size_class_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
size_residual_label = torch.gather(gt_data['size_residual_label'], 1, object_assignment.unsqueeze(-1).repeat(1,1,3)) # select (B,K,3) from (B,K2,3)
size_label_one_hot = torch.cuda.FloatTensor(batch_size, size_class_label.shape[1], num_size_cluster).zero_()
size_label_one_hot.scatter_(2, size_class_label.unsqueeze(-1), 1) # src==1 so it's *one-hot* (B,K,num_size_cluster)
size_label_one_hot_tiled = size_label_one_hot.unsqueeze(-1).repeat(1,1,1,3) # (B,K,num_size_cluster,3)
predicted_size_residual_normalized = torch.sum(est_data['size_residuals_normalized']*size_label_one_hot_tiled, 2) # (B,K,3)
mean_size_arr_expanded = torch.from_numpy(mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0) # (1,1,num_size_cluster,3)
mean_size_label = torch.sum(size_label_one_hot_tiled * mean_size_arr_expanded, 2) # (B,K,3)
size_residual_label_normalized = size_residual_label / mean_size_label # (B,K,3)
size_residual_normalized_loss = torch.mean(huber_loss(predicted_size_residual_normalized - size_residual_label_normalized, delta=1.0), -1) # (B,K,3) -> (B,K)
size_residual_normalized_loss = torch.sum(size_residual_normalized_loss*objectness_label)/(torch.sum(objectness_label)+1e-6)
# 3.4 Semantic cls loss
sem_cls_label = torch.gather(gt_data['sem_cls_label'], 1, object_assignment) # select (B,K) from (B,K2)
sem_cls_loss = criterion_sem_cls(est_data['sem_cls_scores'].transpose(2,1), sem_cls_label) # (B,K)
sem_cls_loss = torch.sum(sem_cls_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
return center_loss, heading_class_loss, heading_residual_normalized_loss, size_class_loss, size_residual_normalized_loss, sem_cls_loss
@LOSSES.register_module
class DetectionLoss(BaseLoss):
def __call__(self, est_data, gt_data, dataset_config):
""" Loss functions
Args:
end_points: dict
{
seed_xyz, seed_inds, vote_xyz,
center,
heading_scores, heading_residuals_normalized,
size_scores, size_residuals_normalized,
sem_cls_scores, #seed_logits,#
center_label,
heading_class_label, heading_residual_label,
size_class_label, size_residual_label,
sem_cls_label,
box_label_mask,
vote_label, vote_label_mask
}
config: dataset config instance
Returns:
loss: pytorch scalar tensor
end_points: dict
"""
# Vote loss
vote_loss = compute_vote_loss(est_data, gt_data)
# Obj loss
objectness_loss, objectness_label, objectness_mask, object_assignment = \
compute_objectness_loss(est_data, gt_data)
total_num_proposal = objectness_label.shape[0] * objectness_label.shape[1]
pos_ratio = \
torch.sum(objectness_label.float().cuda()) / float(total_num_proposal)
neg_ratio = \
torch.sum(objectness_mask.float()) / float(total_num_proposal) - pos_ratio
# Box loss and sem cls loss
meta_data = {'object_assignment':object_assignment,
'objectness_label':objectness_label}
center_loss, heading_cls_loss, heading_reg_loss, size_cls_loss, size_reg_loss, sem_cls_loss = \
compute_box_and_sem_cls_loss(est_data, gt_data, meta_data, dataset_config)
box_loss = center_loss + 0.1 * heading_cls_loss + heading_reg_loss + 0.1 * size_cls_loss + size_reg_loss
# Final loss function
loss = vote_loss + 0.5 * objectness_loss + box_loss + 0.1 * sem_cls_loss
loss *= 10
# --------------------------------------------
# Some other statistics
obj_pred_val = torch.argmax(est_data['objectness_scores'], 2) # B,K
obj_acc = torch.sum((obj_pred_val == objectness_label.long()).float() * objectness_mask) / (
torch.sum(objectness_mask) + 1e-6)
return {'total':loss,
'vote_loss': vote_loss.item(),
'objectness_loss': objectness_loss.item(),
'box_loss': box_loss.item(),
'sem_cls_loss': sem_cls_loss.item(),
'pos_ratio': pos_ratio.item(),
'neg_ratio': neg_ratio.item(),
'center_loss': center_loss.item(),
'heading_cls_loss': heading_cls_loss.item(),
'heading_reg_loss': heading_reg_loss.item(),
'size_cls_loss': size_cls_loss.item(),
'size_reg_loss': size_reg_loss.item(),
'obj_acc': obj_acc.item()}
@LOSSES.register_module
class ChamferDist(BaseLoss):
def __call__(self, pointset1, pointset2):
'''
calculate the chamfer distance between two point sets.
:param pointset1 (B x N x 3): torch.FloatTensor
:param pointset2 (B x N x 3): torch.FloatTensor
:return:
'''
dist1, dist2 = chamfer_func(pointset1, pointset2)[:2]
loss = self.weight * ((torch.mean(dist1)) + (torch.mean(dist2)))
return loss
@LOSSES.register_module
class PCN_Loss(BaseLoss):
def __init__(self, weight):
super(PCN_Loss, self).__init__(weight)
self.chamfer_distance = ChamferDist()
def __call__(self, pred_fine, pred_coarses, full_scan, full_scan_coarse):
CD_LOSS = self.chamfer_distance(pred_fine, full_scan)
errG = CD_LOSS + 0.1 * self.chamfer_distance(pred_coarses, full_scan_coarse)
return self.weight * errG, CD_LOSS.item()
@LOSSES.register_module
class ONet_Loss(BaseLoss):
def __call__(self, value):
completion_loss = torch.mean(value[:,0])
mask_loss = torch.mean(value[:,1])
total_loss = self.weight * (completion_loss + 100*mask_loss)
return {'total_loss': total_loss,
'completion_loss': completion_loss.item(),
'mask_loss': mask_loss.item()}
def compute_objectness_loss_boxnet(est_data, gt_data):
""" Compute objectness loss for the proposals.
Args:
end_points: dict (read-only)
Returns:
objectness_loss: scalar Tensor
objectness_label: (batch_size, num_seed) Tensor with value 0 or 1
objectness_mask: (batch_size, num_seed) Tensor with value 0 or 1
object_assignment: (batch_size, num_seed) Tensor with long int
within [0,num_gt_object-1]
"""
# Associate proposal and GT objects by point-to-point distances
aggregated_vote_xyz = est_data['aggregated_vote_xyz']
gt_center = gt_data['center_label'][:,:,0:3]
B = gt_center.shape[0]
K = aggregated_vote_xyz.shape[1]
K2 = gt_center.shape[1]
dist1, ind1, dist2, _ = nn_distance(aggregated_vote_xyz, gt_center) # dist1: BxK, dist2: BxK2
# Generate objectness label and mask
# NOTE: Different from VoteNet, here we use seed label as objectness label.
seed_inds = est_data['seed_inds'].long() # B,num_seed in [0,num_points-1]
seed_gt_votes_mask = torch.gather(gt_data['vote_label_mask'], 1, seed_inds)
est_data['seed_labels'] = seed_gt_votes_mask
aggregated_vote_inds = est_data['aggregated_vote_inds']
objectness_label = torch.gather(est_data['seed_labels'], 1, aggregated_vote_inds.long()) # select (B,K) from (B,1024)
objectness_mask = torch.ones((objectness_label.shape[0], objectness_label.shape[1])).cuda() # no ignore zone anymore
# Compute objectness loss
objectness_scores = est_data['objectness_scores']
criterion = nn.CrossEntropyLoss(torch.Tensor(OBJECTNESS_CLS_WEIGHTS).cuda(), reduction='none')
objectness_loss = criterion(objectness_scores.transpose(2,1), objectness_label)
objectness_loss = torch.sum(objectness_loss * objectness_mask)/(torch.sum(objectness_mask)+1e-6)
# Set assignment
object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1
return objectness_loss, objectness_label, objectness_mask, object_assignment
@LOSSES.register_module
class BoxNetDetectionLoss(BaseLoss):
def __call__(self, est_data, gt_data, dataset_config):
""" Loss functions
Args:
end_points: dict
{
seed_xyz, seed_inds,
center,
heading_scores, heading_residuals_normalized,
size_scores, size_residuals_normalized,
sem_cls_scores, #seed_logits,#
center_label,
heading_class_label, heading_residual_label,
size_class_label, size_residual_label,
sem_cls_label,
box_label_mask,
vote_label, vote_label_mask
}
config: dataset config instance
Returns:
loss: pytorch scalar tensor
end_points: dict
"""
# Obj loss
objectness_loss, objectness_label, objectness_mask, object_assignment = \
compute_objectness_loss_boxnet(est_data, gt_data)
total_num_proposal = objectness_label.shape[0] * objectness_label.shape[1]
pos_ratio = \
torch.sum(objectness_label.float().cuda()) / float(total_num_proposal)
neg_ratio = \
torch.sum(objectness_mask.float()) / float(total_num_proposal) - pos_ratio
# Box loss and sem cls loss
meta_data = {'object_assignment':object_assignment,
'objectness_label':objectness_label}
center_loss, heading_cls_loss, heading_reg_loss, size_cls_loss, size_reg_loss, sem_cls_loss = \
compute_box_and_sem_cls_loss(est_data, gt_data, meta_data, dataset_config)
box_loss = center_loss + 0.1 * heading_cls_loss + heading_reg_loss + 0.1 * size_cls_loss + size_reg_loss
# Final loss function
loss = 0.5 * objectness_loss + box_loss + 0.1 * sem_cls_loss
loss *= 10
# --------------------------------------------
# Some other statistics
obj_pred_val = torch.argmax(est_data['objectness_scores'], 2) # B,K
obj_acc = torch.sum((obj_pred_val == objectness_label.long()).float() * objectness_mask) / (
torch.sum(objectness_mask) + 1e-6)
return {'total':loss,
'objectness_loss': objectness_loss.item(),
'box_loss': box_loss.item(),
'sem_cls_loss': sem_cls_loss.item(),
'pos_ratio': pos_ratio.item(),
'neg_ratio': neg_ratio.item(),
'center_loss': center_loss.item(),
'heading_cls_loss': heading_cls_loss.item(),
'heading_reg_loss': heading_reg_loss.item(),
'size_cls_loss': size_cls_loss.item(),
'size_reg_loss': size_reg_loss.item(),
'obj_acc': obj_acc.item()}
| 46.2343 | 183 | 0.677446 |
6b7f6253e67eaa8c64f3ba0b691e1fe34855f7f1
| 1,277 |
py
|
Python
|
artists/models.py
|
flannerykj/urbanapplause
|
c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5
|
[
"MIT"
] | null | null | null |
artists/models.py
|
flannerykj/urbanapplause
|
c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5
|
[
"MIT"
] | null | null | null |
artists/models.py
|
flannerykj/urbanapplause
|
c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse  # needed by Artist.get_absolute_url below
import datetime
from django.utils import timezone
from geoposition.fields import GeopositionField
from django.conf import settings
from taggit.managers import TaggableManager
from taggit.models import TaggedItemBase
class InstrumentTag(TaggedItemBase):
content_object = models.ForeignKey('Artist')
class Artist(models.Model):
name = models.CharField(max_length=100)
pub_date = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
author = models.ForeignKey(User)
tags = TaggableManager(through=InstrumentTag)
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('artists', kwargs={'pk': self.pk})
class Musician(Artist):
INSTRUMENT_CHOICES = (
('Unspecified', 'Unspecified'),
('Guitar', 'Guitar'),
('Violin', 'Violin'),
('Drums', 'Drums'),
('Keyboard', 'Keyboard'),
('Voice', 'Voice'),
)
instruments = models.CharField(max_length=100, choices=INSTRUMENT_CHOICES, default='Unspecified')
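
# Hypothetical usage sketch (assumes a configured Django project with this app installed
# and its migrations applied; the user object and field values are invented):
#
#     artist = Artist.objects.create(name="Example Band", author=some_user)
#     artist.tags.add("guitar", "voice")
#     print(artist.was_published_recently())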
| 32.74359 | 98 | 0.768207 |
04c777fd7c421d19c5da564359623c6dcb4e3b7f
| 3,843 |
py
|
Python
|
src/securityAbandonerAndInjector/NonpublicVarAccessdByPublicFunc/main.py
|
xf97/HuangGai
|
40a349be6102d5eb63893fb914659405ae162d93
|
[
"MIT"
] | 23 |
2020-09-20T02:10:44.000Z
|
2022-03-22T12:58:13.000Z
|
src/securityAbandonerAndInjector/NonpublicVarAccessdByPublicFunc/main.py
|
contractshark/HuangGai
|
1b26f77b043aa5903774420964c61ab370eb6c7a
|
[
"MIT"
] | 3 |
2020-09-22T15:28:33.000Z
|
2022-01-22T07:48:53.000Z
|
src/securityAbandonerAndInjector/NonpublicVarAccessdByPublicFunc/main.py
|
contractshark/HuangGai
|
1b26f77b043aa5903774420964c61ab370eb6c7a
|
[
"MIT"
] | 5 |
2021-07-15T02:45:09.000Z
|
2022-03-21T13:36:40.000Z
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
# cache directory
CACHE_PATH = "./cache/"
# path of the cached contract
CACHE_CONTRACT_PATH = "./cache/temp.sol"
# path of the cached path-info file
CACHE_PATHINFO_PATH = "./cache/temp_sol.json"
# path of the cached abstract syntax tree (AST) file
CACHE_AST_PATH = "./cache/temp.sol_json.ast"
# directory where the source contracts are stored
CONTRACT_PATH = "../../contractExtractor/NonpublicVarAccessdByPublicFuncExtractor/result"
# directory where the injection info is stored
INJECT_INFO_PATH = "../../contractExtractor/NonpublicVarAccessdByPublicFuncExtractor/injectInfo"
# .sol file suffix
SOL_SUFFIX = ".sol"
# json.ast file suffix
JSON_AST_SUFFIX = "_json.ast"
from NonpublicVarAccessdByPublicFuncInjector import NonpublicVarAccessdByPublicFuncInjector  # the injector
import os
import time
class NonpublicVarAccessdByPublicFunc:
def __init__(self, _injectInfo, _contractPath):
self.injectInfo = _injectInfo #所有文件的路径信息情况
self.targetInfoFile = self.targetPathInfo(self.injectInfo)
self.targetContract = self.targetContractList(self.targetInfoFile, _contractPath) #合约列表
self.targetAstFile = self.targetAstList(self.targetInfoFile, _contractPath) #ast列表
self.nowNum = 0
try:
os.mkdir(CACHE_PATH) #建立缓存文件夹
except:
#print("The cache folder already exists.")
pass
def targetAstList(self, _fileList, _contractPath):
result = list()
for filename in _fileList:
jsonAstName = os.path.splitext(os.path.split(filename)[1])[0] + SOL_SUFFIX + JSON_AST_SUFFIX
result.append(os.path.join(_contractPath, jsonAstName))
return result
def targetContractList(self, _fileList, _contractPath):
result = list()
for filename in _fileList:
contractName = os.path.splitext(os.path.split(filename)[1])[0] + SOL_SUFFIX
result.append(os.path.join(_contractPath, contractName))
return result
def targetPathInfo(self, _pathInfo):
fileList = os.listdir(_pathInfo)
result = list()
for item in fileList:
result.append(os.path.join(_pathInfo, item))
return result
def getInfoFile(self, _contractName, _infoFileList):
preName = os.path.splitext(os.path.split(_contractName)[1])[0]
for file in _infoFileList:
if preName in file:
return file
else:
continue
return str()
def getAstFile(self, _contractName, _astFileList):
preName = os.path.splitext(os.path.split(_contractName)[1])[0]
for file in _astFileList:
if preName in file:
return file
else:
continue
return str()
def cacheFile(self, _contract, _pathInfo, _astPath):
try:
with open(CACHE_CONTRACT_PATH, "w+", encoding = "utf-8") as f:
f.write(open(_contract).read())
with open(CACHE_PATHINFO_PATH, "w+", encoding = "utf-8") as f:
f.write(open(_pathInfo).read())
with open(CACHE_AST_PATH, "w+", encoding = "utf-8") as f:
f.write(open(_astPath).read())
return
except:
raise Exception("Failed to cache contract.")
def run(self):
stime = time.time()
contractNum = 0
for contractFile in self.targetContract:
contractNum += 1
try:
# 1. Get the source code, AST and injection info for each contract
pathInfoFile = self.getInfoFile(contractFile, self.targetInfoFile)
astFile = self.getAstFile(contractFile, self.targetAstFile)
print("\r\t Injecting contract: ", os.path.split(contractFile)[1], end = "")
# 2. Cache the current files
self.cacheFile(contractFile, pathInfoFile, astFile)
# 3. Inject the bug according to the target path and the source code
NI = NonpublicVarAccessdByPublicFuncInjector(CACHE_CONTRACT_PATH, CACHE_PATHINFO_PATH, astFile, self.getOriginalContractName(contractFile))
NI.inject()
NI.output()
# 4. Report progress
self.nowNum += 1
#print("\rCurrent injection progress: %.2f" % (self.nowNum / len(self.targetContract)))
except Exception as e:
self.nowNum += 1
#print(e)
continue
print()
#print(time.time() - stime)
#print(contractNum)
def getOriginalContractName(self, _contractPath):
return os.path.splitext(os.path.split(_contractPath)[1])[0]
# unit test
if __name__ == "__main__":
nvabpf = NonpublicVarAccessdByPublicFunc(INJECT_INFO_PATH, CONTRACT_PATH)
nvabpf.run()
| 30.991935 | 143 | 0.733021 |
043a3eafcaef83a163556f238ca9590cd1b1953f
| 358 |
py
|
Python
|
scripts/figures/figure5/pipeswitch_inception_v3/remote_run_data.py
|
CcTtry/PipeSwitch
|
c6d632ee20b6dbbaea9a6fb95b9ea0ed4bbbf67e
|
[
"Apache-2.0"
] | null | null | null |
scripts/figures/figure5/pipeswitch_inception_v3/remote_run_data.py
|
CcTtry/PipeSwitch
|
c6d632ee20b6dbbaea9a6fb95b9ea0ed4bbbf67e
|
[
"Apache-2.0"
] | null | null | null |
scripts/figures/figure5/pipeswitch_inception_v3/remote_run_data.py
|
CcTtry/PipeSwitch
|
c6d632ee20b6dbbaea9a6fb95b9ea0ed4bbbf67e
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from scripts.common.util import RunDocker
def main():
with RunDocker('pipeswitch:pipeswitch', 'figure5_pipeswitch_inception_v3') as rd:
# Start the server: pipeswitch
rd.run('python PipeSwitch/scripts/run_data.py')
# Get and return the data point
if __name__ == '__main__':
main()
| 25.571429 | 86 | 0.659218 |
8e4efe5d5a222cebdec85a1463c382d34ab22321
| 257 |
py
|
Python
|
Harijith/Web-Scrapping
|
c474071e4e929ec3c44d63484251c3d0096a7836
|
[
"bzip2-1.0.6"
] | null | null | null |
Harijith/Web-Scrapping
|
c474071e4e929ec3c44d63484251c3d0096a7836
|
[
"bzip2-1.0.6"
] | null | null | null |
Harijith/Web-Scrapping
|
c474071e4e929ec3c44d63484251c3d0096a7836
|
[
"bzip2-1.0.6"
] | null | null | null |
x=float(input("\n Enter the Score"))
if(x>=0.9 and x<=1):
print("A")
elif(x>=0.8 and x<=1):
print("B")
elif(x>=0.7 and x<=1):
print("C")
elif(x>=0.6 and x<=1):
print("D")
elif(x<0.6):
print("F")
else:
print("Bad Score")
| 18.357143 | 37 | 0.486381 |
999b8020ed4727d7ca8aa40296d5fee3b62e178a
| 326 |
py
|
Python
|
robonomicsinterface/exceptions.py
|
Multi-Agent-io/robonomics-interface
|
139276c93b25e39ff0bf537cf6e5632234dbbc50
|
[
"Apache-2.0"
] | 3 |
2022-01-14T13:50:01.000Z
|
2022-02-19T19:02:47.000Z
|
robonomicsinterface/exceptions.py
|
Multi-Agent-io/robonomics-interface
|
139276c93b25e39ff0bf537cf6e5632234dbbc50
|
[
"Apache-2.0"
] | 18 |
2021-11-10T12:11:26.000Z
|
2022-03-23T14:17:37.000Z
|
robonomicsinterface/exceptions.py
|
Multi-Agent-io/robonomics-interface
|
139276c93b25e39ff0bf537cf6e5632234dbbc50
|
[
"Apache-2.0"
] | 2 |
2021-12-29T09:17:16.000Z
|
2022-03-18T14:06:03.000Z
|
class NoPrivateKeyException(Exception):
"""
No private key was provided so unable to perform any operations requiring message signing.
"""
pass
class DigitalTwinMapException(Exception):
"""
No Digital Twin was created with this index or there is no such topic in Digital Twin map.
"""
pass
| 21.733333 | 94 | 0.699387 |
83499540ad2b4e26c25948c04eb624afa5113656
| 1,731 |
py
|
Python
|
tests/test_causal_frames.py
|
solalatus/justcause
|
af6240cbcf33ba42b8e784703fb0d92e1396f937
|
[
"MIT"
] | 114 |
2019-09-24T07:47:05.000Z
|
2022-02-19T09:37:12.000Z
|
tests/test_causal_frames.py
|
solalatus/justcause
|
af6240cbcf33ba42b8e784703fb0d92e1396f937
|
[
"MIT"
] | 29 |
2019-10-22T07:15:49.000Z
|
2020-11-30T10:13:24.000Z
|
tests/test_causal_frames.py
|
solalatus/justcause
|
af6240cbcf33ba42b8e784703fb0d92e1396f937
|
[
"MIT"
] | 12 |
2020-01-20T12:56:35.000Z
|
2022-02-05T17:44:47.000Z
|
import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from justcause.data.frames import CausalFrame
def test_create_causal_frame(dummy_df):
CausalFrame(dummy_df, covariates=["a", "b"])
with pytest.raises(AssertionError):
CausalFrame(dummy_df)
with pytest.raises(AssertionError):
CausalFrame(dummy_df, covariates=["a", "b", "c"])
def test_causal_frame_operations(dummy_cf):
cf = dummy_cf[dummy_cf["a"] <= 5]
assert isinstance(cf, CausalFrame)
dummy_cf.drop("b", axis=1)
assert isinstance(cf["a"], pd.Series)
assert not hasattr(cf["a"], "_names")
def test_names_extension(dummy_cf, dummy_df):
with pytest.raises(AssertionError):
_ = dummy_df.names.covariates
covariates = dummy_cf.names.covariates
assert covariates == ["a", "b"]
others = dummy_cf.names.others
assert others == ["rep", "sample_id"]
def test_np_extension(dummy_cf, dummy_df):
with pytest.raises(AssertionError):
_ = dummy_df.np.X
X = dummy_cf.np.X
assert isinstance(X, np.ndarray)
assert_array_equal(dummy_cf[["a", "b"]].to_numpy(), X)
y = dummy_cf.np.y
assert isinstance(y, np.ndarray)
assert_array_equal(dummy_cf["y"].to_numpy(), y)
t = dummy_cf.np.t
assert isinstance(t, np.ndarray)
assert_array_equal(dummy_cf["t"].to_numpy(), t)
dummy_cf_no_X = dummy_cf.drop(["a", "b"], axis=1)
with pytest.raises(KeyError):
_ = dummy_cf_no_X.np.X
dummy_cf_no_y = dummy_cf.drop("y", axis=1)
with pytest.raises(KeyError):
_ = dummy_cf_no_y.np.y
dummy_cf_no_t = dummy_cf.drop("t", axis=1)
with pytest.raises(KeyError):
_ = dummy_cf_no_t.np.t
| 25.455882 | 58 | 0.675332 |
21bf7a642554503be05c6e86767f11b9aad0c165
| 3,182 |
py
|
Python
|
src/pyspark_utilities/feature/weights_of_evidence.py
|
gbisschoff/pyspark-utilities
|
e234a5de75a6ab975f4feccfbeaf5c9170a74ca4
|
[
"MIT"
] | null | null | null |
src/pyspark_utilities/feature/weights_of_evidence.py
|
gbisschoff/pyspark-utilities
|
e234a5de75a6ab975f4feccfbeaf5c9170a74ca4
|
[
"MIT"
] | null | null | null |
src/pyspark_utilities/feature/weights_of_evidence.py
|
gbisschoff/pyspark-utilities
|
e234a5de75a6ab975f4feccfbeaf5c9170a74ca4
|
[
"MIT"
] | null | null | null |
from pyspark import keyword_only ## < 2.0 -> pyspark.ml.util.keyword_only
from pyspark.ml.param.shared import HasInputCols, HasOutputCols, Param, Params, TypeConverters, HasLabelCol
# Available in PySpark >= 2.3.0
from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable
from pyspark.ml.pipeline import Estimator, Model
from pyspark.sql.functions import col, create_map, lit, log, sum, when
from pyspark.sql.window import Window
from itertools import chain
from functools import reduce
class WeightsOfEvidence(Estimator, HasInputCols, HasOutputCols, HasLabelCol, DefaultParamsReadable, DefaultParamsWritable):
@keyword_only
def __init__(self, inputCols=None, outputCols=None, labelCol=None):
super(WeightsOfEvidence, self).__init__()
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCols=None, outputCols=None, labelCol=None):
kwargs = self._input_kwargs
return self._set(**kwargs)
def setLabelCol(self, value):
return self._set(labelCol=value)
def getLabelCol(self):
return self.getOrDefault(self.labelCol)
def _fit(self, dataframe):
"""Fit transformer."""
def get_mapping(c):
mapping_df = dataframe\
.groupBy(c)\
.agg(
sum(when(col(self.getLabelCol()) == 0, 1).otherwise(0)).alias("good"),
sum(when(col(self.getLabelCol()) == 1, 1).otherwise(0)).alias("bad")
)\
.withColumn('woe', log((col('good')/sum(col('good')).over(Window.partitionBy()))/(col('bad')/sum(col('bad')).over(Window.partitionBy()))))\
.drop('good', 'bad')
return reduce(lambda a, b: dict(a, **b), [{r[c]: r['woe']} for r in mapping_df.collect()])
mappings = {c: get_mapping(c) for c in self.getInputCols()}
return WeightsOfEvidenceModel(inputCols=self.getInputCols(), outputCols=self.getOutputCols(), mappings=mappings)
class WeightsOfEvidenceModel(Model, HasInputCols, HasOutputCols, DefaultParamsReadable, DefaultParamsWritable,):
@keyword_only
def __init__(self, inputCols=None, outputCols=None, mappings=None):
"""Initialize."""
super(WeightsOfEvidenceModel, self).__init__()
self.mappings = Param(self, "mappings", "WoE Mapping")
self._setDefault(mappings={})
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCols=None, outputCols=None, mappings=None):
"""Get params."""
kwargs = self._input_kwargs
return self._set(**kwargs)
def setMappings(self, value):
return self._set(mappings=value)
def getMappings(self):
return self.getOrDefault(self.mappings)
def _transform(self, dataframe):
def get_mapping_expr(mapping):
return create_map([lit(x) for x in chain(*mapping.items())])
return dataframe.select(['*']+[(get_mapping_expr(self.getMappings()[i]).getItem(col(i))).alias(o) for i, o in zip(self.getInputCols(), self.getOutputCols())])
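
# Hypothetical usage sketch (assumes an active SparkSession bound to `spark`; the column
# names and rows below are invented for illustration):
#
#     df = spark.createDataFrame(
#         [("a", "x", 0), ("a", "x", 1), ("b", "y", 0), ("b", "y", 1)],
#         ["cat1", "cat2", "label"],
#     )
#     woe = WeightsOfEvidence(
#         inputCols=["cat1", "cat2"],
#         outputCols=["cat1_woe", "cat2_woe"],
#         labelCol="label",
#     )
#     model = woe.fit(df)
#     model.transform(df).show()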
| 41.324675 | 173 | 0.655248 |
f537d38b258da626d236b9de56fd54f84fdd290a
| 477 |
py
|
Python
|
02 - Curso Em Video/Aula 14/E - 061.py
|
GabrielTrentino/Python_Basico
|
f13f6448c275c14896337d2018b04cbf5a54efd3
|
[
"MIT"
] | null | null | null |
02 - Curso Em Video/Aula 14/E - 061.py
|
GabrielTrentino/Python_Basico
|
f13f6448c275c14896337d2018b04cbf5a54efd3
|
[
"MIT"
] | null | null | null |
02 - Curso Em Video/Aula 14/E - 061.py
|
GabrielTrentino/Python_Basico
|
f13f6448c275c14896337d2018b04cbf5a54efd3
|
[
"MIT"
] | null | null | null |
# Prints the first ten terms of an arithmetic progression, then keeps asking for more
# terms until the user enters 0.
print('Arithmetic Progression Generator')
print('-='*10)
termo1 = int(input('Enter the first term: '))
razao = int(input('Enter the common difference of the AP: '))
termos = 10
ant = termo1
while termos != 0:
    prox = ant + razao
    print('{} -> '.format(ant) if termos > 1 else '{} '.format(ant), end = '')
    ant = prox
    termos -= 1
    while termos == 0:
        termos = int(input('Enter how many more terms should be calculated: '))
        if termos == 0:
            break
| 29.8125 | 88 | 0.557652 |
21e8862cbf811e0df0fa904a0fbe6d06613c14f1
| 2,810 |
py
|
Python
|
src/main.py
|
shigarus/NewsParser
|
b373f7c047b032e761a3a02f6036c8b3c7107761
|
[
"MIT"
] | null | null | null |
src/main.py
|
shigarus/NewsParser
|
b373f7c047b032e761a3a02f6036c8b3c7107761
|
[
"MIT"
] | null | null | null |
src/main.py
|
shigarus/NewsParser
|
b373f7c047b032e761a3a02f6036c8b3c7107761
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
import codecs
import json
import logging
import os
import htmltoreadable
import toolkit
def write_to_file(url, text):
"""
Write text to path like
default.ru/news/2013/03/dtp/index.html => [CUR_DIR]/default.ru/news/2013/03/dtp/index.txt
:param url: basestring
:param text: basestring
"""
if not isinstance(url, basestring):
raise TypeError('url has to be basestring instance')
if not isinstance(text, basestring):
raise TypeError('text has to be basestring instance')
url = toolkit.morph_url(url)
dir_path = os.path.dirname(url)
file_path = url
has_extension = True in (
file_path.endswith(ext)
for ext in ('.html', '.shtml', '.php')
)
if has_extension:
point_pos = file_path.rfind('.')
file_path = file_path[:point_pos]
file_path = u''.join((
file_path,
'.txt'
))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with codecs.open(file_path, 'w', encoding='utf-8') as fh:
fh.write(text)
def main():
# parse args
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', help='Target page url')
parser.add_argument(
'-t',
'--target',
help='Css selector to process text.'
)
parser.add_argument(
'-e',
'--exclude',
help='Css selector to exclude text.'
)
parser.add_argument('-c', '--config', help='Path to config file')
parser.add_argument('-d', '--debug', action='store_true')
parser.set_defaults(
debug=False,
config='config.json',
exclude=None
)
args = parser.parse_args()
# /parse args
if args.debug:
logging.basicConfig(level=logging.DEBUG)
# getting config
if os.path.exists(args.config):
with codecs.open(args.config, 'r', encoding='utf-8') as fh:
config = json.load(fh)
else:
config = dict(
urls=[],
rules={}
)
# getting rules and urls for processing
if args.url:
url = args.url
site_name = toolkit.get_site_name(url)
if args.target:
exclude = args.exclude
rule = dict(
include=[args.target, ],
exclude=[exclude, ] if exclude else []
)
rules = {
site_name: rule
}
else:
rules = config['rules']
urls = [url, ]
else:
rules = config['rules']
urls = config['urls']
# process urls
text_extractor = htmltoreadable.HtmlTextExtractor(rules)
for url in urls:
text = text_extractor.get_text(url)
write_to_file(url, text)
if __name__ == '__main__':
main()
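
# Hypothetical command-line sketch (the URL and CSS selectors are invented; alternatively,
# a config.json file with "urls" and "rules" keys can be supplied via --config):
#
#     python main.py --url http://example.com/news/2013/03/dtp/index.html \
#         --target "div.article-text" --exclude "div.ads" --debug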
| 24.017094 | 97 | 0.569751 |
143fff24bb199d4e2198279bfb0d474f015a51b3
| 140 |
py
|
Python
|
search.py
|
yatharthgeek/wikipedia-search
|
966d363f1e4ec14cc44b2b420653849f325b1da0
|
[
"MIT"
] | 1 |
2021-10-03T16:21:18.000Z
|
2021-10-03T16:21:18.000Z
|
search.py
|
yatharthgeek/wikipedia-search
|
966d363f1e4ec14cc44b2b420653849f325b1da0
|
[
"MIT"
] | null | null | null |
search.py
|
yatharthgeek/wikipedia-search
|
966d363f1e4ec14cc44b2b420653849f325b1da0
|
[
"MIT"
] | null | null | null |
import wikipedia
bash= input("Ask Question ==>>> ")
result = wikipedia.summary(bash, sentences = 2)
# printing the result
print(result)
| 14 | 47 | 0.707143 |
f19e3258713dd7b3ccf8f85643cc19156c3d4167
| 1,242 |
py
|
Python
|
lambda/mynotes/adapter/s3_bucket_adapter.py
|
scalasm/my-notes
|
f023baad2908d9fe010490deb1891e409fb498a8
|
[
"MIT"
] | null | null | null |
lambda/mynotes/adapter/s3_bucket_adapter.py
|
scalasm/my-notes
|
f023baad2908d9fe010490deb1891e409fb498a8
|
[
"MIT"
] | 10 |
2022-03-14T22:26:25.000Z
|
2022-03-25T00:00:31.000Z
|
lambda/mynotes/adapter/s3_bucket_adapter.py
|
scalasm/my-notes
|
f023baad2908d9fe010490deb1891e409fb498a8
|
[
"MIT"
] | null | null | null |
from typing import Any
from mynotes.core.architecture import ContentUploadException, ObjectStore
class S3BucketAdapter(ObjectStore):
"""
Adapter implementation for the S3 object store.
"""
bucket_name: str
s3_resource: Any
def __init__(self, s3_resource: Any, bucket_name: str) -> None:
self.s3_resource = s3_resource
self.bucket_name = bucket_name
def store(self, object_key: str, content: str) -> None:
object = self.s3_resource.Object(self.bucket_name, object_key)
result = object.put(Body=content)
res = result.get('ResponseMetadata')
if not res.get('HTTPStatusCode') == 200:
raise ContentUploadException(f"Upload to bucket {self.bucket_name} failed for key {object_key}!")
def load(self, object_key: str) -> str:
object = self.s3_resource.Object(self.bucket_name, object_key)
content = object.get()['Body'].read().decode('utf-8')
return content
def delete(self, object_key: str) -> None:
object = self.s3_resource.Object(self.bucket_name, object_key)
# We don't care about the response - either the object was deleted (if present)
# or it was not present!
object.delete()
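
# Hypothetical usage sketch (the bucket name and object key are invented; assumes boto3 is
# installed and AWS credentials plus the bucket itself already exist):
#
#     import boto3
#
#     store = S3BucketAdapter(boto3.resource("s3"), "my-notes-bucket")
#     store.store("notes/example.md", "# My note")
#     print(store.load("notes/example.md"))
#     store.delete("notes/example.md")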
| 34.5 | 109 | 0.669887 |
8487ba2d9d10b4962a9d232b23a42b7d0f26bd73
| 3,529 |
py
|
Python
|
bindings/python/ensmallen/datasets/string/haloferaxvolcanii.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5 |
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/haloferaxvolcanii.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18 |
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/haloferaxvolcanii.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3 |
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Haloferax volcanii.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def HaloferaxVolcanii(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Haloferax volcanii graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Haloferax volcanii graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="HaloferaxVolcanii",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
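
# Hypothetical usage sketch (assumes the compiled ensmallen bindings are installed and that
# downloading the STRING files into ./graphs is acceptable; the accessor names on the
# returned Graph object are assumptions to check against the installed version):
#
#     from ensmallen.datasets.string import HaloferaxVolcanii
#
#     graph = HaloferaxVolcanii(directed=False, version="links.v11.5")
#     print(graph.get_name(), graph.get_nodes_number(), graph.get_edges_number())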
| 32.675926 | 223 | 0.675829 |
4768ce6134ad73c9a6e6b696623d54969f7c6e15
| 1,022 |
py
|
Python
|
common/ClassificationMetrics.py
|
sum-coderepo/HadoopApp
|
0e8d48c5d541b5935c9054fb1335d829d67d7b59
|
[
"Apache-2.0"
] | 2 |
2020-05-26T23:58:32.000Z
|
2020-11-01T20:45:30.000Z
|
common/ClassificationMetrics.py
|
sum-coderepo/HadoopApp
|
0e8d48c5d541b5935c9054fb1335d829d67d7b59
|
[
"Apache-2.0"
] | null | null | null |
common/ClassificationMetrics.py
|
sum-coderepo/HadoopApp
|
0e8d48c5d541b5935c9054fb1335d829d67d7b59
|
[
"Apache-2.0"
] | null | null | null |
from sklearn.metrics import *
class ClassificationMetrics(object):
"""description of class"""
def __init__(self, yPredict,yActual):
self.yPredict = yPredict
self.yActual= yActual
if(self.yPredict is None or self.yActual is None):
raise(Exception('yPredict and yActual cannot be null'))
def getAccuracyScore(self):
return accuracy_score(self.yActual,self.yPredict)
def getPrecisionScore(self):
return precision_score(self.yActual,self.yPredict,average='micro')
def getRecallScore(self):
return recall_score(self.yActual,self.yPredict,average='micro')
def getF1Score(self):
return f1_score(self.yActual,self.yPredict,average='micro')
def getROCAUCScore(self):
return roc_auc_score(self.yActual,self.yPredict,average='micro')
def getLogLossScore(self):
return log_loss(self.yActual,self.yPredict)
def getClassificationReport(self):
return classification_report(self.yActual,self.yPredict)
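
# Hypothetical usage sketch (the label vectors below are invented for illustration):
#
#     metrics = ClassificationMetrics(yPredict=[0, 1, 1, 0], yActual=[0, 1, 0, 0])
#     print(metrics.getAccuracyScore())        # 0.75
#     print(metrics.getF1Score())              # micro-averaged F1
#     print(metrics.getClassificationReport())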
| 34.066667 | 74 | 0.709393 |
5733b355dd94b2bc98c901eef0933a81a8efa490
| 515 |
py
|
Python
|
src/bdbd_common/messageSingle.py
|
rkent/bdbd_common
|
0d6f2cd40f5e83f05d6a2620c00a3b492bbe9ff4
|
[
"MIT"
] | null | null | null |
src/bdbd_common/messageSingle.py
|
rkent/bdbd_common
|
0d6f2cd40f5e83f05d6a2620c00a3b492bbe9ff4
|
[
"MIT"
] | null | null | null |
src/bdbd_common/messageSingle.py
|
rkent/bdbd_common
|
0d6f2cd40f5e83f05d6a2620c00a3b492bbe9ff4
|
[
"MIT"
] | null | null | null |
try:
from Queue import Queue
except:
from queue import Queue
import rospy
def messageSingle(topic, type):
responseQueue = Queue()
sub = rospy.Subscriber(topic, type, lambda msg:responseQueue.put(msg))
result = responseQueue.get()
sub.unregister()
return result
if __name__ == '__main__':
from sensor_msgs.msg import CameraInfo
rospy.init_node('test')
while not rospy.is_shutdown():
print(messageSingle('/bdbd/pantilt_camera/camera_info', CameraInfo))
break
| 25.75 | 76 | 0.700971 |
a76960a76610a44d5f8f1401e7d3b73fb4660c56
| 6,185 |
py
|
Python
|
aws-inventory/lambda/inventory-client-vpn.py
|
dkeppel626/antiope
|
c8a540e92878cb220be9918c20bb9458d4541d1a
|
[
"Apache-2.0"
] | 210 |
2019-01-11T20:58:23.000Z
|
2022-03-16T18:51:17.000Z
|
aws-inventory/lambda/inventory-client-vpn.py
|
dkeppel626/antiope
|
c8a540e92878cb220be9918c20bb9458d4541d1a
|
[
"Apache-2.0"
] | 13 |
2018-11-23T19:06:05.000Z
|
2020-08-19T20:05:28.000Z
|
aws-inventory/lambda/inventory-client-vpn.py
|
dkeppel626/antiope
|
c8a540e92878cb220be9918c20bb9458d4541d1a
|
[
"Apache-2.0"
] | 44 |
2018-11-21T15:51:24.000Z
|
2022-03-11T01:21:24.000Z
|
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
from datetime import datetime, timezone
from dateutil import tz
from antiope.aws_account import *
from common import *
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
RESOURCE_PATH = "ec2/clientvpn"
def lambda_handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Received message: " + json.dumps(message, sort_keys=True))
try:
target_account = AWSAccount(message['account_id'])
for r in target_account.get_regions():
try:
discover_client_vpn_endpoints(target_account, r)
except ClientError as e:
# Move onto next region if we get access denied. This is probably SCPs
if e.response['Error']['Code'] == 'AccessDeniedException':
logger.error(f"AccessDeniedException for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})")
continue
elif e.response['Error']['Code'] == 'UnauthorizedOperation':
logger.error(f"UnauthorizedOperation for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})")
continue
else:
raise # pass on to the next handler
except AntiopeAssumeRoleError as e:
logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
return()
except ClientError as e:
if e.response['Error']['Code'] == 'UnauthorizedOperation':
logger.error("Antiope doesn't have proper permissions to this account")
return(event)
logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
raise
except Exception as e:
logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
raise
def discover_client_vpn_endpoints(target_account, region):
'''Iterate accross all regions to discover client vpn endpoints'''
ec2_client = target_account.get_client('ec2', region=region)
response = ec2_client.describe_client_vpn_endpoints()
if response['ClientVpnEndpoints']:
for cvpn in response['ClientVpnEndpoints']:
resource_item = {}
resource_item['awsAccountId'] = target_account.account_id
resource_item['awsAccountName'] = target_account.account_name
resource_item['resourceType'] = "AWS::EC2::ClientVpnEndpoint"
resource_item['source'] = "Antiope"
resource_item['awsRegion'] = region
resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
resource_item['configuration'] = cvpn
resource_item['supplementaryConfiguration'] = {}
resource_item['resourceId'] = cvpn['ClientVpnEndpointId']
resource_item['resourceCreationTime'] = cvpn['CreationTime']
resource_item['errors'] = {}
if 'Tags' in cvpn:
resource_item['tags'] = parse_tags(cvpn['Tags'])
# Get any active VPN connections to the endpoint and add as part of the supplementary configuration.
connections = discover_client_vpn_connections(ec2_client, cvpn['ClientVpnEndpointId'])
resource_item['supplementaryConfiguration']['Connections'] = connections
# Obtain other network configuration associated with the VPN endpoint and add as part of the supplementary configuration.
routes = discover_client_vpn_routes(ec2_client, cvpn['ClientVpnEndpointId'])
resource_item['supplementaryConfiguration']['Routes'] = routes
targets = discover_client_vpn_targets(ec2_client, cvpn['ClientVpnEndpointId'])
resource_item['supplementaryConfiguration']['ClientVpnTargetNetworks'] = targets
# Save files to S3
save_resource_to_s3(RESOURCE_PATH, cvpn['ClientVpnEndpointId'], resource_item)
logger.info("Discovered Client VPN connection ({}) in account {} for region {}".format(cvpn['ClientVpnEndpointId'], target_account.account_id, region))
logger.debug("Data: {}".format(resource_item))
else:
logger.debug("No Client VPN connections found for account {} in region {}".format(target_account.account_id, region))
def discover_client_vpn_connections(ec2_client, vpnId):
'''Get client VPN endpoint configuration based on the endpointId'''
response = ec2_client.describe_client_vpn_connections(
ClientVpnEndpointId=vpnId,
)
return(response['Connections'])
def discover_client_vpn_routes(ec2_client, vpnId):
'''Get client VPN routes configuration based on the endpointId'''
response = ec2_client.describe_client_vpn_routes(
ClientVpnEndpointId=vpnId,
)
return(response['Routes'])
def discover_client_vpn_targets(ec2_client, vpnId):
'''Get client VPN target networks configuration based on the endpointId'''
response = ec2_client.describe_client_vpn_target_networks(
ClientVpnEndpointId=vpnId,
)
return(response['ClientVpnTargetNetworks'])
| 46.856061 | 172 | 0.645918 |
86b3071fef2f820ebb726caf3bf3b35512c9382b
| 5,890 |
py
|
Python
|
train.py
|
chiemenz/nd00333_AZMLND_Optimizing_a_Pipeline_in_Azure-Starter_Files
|
e33d7321511bc4d39fdc406eacb29305d94bf9a7
|
[
"MIT"
] | null | null | null |
train.py
|
chiemenz/nd00333_AZMLND_Optimizing_a_Pipeline_in_Azure-Starter_Files
|
e33d7321511bc4d39fdc406eacb29305d94bf9a7
|
[
"MIT"
] | null | null | null |
train.py
|
chiemenz/nd00333_AZMLND_Optimizing_a_Pipeline_in_Azure-Starter_Files
|
e33d7321511bc4d39fdc406eacb29305d94bf9a7
|
[
"MIT"
] | null | null | null |
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.linear_model import LogisticRegression\n",
"import argparse\n",
"import os\n",
"import numpy as np\n",
"from sklearn.metrics import mean_squared_error\n",
"import joblib\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import OneHotEncoder\n",
"import pandas as pd\n",
"from azureml.core.run import Run\n",
"from azureml.data.dataset_factory import TabularDatasetFactory"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def clean_data(data):\n",
" # Dict for cleaning data\n",
" months = {\"jan\":1, \"feb\":2, \"mar\":3, \"apr\":4, \"may\":5, \"jun\":6, \"jul\":7, \"aug\":8, \"sep\":9, \"oct\":10, \"nov\":11, \"dec\":12}\n",
" weekdays = {\"mon\":1, \"tue\":2, \"wed\":3, \"thu\":4, \"fri\":5, \"sat\":6, \"sun\":7}\n",
"\n",
" # Clean and one hot encode data\n",
" x_df = data.to_pandas_dataframe().dropna()\n",
" jobs = pd.get_dummies(x_df.job, prefix=\"job\")\n",
" x_df.drop(\"job\", inplace=True, axis=1)\n",
" x_df = x_df.join(jobs)\n",
" x_df[\"marital\"] = x_df.marital.apply(lambda s: 1 if s == \"married\" else 0)\n",
" x_df[\"default\"] = x_df.default.apply(lambda s: 1 if s == \"yes\" else 0)\n",
" x_df[\"housing\"] = x_df.housing.apply(lambda s: 1 if s == \"yes\" else 0)\n",
" x_df[\"loan\"] = x_df.loan.apply(lambda s: 1 if s == \"yes\" else 0)\n",
" contact = pd.get_dummies(x_df.contact, prefix=\"contact\")\n",
" x_df.drop(\"contact\", inplace=True, axis=1)\n",
" x_df = x_df.join(contact)\n",
" education = pd.get_dummies(x_df.education, prefix=\"education\")\n",
" x_df.drop(\"education\", inplace=True, axis=1)\n",
" x_df = x_df.join(education)\n",
" x_df[\"month\"] = x_df.month.map(months)\n",
" x_df[\"day_of_week\"] = x_df.day_of_week.map(weekdays)\n",
" x_df[\"poutcome\"] = x_df.poutcome.apply(lambda s: 1 if s == \"success\" else 0)\n",
"\n",
" y_df = x_df.pop(\"y\").apply(lambda s: 1 if s == \"yes\" else 0)\n",
" return x_df, y_df"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"TODO: Create TabularDataset using TabularDatasetFactory\n",
"Data is located at:\n",
"\"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds = TabularDatasetFactory.from_delimited_files(path= \"https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv\", header=True, validate=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"x, y = clean_data(ds)\n",
"\n",
"x_train, x_test, y_train, y_test = train_test_split(x,y, random_state=42, test_size=0.2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"TODO: Split data into train and test sets."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## YOUR CODE HERE ###a"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"lines_to_next_cell": 1
},
"outputs": [],
"source": [
"run = Run.get_context()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def main():\n",
" # Add arguments to script\n",
" parser = argparse.ArgumentParser(description=\"hyperparameters of the logistic regression model\")\n",
"\n",
" parser.add_argument('--C', type=float, default=1.0,\n",
" help=\"Inverse of regularization strength. Smaller values cause stronger regularization\")\n",
" parser.add_argument('--max_iter', type=int,\n",
" default=100,\n",
" help=\"Maximum number of iterations to converge\")\n",
" \n",
" args = parser.parse_args()\n",
" \n",
"# C = 1.0\n",
"# max_iter = 100\n",
"# run.log(\"Regularization Strength:\", np.float(C))\n",
"# run.log(\"Max iterations:\", np.int(max_iter))\n",
"# model = LogisticRegression(C=C, max_iter=max_iter, penalty=\"l2\").fit(x_train, y_train)\n",
"\n",
" run.log(\"Regularization Strength:\", np.float(args.C))\n",
" run.log(\"Max iterations:\", np.int(args.max_iter))\n",
"\n",
" model = LogisticRegression(C=args.C, max_iter=args.max_iter).fit(x_train, y_train)\n",
"\n",
" accuracy = model.score(x_test, y_test)\n",
" run.log(\"Accuracy\", np.float(accuracy))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if __name__ == '__main__':\n",
" main()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"jupytext": {
"cell_metadata_filter": "-all",
"notebook_metadata_filter": "-all",
"text_representation": {
"extension": ".py",
"format_name": "light"
}
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 30.837696 | 199 | 0.550934 |
bc0397691b57a3a8e2710bc76dd06700f122e002
| 50,898 |
py
|
Python
|
parlai/tasks/task_list.py
|
min942773/parlai_wandb
|
1d9ba1a0df2199d0247cee8c4929a2598ac7e41a
|
[
"MIT"
] | null | null | null |
parlai/tasks/task_list.py
|
min942773/parlai_wandb
|
1d9ba1a0df2199d0247cee8c4929a2598ac7e41a
|
[
"MIT"
] | 7 |
2021-01-12T01:07:03.000Z
|
2022-03-12T00:50:45.000Z
|
parlai/tasks/task_list.py
|
min942773/parlai_wandb
|
1d9ba1a0df2199d0247cee8c4929a2598ac7e41a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file contains a list of all the tasks, their id and task name, description and the
tags associated with them.
"""
task_list = [
{
"id": "AmazonQA",
"display_name": "AmazonQA",
"task": "amazon_qa",
"tags": ["All", "QA"],
"links": {"website": "http://jmcauley.ucsd.edu/data/amazon/qa/"},
"description": (
"This dataset contains Question and Answer data from Amazon, "
"totaling around 1.4 million answered questions."
),
},
{
"id": "AQuA",
"display_name": "AQuA",
"task": "aqua",
"tags": ["All", "QA"],
"links": {"arXiv": "https://arxiv.org/abs/1705.04146"},
"description": (
"Dataset containing algebraic word problems with rationales for "
"their answers."
),
},
{
"id": "bAbI-1k",
"display_name": "bAbI 1k",
"task": "babi:All1k",
"tags": ["All", "QA"],
"description": (
"20 synthetic tasks that each test a unique aspect of text and "
"reasoning, and hence test different capabilities of learning "
"models."
),
"links": {"arXiv": "http://arxiv.org/abs/1502.05698"},
"notes": (
"You can access just one of the bAbI tasks with e.g. "
"'babi:Task1k:3' for task 3."
),
},
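    # Editor's illustrative note (assumption based on the notes above and the CLI form
    # used elsewhere in this file): a single bAbI sub-task can be inspected with
    #   parlai display_data -t babi:Task1k:3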
{
"id": "bAbI-10k",
"display_name": "bAbI 10k",
"task": "babi:All10k",
"tags": ["All", "QA"],
"description": (
"20 synthetic tasks that each test a unique aspect of text and "
"reasoning, and hence test different capabilities of learning "
"models."
),
"links": {"arXiv": "http://arxiv.org/abs/1502.05698"},
"notes": (
"You can access just one of the bAbI tasks with e.g. 'babi:Task10k:3' "
"for task 3."
),
},
{
"id": "BlendedSkillTalk",
"display_name": "Blended Skill Talk",
"task": "blended_skill_talk",
"tags": ["All", "ChitChat"],
"description": (
"A dataset of 7k conversations explicitly designed to exhibit multiple "
"conversation modes: displaying personality, having empathy, and "
"demonstrating knowledge."
),
},
{
"id": "BookTest",
"display_name": "BookTest",
"task": "booktest",
"tags": ["All", "Cloze"],
"description": (
"Sentence completion given a few sentences as context from a book. "
"A larger version of CBT."
),
"links": {"arXiv": "https://arxiv.org/abs/1610.00956"},
},
{
"id": "BotAdversarialDialogue",
"display_name": "Bot Adversarial Dialogue ",
"task": "bot_adversarial_dialogue",
"tags": ["All"],
"description": (
"Datasets described in the paper Recipes for Safety in Open-domain Chatbots."
"Datasets consist of classification tasks in which the goal is to "
"determine if the utterance is offensive or not given a dialogue context. "
),
"links": {"arXiv": "<placeholder>"},
},
{
"id": "CBT",
"display_name": "Children's Book Test (CBT)",
"task": "cbt",
"tags": ["All", "Cloze"],
"description": (
"Sentence completion given a few sentences as context from a "
"children's book."
),
"links": {"arXiv": "https://arxiv.org/abs/1511.02301"},
},
{
"id": "CCPE",
"display_name": "Coached Conversational Preference Elicitation",
"task": "ccpe",
"tags": ["All", "Goal"],
"description": (
"A dataset consisting of 502 dialogs with 12,000 annotated "
"utterances between a user and an assistant discussing movie "
"preferences in natural language. It was collected using a "
"Wizard-of-Oz methodology between two paid crowd-workers, "
"where one worker plays the role of an 'assistant', while "
"the other plays the role of a 'user'."
),
"links": {
"website": "https://ai.google/tools/datasets/coached-conversational-preference-elicitation"
},
},
{
"id": "COPA",
"display_name": "Choice of Plausible Alternatives",
"task": "copa",
"tags": ["All", "Reasoning"],
"description": (
"The Choice Of Plausible Alternatives (COPA) evaluation provides "
"researchers with a tool for assessing progress in open-domain "
"commonsense causal reasoning. COPA consists of 1000 questions, "
"split equally into development and test sets of 500 questions each."
),
"links": {"website": "http://people.ict.usc.edu/~gordon/copa.html"},
},
{
"id": "COQA",
"display_name": "Conversational Question Answering Challenge",
"task": "coqa",
"tags": ["All", "QA"],
"description": (
"CoQA is a large-scale dataset for building Conversational "
"Question Answering systems. The goal of the CoQA challenge "
"is to measure the ability of machines to understand a text "
"passage and answer a series of interconnected questions that "
"appear in a conversation. CoQA is pronounced as coca."
),
"links": {"arXiv": "https://arxiv.org/abs/1808.07042"},
},
{
"id": "CornellMovie",
"display_name": "Cornell Movie",
"task": "cornell_movie",
"tags": ["All", "ChitChat", "Dodeca"],
"description": ("Fictional conversations extracted from raw movie scripts."),
"links": {"arXiv": "https://arxiv.org/abs/1106.3077"},
},
{
"id": "DBLL-bAbI",
"display_name": "Dialog Based Language Learning: bAbI Task",
"task": "dbll_babi",
"tags": ["All", "Goal"],
"description": (
"Short dialogs based on the bAbI tasks, but in the form of a "
"question from a teacher, the answer from the student, and finally a "
"comment on the answer from the teacher. The aim is to find learning "
"models that use the comments to improve."
),
"links": {"arXiv": "https://arxiv.org/abs/1604.06045"},
"notes": (
"Tasks can be accessed with a "
"format like: 'parlai display_data -t "
"dbll_babi:task:2_p0.5' which specifies task 2, and policy with 0.5 "
"answers correct, see the paper for more details of the tasks."
),
},
{
"id": "DBLL-Movie",
"display_name": "Dialog Based Language Learning: WikiMovies Task",
"task": "dbll_movie",
"tags": ["All", "Goal"],
"description": (
"Short dialogs based on WikiMovies, but in the form of a question "
"from a teacher, the answer from the student, and finally a comment "
"on the answer from the teacher. The aim is to find learning models "
"that use the comments to improve."
),
"links": {"arXiv": "https://arxiv.org/abs/1604.06045"},
},
{
"id": "dialog-bAbI",
"display_name": "Dialog bAbI",
"task": "dialog_babi",
"tags": ["All", "Goal"],
"description": "Simulated dialogs of restaurant booking",
"links": {"arXiv": "https://arxiv.org/abs/1605.07683"},
},
{
"id": "dialog-bAbI-plus",
"display_name": "Dialog bAbI+",
"task": "dialog_babi_plus",
"tags": ["All", "Goal"],
"description": (
"bAbI+ is an extension of the bAbI Task 1 dialogues with everyday "
"incremental dialogue phenomena (hesitations, restarts, and "
"corrections) which model the disfluencies and communication "
"problems in everyday spoken interaction in real-world environments. "
),
"links": {
"website": (
"https://www.researchgate.net/publication/"
"319128941_Challenging_Neural_Dialogue_Models_with_Natural_"
"Data_Memory_Networks_Fail_on_Incremental_Phenomena"
),
"paper": "http://aclweb.org/anthology/D17-1235",
},
},
{
"id": "dialogue-nli",
"display_name": "Dialogue NLI",
"task": "dialogue_nli",
"tags": ["All", "ChitChat", "NLI"],
"description": (
"Dialogue NLI is a dataset that addresses the issue of consistency in "
"dialogue models."
),
"links": {
"website": "https://wellecks.github.io/dialogue_nli/",
"arXiv": "https://arxiv.org/abs/1811.00671",
},
},
{
"id": "dstc7",
"display_name": "DSTC7 subtrack 1 - ubuntu",
"task": "dstc7",
"tags": ["All", "ChitChat"],
"description": (
"DSTC7 is a competition which provided a dataset of dialogs very "
"similar to the ubuntu dataset. In particular, the subtrack 1 "
"consists in predicting the next utterance."
),
"links": {"arXiv": "https://arxiv.org/abs/1901.03461"},
},
{
"id": "FVQA",
"display_name": "FVQA",
"task": "fvqa",
"tags": ["All", "Visual"],
"description": (
"The FVQA, a VQA dataset which requires, and supports, much deeper "
"reasoning. We extend a conventional visual question answering "
"dataset, which contains image-question-answer triplets, through "
"additional image-question-answer-supporting fact tuples. The "
"supporting fact is represented as a structural triplet, such as "
"<Cat,CapableOf,ClimbingTrees>."
),
"links": {"arXiv": "https://arxiv.org/abs/1606.05433"},
},
{
"id": "DealNoDeal",
"display_name": "Deal or No Deal",
"task": "dealnodeal",
"tags": ["All", "Negotiation"],
"description": (
"End-to-end negotiation task which requires two agents to agree on "
"how to divide a set of items, with each agent assigning different "
"values to each item."
),
"links": {"arXiv": "https://arxiv.org/abs/1706.05125"},
},
{
"id": "HotpotQA",
"display_name": "HotpotQA",
"task": "hotpotqa",
"tags": ["All", "QA"],
"description": (
"HotpotQA is a dataset for multi-hop question answering."
"The overall setting is that given some context paragraphs"
"(e.g., a few paragraphs, or the entire Web) and a question,"
"a QA system answers the question by extracting a span of text"
"from the context. It is necessary to perform multi-hop reasoning"
"to correctly answer the question."
),
"links": {"arXiv": "https://arxiv.org/abs/1809.09600"},
},
{
"id": "LIGHT-Dialogue",
"display_name": "LIGHT-Dialogue",
"task": "light_dialog",
"tags": ["All", "Grounded", "Dodeca"],
"description": (
"LIGHT is a text adventure game with actions and dialogue collected."
"The source data is collected between crowdworkers playing the game."
),
"links": {
"website": "http://parl.ai/projects/light",
"arXiv": "https://arxiv.org/abs/1903.03094",
},
},
{
"id": "LIGHT-Dialogue-Wild",
"display_name": "LIGHT-Dialogue-Wild",
"task": "light_dialog_wild",
"tags": ["All", "Grounded", "LIGHT"],
"description": (
" LIGHT is a text adventure game with actions and dialogue."
"The WILD dataset here features 41,131+ training episodes of dialogue "
"collected from deploying a game as described in "
),
"links": {
"arXiv": "https://arxiv.org/abs/2008.08076",
"website": "http://parl.ai/projects/light",
},
},
{
"id": "MutualFriends",
"display_name": "MutualFriends",
"task": "mutualfriends",
"tags": ["All", "Goal"],
"description": (
"Task where two agents must discover which friend of theirs is "
"mutual based on the friends's attributes."
),
"links": {"website": "https://stanfordnlp.github.io/cocoa/"},
},
{
"id": "MCTest",
"display_name": "MCTest",
"task": "mctest",
"tags": ["All", "QA"],
"description": ("Questions about short children's stories."),
"links": {
"website": (
"https://www.microsoft.com/en-us/research/publication/"
"mctest-challenge-dataset-open-domain-machine-comprehension-text/"
)
},
},
{
"id": "MovieDD-QA",
"display_name": "Movie Dialog QA",
"task": "moviedialog:Task:1",
"tags": ["All", "QA", "MovieDD"],
"description": (
"Closed-domain QA dataset asking templated questions about movies, "
"answerable from Wikipedia, similar to WikiMovies."
),
"links": {"arXiv": "https://arxiv.org/abs/1511.06931"},
},
{
"id": "MovieDD-QARecs",
"display_name": "Movie Dialog QA Recommendations",
"task": "moviedialog:Task:3",
"tags": ["All", "Goal", "MovieDD"],
"description": (
"Dialogs discussing questions about movies as well as recommendations."
),
"links": {"arXiv": "https://arxiv.org/abs/1511.06931"},
},
{
"id": "MovieDD-Recs",
"display_name": "Movie Dialog Recommendations",
"task": "moviedialog:Task:2",
"tags": ["All", "QA", "MovieDD"],
"description": ("Questions asking for movie recommendations."),
"links": {"arXiv": "https://arxiv.org/abs/1511.06931"},
},
{
"id": "MovieDD-Reddit",
"display_name": "Movie Dialog Reddit",
"task": "moviedialog:Task:4",
"tags": ["All", "ChitChat", "MovieDD"],
"description": (
"Dialogs discussing Movies from Reddit (the Movies SubReddit)."
),
"links": {"arXiv": "https://arxiv.org/abs/1511.06931"},
},
{
"id": "MTurkWikiMovies",
"display_name": "MTurk WikiMovies",
"task": "mturkwikimovies",
"tags": ["All", "QA"],
"description": (
"Closed-domain QA dataset asking MTurk-derived questions about "
"movies, answerable from Wikipedia."
),
"links": {"arXiv": "https://arxiv.org/abs/1611.09823"},
},
{
"id": "MultiNLI",
"display_name": "MultiNLI",
"task": "multinli",
"tags": ["All", "Entailment", "decanlp"],
"description": (
"A dataset designed for use in the development and evaluation of "
"machine learning models for sentence understanding. Each example "
"contains a premise and hypothesis. Model has to predict whether "
"premise and hypothesis entail, contradict or are neutral to each "
"other."
),
"links": {"arXiv": "https://arxiv.org/abs/1704.05426"},
},
{
"id": "NarrativeQA",
"display_name": "NarrativeQA",
"task": "narrative_qa",
"tags": ["All", "QA"],
"description": (
"A dataset and set of tasks in which the reader must answer "
"questions about stories by reading entire books or movie scripts. "
),
"links": {"arXiv": "https://arxiv.org/abs/1712.07040"},
"notes": (
"You can access summaries only task for NarrativeQA by using task "
"'narrative_qa:summaries'. By default, only stories are provided."
),
},
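    # Editor's illustrative note: per the NarrativeQA notes above, the summaries-only
    # variant can be viewed with
    #   parlai display_data -t narrative_qa:summaries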
{
"id": "NaturalQuestions",
"display_name": "Natural Questions",
"task": "natural_questions",
"tags": ["All", "QA"],
"description": (
"An open domain question answering dataset. "
"Each example contains real questions that people searched "
"for in Google and the content of the a Wikipedia article that "
"was amongst the top 5 search resutls for that query, "
"and its annotations. The annotations have the options of a long "
"answer that is seleced from span of major content entities in "
"the Wikipedia article (e.g., paragraphs, tables), a short answer"
"that is selected from one or more short span of words in the "
"article, or 'yes/no'. The existence of any of these answer "
"formats depends on whether the main question can be answered, "
"given the article; if not they are left empty."
),
"links": {
"paper": "https://research.google/pubs/pub47761/",
"website": "https://ai.google.com/research/NaturalQuestions",
},
"notes": (
"Since this task uses ChunkTeacher, it should be used with streaming."
),
},
{
"id": "OpenSubtitles",
"display_name": "Open Subtitles",
"task": "opensubtitles",
"tags": ["All", "ChitChat"],
"description": "Dataset of dialogs from movie scripts.",
"links": {
"version 2018 website": "http://opus.lingfil.uu.se/OpenSubtitles2018.php",
"version 2009 website": "http://opus.lingfil.uu.se/OpenSubtitles.php",
"related work (arXiv)": "https://arxiv.org/abs/1506.05869",
},
},
{
"id": "personalized-dialog-full",
"display_name": "Personalized Dialog Full Set",
"task": "personalized_dialog:AllFull",
"tags": ["All", "Goal", "Personalization"],
"description": (
"Simulated dataset of restaurant booking focused on personalization "
"based on user profiles."
),
"links": {"arXiv": "https://arxiv.org/abs/1706.07503"},
},
{
"id": "personalized-dialog-small",
"display_name": "Personalized Dialog Small Set",
"task": "personalized_dialog:AllSmall",
"tags": ["All", "Goal", "Personalization"],
"description": (
"Simulated dataset of restaurant booking focused on personalization "
"based on user profiles."
),
"links": {"arXiv": "https://arxiv.org/abs/1706.07503"},
},
{
"id": "QACNN",
"display_name": "QA CNN",
"task": "qacnn",
"tags": ["All", "Cloze"],
"description": (
"Cloze dataset based on a missing (anonymized) entity phrase from a "
"CNN article"
),
"links": {"arXiv": "https://arxiv.org/abs/1506.03340"},
},
{
"id": "QADailyMail",
"display_name": "QA Daily Mail",
"task": "qadailymail",
"tags": ["All", "Cloze"],
"description": (
"Cloze dataset based on a missing (anonymized) entity phrase from a "
"Daily Mail article."
),
"links": {"arXiv": "https://arxiv.org/abs/1506.03340"},
},
{
"id": "QuAC",
"display_name": "Question Answering in Context",
"task": "quac",
"tags": ["All", "QA"],
"description": (
"Question Answering in Context is a dataset for modeling, "
"understanding, and participating in information seeking dialog. Data "
"instances consist of an interactive dialog between two crowd workers: "
"(1) a student who poses a sequence of freeform questions to learn as "
"much as possible about a hidden Wikipedia text, and (2) a teacher who "
"answers the questions by providing short excerpts (spans) from the text. "
"QuAC introduces challenges not found in existing machine comprehension "
"datasets: its questions are often more open-ended, unanswerable, "
"or only meaningful within the dialog context."
),
"links": {"arXiv": "https://arxiv.org/abs/1808.07036"},
},
{
"id": "SelfFeedingChatbot",
"display_name": "Self-Feeding Chatbot",
"task": "self_feeding",
"tags": ["diaexp", "diasen", "All"],
"description": (
"Learning from Dialogue after Deployment. Leveraging user textual "
"feedback to improve the chatbot's abilities."
),
"links": {"arXiv": "https://arxiv.org/abs/1901.05415"},
},
{
"id": "SimpleQuestions",
"display_name": "Simple Questions",
"task": "simplequestions",
"tags": ["All", "QA"],
"description": ("Open-domain QA dataset based on Freebase triples."),
"links": {"arXiv": "https://arxiv.org/abs/1506.02075"},
},
{
"id": "SNLI",
"display_name": "The Stanford Natural Language Inference (SNLI) Corpus",
"task": "snli",
"tags": ["All", "Entailment"],
"description": (
"The SNLI corpus (version 1.0) is a collection of 570k "
"human-written English sentence pairs manually labeled for balanced "
"classification with the labels entailment, contradiction, and "
"neutral, supporting the task of natural language inference (NLI), "
"also known as recognizing textual entailment (RTE)"
),
"links": {"website": "https://nlp.stanford.edu/projects/snli/"},
},
{
"id": "SQuAD2",
"display_name": "SQuAD2",
"task": "squad2",
"tags": ["All", "QA"],
"description": (
"Open-domain QA dataset answerable from a given paragraph from "
"Wikipedia."
),
"links": {"arXiv": "http://arxiv.org/abs/1806.03822"},
},
{
"id": "SQuAD",
"display_name": "SQuAD",
"task": "squad",
"tags": ["All", "QA"],
"description": (
"Open-domain QA dataset answerable from a given paragraph from "
"Wikipedia."
),
"links": {"arXiv": "https://arxiv.org/abs/1606.05250"},
},
{
"id": "TriviaQA",
"display_name": "TriviaQA",
"task": "triviaqa",
"tags": ["All", "QA"],
"description": (
"Open-domain QA dataset with question-answer-evidence triples."
),
"links": {"arXiv": "https://arxiv.org/abs/1705.03551"},
},
{
"id": "TaskNTalk",
"display_name": "Task N' Talk",
"task": "taskntalk",
"tags": ["All", "Goal"],
"description": (
"Dataset of synthetic shapes described by attributes, for agents to "
"play a cooperative QA game."
),
"links": {"arXiv": "https://arxiv.org/abs/1706.08502"},
},
{
"id": "Ubuntu",
"display_name": "Ubuntu",
"task": "ubuntu",
"tags": ["All", "ChitChat", "Dodeca"],
"description": (
"Dialogs between an Ubuntu user and an expert trying to fix issue, "
"we use the V2 version, which cleaned the data to some extent. "
),
"links": {"arXiv": "https://arxiv.org/abs/1506.08909."},
},
{
"id": "WebQuestions",
"display_name": "Web Questions",
"task": "webquestions",
"tags": ["All", "QA"],
"description": ("Open-domain QA dataset from Web queries."),
"links": {"paper": "http://www.aclweb.org/anthology/D13-1160"},
},
{
"id": "WikiMovies",
"display_name": "WikiMovies",
"task": "wikimovies",
"tags": ["All", "QA"],
"description": (
"Closed-domain QA dataset asking templated questions about movies, "
"answerable from Wikipedia."
),
"links": {"arXiv": "https://arxiv.org/abs/1606.03126"},
},
{
"id": "WikiQA",
"display_name": "WikiQA",
"task": "wikiqa",
"tags": ["All", "QA"],
"description": ("Open domain QA from Wikipedia dataset"),
"links": {
"website": (
"https://www.microsoft.com/en-us/research/publication/wikiqa-a-"
"challenge-dataset-for-open-domain-question-answering/"
)
},
},
{
"id": "VQAv1",
"display_name": "VQAv1",
"task": "vqa_v1",
"tags": ["All", "Visual"],
"description": ("Open-ended question answering about visual content."),
"links": {"arXiv": "https://arxiv.org/abs/1505.00468"},
},
{
"id": "VQAv2",
"display_name": "VQAv2",
"task": "vqa_v2",
"tags": ["All", "Visual"],
"description": ("Bigger, more balanced version of the original VQA dataset."),
"links": {"arXiv": "https://arxiv.org/abs/1612.00837"},
},
{
"id": "VisDial",
"display_name": "VisDial",
"task": "visdial",
"tags": ["All", "Visual"],
"description": (
"Task which requires agents to hold a meaningful dialog about "
"visual content."
),
"links": {"arXiv": "https://arxiv.org/abs/1611.08669"},
},
{
"id": "MNIST_QA",
"display_name": "MNIST_QA",
"task": "mnist_qa",
"tags": ["All", "Visual"],
"description": (
"Task which requires agents to identify which number they are "
"seeing. From the MNIST dataset."
),
},
{
"id": "InsuranceQA",
"display_name": "InsuranceQA",
"task": "insuranceqa",
"tags": ["All", "QA"],
"description": (
"Task which requires agents to identify high quality answers "
"composed by professionals with deep domain knowledge."
),
"links": {"arXiv": "https://arxiv.org/abs/1508.01585"},
},
{
"id": "MS_MARCO",
"display_name": "MS_MARCO",
"task": "ms_marco",
"tags": ["All", "QA"],
"description": (
"A large scale Machine Reading Comprehension Dataset with questions "
"sampled from real anonymized user queries and contexts from web "
"documents."
),
"links": {"arXiv": "https://arxiv.org/abs/1611.09268"},
},
{
"id": "CLEVR",
"display_name": "CLEVR",
"task": "clevr",
"tags": ["All", "Visual"],
"description": (
"A visual reasoning dataset that tests abilities such as attribute "
"identification, counting, comparison, spatial relationships, and "
"logical operations."
),
"links": {"arXiv": "https://arxiv.org/abs/1612.06890"},
},
{
"id": "nlvr",
"display_name": "nlvr",
"task": "nlvr",
"tags": ["All", "Visual"],
"description": (
"Cornell Natural Language Visual Reasoning (NLVR) is a language "
"grounding dataset based on pairs of natural language statements "
"grounded in synthetic images."
),
"links": {"website": "http://lic.nlp.cornell.edu/nlvr/"},
},
{
"id": "WMT",
"display_name": "WMT",
"task": "wmt",
"tags": ["All", "MT"],
"description": (
"Workshop on Machine Translation task, currently only includes en_de."
),
},
{
"id": "IWSLT14",
"display_name": "IWSLT14",
"task": "iwslt14",
"tags": ["All", "MT", "decanlp"],
"description": (
"2014 International Workshop on Spoken Language task, currently "
"only includes en_de and de_en."
),
"links": {"website": "https://wit3.fbk.eu"},
},
{
"id": "ConvAI2",
"display_name": "ConvAI2",
"task": "convai2",
"tags": ["All", "ChitChat", "Dodeca"],
"description": (
"A chit-chat dataset based on PersonaChat for a NIPS 2018 competition. "
),
"links": {
"arXiv": "https://arxiv.org/abs/1801.07243",
"website": "http://convai.io/",
},
},
{
"id": "ConvAI_ChitChat",
"display_name": "ConvAI_ChitChat",
"task": "convai_chitchat",
"tags": ["All", "ChitChat", "decanlp"],
"description": (
"Human-bot dialogues containing free discussions of randomly chosen "
"paragraphs from SQuAD."
),
"links": {"website": "http://convai.io/data/"},
},
{
"id": "Dialogue_QE",
"display_name": "Dialogue_QE",
"task": "dialogue_qe",
"tags": ["All"],
"description": (
"Human-bot dialogues labelled for quality at the level of "
"dialogues. Can be used to train dialogue-level metric for dialogue "
"systems."
),
},
{
"id": "QAngaroo",
"display_name": "QAngaroo",
"task": "qangaroo",
"tags": ["All", "QA"],
"description": (
"Reading Comprehension with Multiple Hop. Including two datasets: "
"WIKIHOP built on on wikipedia, MEDHOP built on paper abstracts from "
"PubMed."
),
"links": {"website": "http://qangaroo.cs.ucl.ac.uk/"},
},
{
"id": "SCAN",
"display_name": "SCAN",
"task": "scan",
"tags": ["Goal", "All"],
"description": (
"SCAN is a set of simple language-driven navigation tasks for "
"studying compositional learning and zero-shot generalization. The "
"SCAN tasks were inspired by the CommAI environment, which is the "
"origin of the acronym (Simplified versions of the CommAI Navigation "
"tasks)."
),
"links": {
"arXiv": "https://arxiv.org/abs/1711.00350",
"website": "https://github.com/brendenlake/SCAN",
},
},
{
"id": "Persona-Chat",
"display_name": "Persona-Chat",
"task": "personachat",
"tags": ["ChitChat", "All"],
"description": (
"A chit-chat dataset where paired Turkers are given assigned "
"personas and chat to try to get to know each other."
),
"links": {"arXiv": "https://arxiv.org/abs/1801.07243"},
},
{
"id": "TaskMaster",
"display_name": "TaskMaster-1-2019",
"task": "taskmaster",
"tags": ["ChitChat", "All"],
"description": (
"A chit-chat dataset by GoogleAI providing high quality goal-oriented conversations"
"The dataset hopes to provoke interest in written vs spoken language"
"Both the datasets consists of two-person dialogs:"
"Spoken: Created using Wizard of Oz methodology."
"Written: Created by crowdsourced workers who were asked to write the "
"full conversation themselves playing roles of both the user and assistant."
),
"links": {"website": "https://ai.google/tools/datasets/taskmaster-1"},
},
{
"id": "Twitter",
"display_name": "Twitter",
"task": "twitter",
"tags": ["All", "ChitChat", "Dodeca"],
"description": (
"Twitter data found on GitHub. No "
"train/valid/test split was provided so 10k for valid and 10k for "
"test was chosen at random."
),
"links": {"website": "https://github.com/Marsan-Ma/chat_corpus/"},
},
{
"id": "Wikipedia",
"display_name": "Wikipedia",
"task": 'wikipedia',
"tags": ["All"],
"description": ("Dump of Wikipedia articles from 2/3/18"),
"notes": (
"Specify ':full' for the full articles to be returned, otherwise "
"defaults to ':summary', which provides the first paragraphs. To put "
"the article in the labels and the title in the text, specify "
"':key-value' at the end (for a title/content key-value "
"association)"
),
},
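    # Editor's illustrative note (flag combination assumed from the Wikipedia notes
    # above, not verified): full articles in a title/content key-value form could be
    # requested with
    #   parlai display_data -t wikipedia:full:key-value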
{
"id": "Flickr30k",
"display_name": "Flickr30k",
"task": "flickr30k",
"tags": ["All", "Visual"],
"description": ("30k captioned images pulled from Flickr compiled by UIUC. "),
"links": {
"website": "http://web.engr.illinois.edu/~bplumme2/Flickr30kEntities/",
"paper1": "https://arxiv.org/abs/1505.04870v2",
"paper2": "http://aclweb.org/anthology/Q14-1006",
},
},
{
"id": "COCO_Captions",
"display_name": "COCO_Captions",
"task": "coco_caption",
"tags": ["All", "Visual"],
"description": (
"COCO annotations derived from the 2015 COCO Caption Competition. "
),
"links": {"website": "http://cocodataset.org/"},
},
{
"id": "integration_tests",
"display_name": "Integration Tests",
"task": "integration_tests",
"tags": ["All", "Debug"],
"description": ("Artificial tasks for ensuring models perform as expected"),
},
{
"id": "ConvAI2_wild_evaluation",
"display_name": "ConvAI2_wild_evaluation",
"task": "convai2_wild_evaluation",
"tags": ["All", "ChitChat"],
"description": (
"Dataset collected during the wild evaluation of ConvaAI2 participants "
"bots. 60% train, 20% valid and 20% test is chosen at "
"random from the whole dataset."
),
"links": {"website": "http://convai.io"},
},
{
"id": "sst",
"display_name": "SST Sentiment Analysis",
"task": "sst",
"tags": ["All", "decanlp"],
"description": (
"Dataset containing sentiment trees of movie reviews. We use the modified "
"binary sentence analysis subtask given by the DecaNLP paper here."
),
"links": {
"website": "https://nlp.stanford.edu/sentiment/index.html",
"website2": "https://github.com/openai/generating-reviews-discovering-sentiment/",
},
},
{
"id": "cnn_dm",
"display_name": "CNN/DM Summarisation",
"task": "cnn_dm",
"tags": ["All", "decanlp"],
"description": (
"Dataset collected from CNN and the Daily Mail with summaries as labels, "
"Implemented as part of the DecaNLP task."
),
"links": {"website": "https://cs.nyu.edu/~kcho/DMQA/"},
},
{
"id": "qasrl",
"display_name": "QA-SRL Semantic Role Labeling",
"task": "qasrl",
"tags": ["All", "decanlp"],
"description": ("QA dataset implemented as part of the DecaNLP task."),
"links": {"website": "https://dada.cs.washington.edu/qasrl/"},
},
{
"id": "qazre",
"display_name": "QA-ZRE Relation Extraction",
"task": "qazre",
"tags": ["All", "decanlp"],
"description": (
"Zero Shot relation extraction task implemented as part of the DecaNLP "
"task."
),
"links": {"website": "http://nlp.cs.washington.edu/zeroshot/"},
},
{
"id": "woz",
"display_name": "WOZ restuarant reservation (Goal-Oriented Dialogue)",
"task": "woz",
"tags": ["All", "decanlp"],
"description": (
"Dataset containing dialogues dengotiating a resturant reservation. "
"Implemented as part of the DecaNLP task, focused on the change "
"in the dialogue state."
),
"links": {"arXiv": "https://arxiv.org/abs/1604.04562"},
},
{
"id": "wikisql",
"display_name": "WikiSQL semantic parsing task",
"task": "wikisql",
"tags": ["All", "decanlp"],
"description": (
"Dataset for parsing sentences to SQL code, given a table. "
"Implemented as part of the DecaNLP task."
),
"links": {"website": "https://github.com/salesforce/WikiSQL"},
},
{
"id": "mwsc",
"display_name": "MWSC pronoun resolution",
"task": "mwsc",
"tags": ["All", "decanlp"],
"description": (
"Resolving possible ambiguous pronouns. "
"Implemented as part of the DecaNLP "
"task, and can be found on the decaNLP github."
),
"links": {"website": "https://github.com/salesforce/decaNLP"},
},
{
"id": "decanlp",
"display_name": "DecaNLP: The Natural Language Decathlon",
"task": "decanlp",
"tags": ["All"],
"description": (
"A collection of 10 tasks (SQuAD, IWSLT, CNN/DM, MNLI, SST, QA‑SRL,"
"QA‑ZRE, WOZ, WikiSQL and MWSC) designed to challenge a model with a range "
"of different tasks. Note that we use IWSLT 2014 instead of "
"2016/2013test/2014test for train/dev/test as given in the DecaNLP paper. "
),
"links": {
"arXiv": "https://arxiv.org/abs/1806.08730",
"github": "https://github.com/salesforce/decaNLP",
},
},
{
"id": "Personality_Captions",
"display_name": "Personality_Captions",
"task": "personality_captions",
"tags": ["All", "Visual"],
"description": (
"200k images from the YFCC100m dataset "
"with captions conditioned on one of 215 personalities."
),
"links": {
"website": "https://multimediacommons.wordpress.com/yfcc100m-core-dataset/",
"arXiv": "https://arxiv.org/abs/1810.10665",
},
"notes": (
"If you have already downloaded the images, please specify with "
"the `--yfcc-path` flag, as the image download script takes a "
"very long time to run"
),
},
{
"id": "Image_Chat",
"display_name": "Image_Chat",
"task": "image_chat",
"tags": ["All", "Visual", "ChitChat"],
"description": (
"202k dialogues and 401k utterances over 202k images from "
"the YFCC100m dataset "
"using 215 possible personality traits"
),
"links": {
"website": "https://klshuster.github.io/image_chat/",
"website2": "https://multimediacommons.wordpress.com/yfcc100m-core-dataset/",
},
"notes": (
"If you have already downloaded the images, please specify with "
"the `--yfcc-path` flag, as the image download script takes a "
"very long time to run"
),
},
{
"id": "Image_Chat_Generation",
"display_name": "Image_Chat_Generation",
"task": "image_chat:Generation",
"tags": ["All", "Visual", "ChitChat", "Dodeca"],
"description": ("Image Chat task to train generative model"),
},
{
"id": "Wizard_of_Wikipedia",
"display_name": "Wizard_of_Wikipedia",
"task": "wizard_of_wikipedia",
"tags": ["All", "ChitChat"],
"description": (
"A dataset with conversations directly grounded with knowledge "
"retrieved from Wikipedia. Contains 201k utterances from 22k "
"dialogues spanning over 1300 diverse topics, split into train, "
"test, and valid sets. The test and valid sets are split "
"into two sets each: one with overlapping topics with the train "
"set, and one with unseen topics."
),
"links": {"arXiv": "https://arxiv.org/abs/1811.01241"},
"notes": (
"To access the different valid/test splits (unseen/seen), specify "
"the corresponding split (`random_split` for seen, `topic_split` "
"for unseen) after the last colon in the task. "
"E.g. `wizard_of_wikipedia:WizardDialogKnowledgeTeacher:random_split`"
),
},
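    # Editor's illustrative note: following the Wizard_of_Wikipedia notes above, the
    # unseen-topic split would be requested with
    #   parlai display_data -t wizard_of_wikipedia:WizardDialogKnowledgeTeacher:topic_split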
{
"id": "Wizard_of_Wikipedia_Generator",
"display_name": "Wizard_of_Wikipedia_Generator",
"task": "wizard_of_wikipedia:Generator",
"tags": ["All", "ChitChat", "Dodeca"],
"description": ("Wizard of Wikipedia task to train generative models"),
},
{
"id": "DailyDialog",
"display_name": "Daily Dialog",
"task": "dailydialog",
"tags": ["All", "ChitChat", "Dodeca"],
"description": (
"A dataset of chitchat dialogues with strong annotations for "
"topic, emotion and utterance act. This version contains both sides "
"of every conversation, and uses the official train/valid/test splits "
"from the original authors."
),
"links": {"arXiv": "https://arxiv.org/abs/1710.03957"},
},
{
"id": "EmpatheticDialogues",
"display_name": "Empathetic Dialogues",
"task": "empathetic_dialogues",
"tags": ["All", "ChitChat", "Dodeca"],
"description": (
"A dataset of 25k conversations grounded in emotional situations "
"to facilitate training and evaluating dialogue systems."
"Dataset has been released under the CC BY-NC license."
),
"links": {"arXiv": "https://arxiv.org/abs/1811.00207"},
"notes": (
"EmpatheticDialoguesTeacher returns examples like so: \n\n"
" - [text]: context line (previous utterance by 'speaker') \n"
" - [labels]: label line (current utterance by 'listener') \n\n"
"with additional task specific fields: \n\n"
" - [situation]: a 1-3 sentence description of the situation that the conversation is \n"
" - [emotion]: one of 32 emotion words \n\n"
"Other optional fields: \n\n"
" - [prepend_ctx]: fasttext prediction on context line - or None \n"
" - [prepend_cand]: fasttext prediction on label line (candidate) - or None \n"
" - [deepmoji_ctx]: vector encoding from deepmoji penultimate layer - or None \n"
" - [deepmoji_cand]: vector encoding from deepmoji penultimate layer for label line (candidate) - or None "
),
},
{
"id": "DialogueSafety",
"display_name": "Dialogue Safety",
"task": "dialogue_safety",
"tags": ["All"],
"description": (
"Several datasets described in the paper Built it Break it Fix it "
"for Dialogue Safety: Robustness from Adversarial Human Attack. "
"All datasets are classification tasks in which the goal is to "
"determine if the text is offensive or 'safe'."
),
"links": {"arXiv": "https://arxiv.org/abs/1908.06083"},
},
{
"id": "MultiWOZv2.0",
"display_name": "MultiWOZ 2.0",
"task": "multiwoz_v20",
"tags": ["All", "Goal"],
"description": (
"A fully labeled collection of human-written conversations spanning"
"over multiple domains and topics."
),
"links": {"website": "http://dialogue.mi.eng.cam.ac.uk/index.php/corpus/"},
},
{
"id": "MultiWOZv2.1",
"display_name": "MultiWOZ 2.1",
"task": "multiwoz_v21",
"tags": ["All", "Goal"],
"description": (
"A fully labeled collection of human-written conversations spanning"
"over multiple domains and topics."
),
"links": {"website": "http://dialogue.mi.eng.cam.ac.uk/index.php/corpus/"},
},
{
"id": "SelfChat",
"display_name": "SelfChat",
"task": "self_chat",
"tags": [],
"description": "Not a dataset, but a generic world for model self-chats.",
},
{
"id": "OneCommon",
"display_name": "OneCommon",
"task": "onecommon",
"tags": ["All", "Goal"],
"description": (
"A collaborative referring task which requires advanced skills "
"of common grounding under continuous and partially-observable context. "
"This code also includes reference-resolution annotation."
),
"links": {"website": "https://github.com/Alab-NII/onecommon"},
},
{
"id": "IGC",
"display_name": "Image Grounded Conversations",
"task": "igc",
"tags": ["All", "Visual", "ChitChat", "Dodeca"],
"description": (
"A dataset of (image, context, question, answer) tuples, comprised "
"of eventful images taken from Bing, Flickr, and COCO."
),
"links": {"arXiv": "https://arxiv.org/abs/1701.08251"},
},
{
"id": "ANLI",
"display_name": "Adversarial Natural Language Inference (ANLI) Corpus",
"task": "anli",
"tags": ["All", "Entailment", "NLI"],
"description": (
"The ANLI corpus (version 1.0) is a new large-scale NLI benchmark dataset,"
"collected via an iterative, adversarial human-and-model-in-the-loop procedure"
"with the labels entailment, contradiction, and neutral. A total of three rounds "
"of data are collected that progressively increase in difficulty and complexity."
),
"links": {
"github": "https://github.com/facebookresearch/anli",
"arXiv": "https://arxiv.org/abs/1910.14599",
},
},
{
"id": "NLI",
"display_name": "Natural Language Inference (NLI) Corpus",
"task": "nli",
"tags": ["All", "Entailment"],
"description": (
"A collection of 3 popular Natural Language Inference(NLI) benchmark tasks: "
"ANLI v0.1, MultiNLI 1.0, SNLI 1.0."
),
},
{
"id": "Funpedia",
"display_name": "Funpedia",
"task": "funpedia",
"tags": ["All"],
"description": (
"Task for rephrasing sentences from Wikipedia conditioned on a persona."
),
},
{
"id": "LIGHTGenderBias",
"display_name": "LIGHT Gender Bias",
"task": "light_genderation_bias",
"tags": ["All"],
"description": ("Task for debiasing the LIGHT dataset."),
"links": {"arXiv": "https://arxiv.org/abs/1911.03842"},
},
{
"id": "AirDialogue",
"display_name": "AirDialogue",
"task": "airdialogue",
"tags": ["All", "Goal"],
"description": (
"Task for goal-oriented dialogue using airplane booking conversations "
"between agents and customers."
),
"links": {"website": "https://github.com/google/airdialogue"},
},
{
"id": "HollE",
"display_name": "Holl-E",
"task": "holl_e",
"tags": ["All", "ChitChat"],
"description": (
"Sequence of utterances and responses with background knowledge about"
"movies. From the Holl-E dataset."
),
"links": {"website": "https://github.com/nikitacs16/Holl-E"},
},
{
"id": "ELI5",
"display_name": "ELI5",
"task": "eli5",
"tags": ["All", "QA"],
"description": (
"This dataset contains Question and Answer data from Reddit "
"explainlikeimfive posts and comments."
),
"links": {"website": "https://github.com/facebookresearch/ELI5/"},
},
{
"id": "ReDial",
"display_name": "ReDial",
"task": "redial",
"tags": ["All", "ChitChat", "Goal"],
"description": (
"Annotated dataset of dialogues where users recommend movies to each other."
),
"links": {"website": "https://redialdata.github.io/website/"},
},
{
"id": "DREAM",
"display_name": "DREAM",
"task": "dream",
"tags": ["All", "QA"],
"description": (
"A multiple-choice answering dataset based on multi-turn, multi-party dialogue."
),
"links": {"website": "https://dataset.org/dream/"},
},
{
"id": "C3",
"display_name": "C3",
"task": "c3",
"tags": ["All", "QA"],
"description": (
"A multiple-choice answering dataset in Chinese based on a prior passage."
),
"links": {"website": "https://dataset.org/c3/"},
},
{
"id": "CommonSenseQA",
"display_name": "CommonSenseQA",
"task": "commonsenseqa",
"tags": ["All", "QA"],
"description": (
"CommonSenseQA is a multiple-choice Q-A dataset that relies on commonsense "
"knowlegde to predict correct answers."
),
"links": {"wesite": "https://www.tau-nlp.org/commonsenseqa"},
},
{
"id": "StyleGen",
"display_name": "Style-Controlled Generation",
"task": "style_gen",
"tags": ["All", "ChitChat"],
"description": (
"Dialogue datasets (BlendedSkillTalk, ConvAI2, EmpatheticDialogues, and "
"Wizard of Wikipedia) labeled with personalities taken from the Image-Chat "
"dataset. Used for the style-controlled generation project"
),
},
{
"id": "GoogleSGD",
"display_name": "GoogleSGD",
"task": "google_sgd",
"tags": ["All", "Goal"],
"description": (
"The Schema-Guided Dialogue (SGD) dataset consists of over 20k "
"annotated multi-domain, task-oriented conversations between a "
"human and a virtual assistant."
),
},
{
"id": "TaskMaster2",
"display_name": "TaskMaster2",
"task": "taskmaster2",
"tags": ["All", "Goal"],
"description": (
"The second version of TaskMaster, containing Wizard-of-Oz dialogues "
"for task oriented dialogue in 7 domains."
),
},
{
"id": "GenderationBiasControlTask",
"display_name": "GenderationBiasControlTask",
"task": "genderation_bias:controllable_task",
"tags": ["All"],
"description": (
"A teacher that wraps other ParlAI tasks and appends control tokens to the "
"text field indicating the presence of gender words in the label(s)."
),
},
{
"id": "MDGender",
"display_name": "MD Gender",
"task": "md_gender",
"tags": ["All"],
"description": (
"Tasks for the multi-dimensional gender bias classifier training."
),
"links": {"arXiv": "https://arxiv.org/abs/2005.00614"},
},
]
| 37.842379 | 120 | 0.537231 |
a93e1d69f0379d0e76357f7dcd4fc4d912fff707
| 2,580 |
py
|
Python
|
netket/nn/__init__.py
|
inailuig/netket
|
ab57a6fb019edb9ac298969950724781f2ae2b22
|
[
"Apache-2.0"
] | null | null | null |
netket/nn/__init__.py
|
inailuig/netket
|
ab57a6fb019edb9ac298969950724781f2ae2b22
|
[
"Apache-2.0"
] | 2 |
2022-02-16T10:57:01.000Z
|
2022-02-16T10:57:10.000Z
|
netket/nn/__init__.py
|
inailuig/netket
|
ab57a6fb019edb9ac298969950724781f2ae2b22
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flax as _flax
from .activation import (
celu,
elu,
gelu,
glu,
leaky_relu,
log_sigmoid,
log_softmax,
relu,
sigmoid,
soft_sign,
softmax,
softplus,
swish,
silu,
tanh,
cosh,
sinh,
logcosh,
logsinh,
logtanh,
)
from flax.linen import (
MultiHeadDotProductAttention,
SelfAttention,
dot_product_attention,
make_attention_mask,
make_causal_mask,
combine_masks,
)
from .linear import (
Conv,
ConvTranspose,
Dense,
DenseGeneral,
DenseSymm,
DenseEquivariant,
)
from .module import Module
from flax.linen.module import compact, enable_named_call, disable_named_call, Variable
from .initializers import zeros, ones
from flax.linen import Embed
from flax.linen import compact
def to_array(hilbert, machine, params, normalize=True):
import numpy as np
from jax import numpy as jnp
from netket.utils import get_afun_if_module
machine = get_afun_if_module(machine)
if hilbert.is_indexable:
xs = hilbert.all_states()
psi = machine(params, xs)
logmax = psi.real.max()
psi = jnp.exp(psi - logmax)
if normalize:
norm = jnp.linalg.norm(psi)
psi /= norm
return psi
else:
raise RuntimeError("The hilbert space is not indexable")
def to_matrix(hilbert, machine, params, normalize=True):
import numpy as np
from jax import numpy as jnp
from netket.utils import get_afun_if_module
machine = get_afun_if_module(machine)
if hilbert.is_indexable:
xs = hilbert.all_states()
psi = machine(params, xs)
logmax = psi.real.max()
psi = jnp.exp(psi - logmax)
L = hilbert.physical.n_states
rho = psi.reshape((L, L))
if normalize:
trace = jnp.trace(rho)
rho /= trace
return rho
else:
raise RuntimeError("The hilbert space is not indexable")
| 23.669725 | 86 | 0.668605 |
72db496cb4bb2099e0caef9a8a35d09f0babb0ef
| 4,394 |
py
|
Python
|
src/bq_test_kit/data_literal_transformers/dsv_data_literal_transformer.py
|
tiboun/python-bigquery-test-kit
|
8f62bdf21122b615f56088a8e2701e0bb4c71f3b
|
[
"MIT"
] | 31 |
2021-03-03T21:07:44.000Z
|
2022-03-20T22:00:45.000Z
|
src/bq_test_kit/data_literal_transformers/dsv_data_literal_transformer.py
|
tiboun/python-bq-test-kit
|
8f62bdf21122b615f56088a8e2701e0bb4c71f3b
|
[
"MIT"
] | 14 |
2020-11-25T20:45:31.000Z
|
2021-01-29T13:06:28.000Z
|
src/bq_test_kit/data_literal_transformers/dsv_data_literal_transformer.py
|
tiboun/python-bq-test-kit
|
8f62bdf21122b615f56088a8e2701e0bb4c71f3b
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Bounkong Khamphousone
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
# C0114 disabled because this module contains only one class
# pylint: disable=C0114
import csv
from copy import deepcopy
from typing import Callable, List, Optional, Union
from google.cloud.bigquery.schema import SchemaField
from bq_test_kit.data_literal_transformers.base_data_literal_transformer import \
BaseDataLiteralTransformer
from bq_test_kit.resource_loaders.base_resource_loader import \
BaseResourceLoader
class DsvDataLiteralTransformer(BaseDataLiteralTransformer):
"""Loader of Delimiter-Seperated Value data. By default, it's CSV.
"""
def __init__(self):
"""
Constructor of DsvDataLiteralTransformer.
Config transformer to load CSV file by default.
"""
super().__init__()
self.field_delimiter = ","
self.quote_character = "\""
self.escape_character = "\\"
self.leading_rows_to_skip = 0
def with_field_delimiter(self, delimiter: str):
"""The field's separator.
Args:
delimiter (str): delimiter to use.
Returns:
DsvDataLiteralTransformer: new instance of DsvDataLiteralTransformer with updated field_delimiter.
"""
new_ddlt = deepcopy(self)
new_ddlt.field_delimiter = delimiter
return new_ddlt
def with_quote_character(self, char: str):
"""Character used to quote data sections
Args:
char (str): a character.
Returns:
DsvDataLiteralTransformer: new instance of DsvDataLiteralTransformer with updated quote character.
"""
new_ddlt = deepcopy(self)
new_ddlt.quote_character = char
return new_ddlt
def with_escape_character(self, char: str):
"""Character used to quote data sections
Args:
char (str): a character.
Returns:
DsvDataLiteralTransformer: new instance of DsvDataLiteralTransformer with updated escape character.
"""
new_ddlt = deepcopy(self)
new_ddlt.escape_character = char
return new_ddlt
def skip_leading_rows(self, nb_lines: int):
"""Number of rows to skip from the beginning of the file.
Args:
nb_lines (int): number of lines
Returns:
DsvDataLiteralTransformer: new instance of DsvDataLiteralTransformer with updated leading rows to skip.
"""
new_ddlt = deepcopy(self)
new_ddlt.leading_rows_to_skip = nb_lines
return new_ddlt
def _load(self, datum: Union[BaseResourceLoader, str, List[str]],
schema_fields: List[SchemaField],
transform_field_name: Optional[Callable[[str], str]]) -> str:
"""
        Load DSV inputs and transform them into a data literal, preserving the target schema with a fulfilled line.
        This fulfilled line is, of course, discarded from the literal datum.
Extra columns are put in another column named __extra-columns__.
Args:
datum (Union[BaseResourceLoader, str, List[str], None]):
datum in a file or a string containing lines of datum or a list of data.
schema List[SchemaField]:
schema to match with while transforming data to literal.
Raises:
DataLiteralTransformException:
raised when an input data could not be transformed
as data literal with schema match.
Returns:
str: data literal
"""
csv_lines = self._load_lines_as_array(datum)
data_csv_lines = csv_lines[self.leading_rows_to_skip:]
if data_csv_lines:
rows = csv.DictReader(
data_csv_lines,
fieldnames=[f.name for f in schema_fields],
delimiter=self.field_delimiter,
quotechar=self.quote_character,
escapechar=self.escape_character,
doublequote=False,
skipinitialspace=False,
quoting=csv.QUOTE_MINIMAL,
strict=True,
restkey="__extra-columns__"
)
return self._to_data_literal(rows, schema_fields, transform_field_name)
return self.load([], schema_fields)
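# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of bq-test-kit): a hedged
# example of chaining the fluent ``with_*`` configuration. The public ``load``
# entry point is assumed to be inherited from BaseDataLiteralTransformer (it is
# called above in ``_load``), and the schema/data are made up for illustration.
if __name__ == "__main__":
    transformer = (
        DsvDataLiteralTransformer()
        .with_field_delimiter(";")
        .with_quote_character("'")
        .skip_leading_rows(1)  # skip the header line of the sample below
    )
    sample_schema = [SchemaField("name", "STRING"), SchemaField("age", "INT64")]
    print(transformer.load(["name;age", "'bounkong';30"], sample_schema))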
| 34.328125 | 115 | 0.641557 |
e692bf925ba0352186dcf08de03c0ddd5087542d
| 137 |
py
|
Python
|
botlistbot/api/config.py
|
anandpskerala/BotListBot
|
4ac1b1f7c4f4d251c80a24306542001f40b85216
|
[
"MIT"
] | 66 |
2017-07-21T07:16:14.000Z
|
2022-02-13T03:52:52.000Z
|
botlistbot/api/config.py
|
anandpskerala/BotListBot
|
4ac1b1f7c4f4d251c80a24306542001f40b85216
|
[
"MIT"
] | 10 |
2017-10-20T00:51:43.000Z
|
2021-06-02T00:07:32.000Z
|
botlistbot/api/config.py
|
anandpskerala/BotListBot
|
4ac1b1f7c4f4d251c80a24306542001f40b85216
|
[
"MIT"
] | 44 |
2018-01-05T15:01:47.000Z
|
2022-02-10T20:32:41.000Z
|
SECURITY_PASSWORD_HASH = 'pbkdf2_sha512'
SECURITY_TRACKABLE = True
SECURITY_PASSWORD_SALT = 'something_super_secret_change_in_production'
| 45.666667 | 70 | 0.890511 |
2c74c14c2cba44b6c01bf4a660a27564374d28de
| 2,195 |
py
|
Python
|
datasette_git_importer/git_utils.py
|
brandonrobertz/datasette-git-importer
|
f2b367dba56dcba1355dfd5dc18e237c6320925d
|
[
"Apache-2.0"
] | null | null | null |
datasette_git_importer/git_utils.py
|
brandonrobertz/datasette-git-importer
|
f2b367dba56dcba1355dfd5dc18e237c6320925d
|
[
"Apache-2.0"
] | null | null | null |
datasette_git_importer/git_utils.py
|
brandonrobertz/datasette-git-importer
|
f2b367dba56dcba1355dfd5dc18e237c6320925d
|
[
"Apache-2.0"
] | null | null | null |
# from datetime import datetime
import os
from git import Repo
def get_repo_remote(repo_owner, repo_name, github_user, github_token):
return f"https://{github_user}:{github_token}@github.com/{repo_owner}/{repo_name}"
def write_csv_to_repo(filename, data, plugin_config):
url = get_repo_remote(
plugin_config["repo_owner"],
plugin_config["repo_name"],
plugin_config["github_user"],
plugin_config["github_token"]
)
repo_dir = plugin_config.get("repo_dir", "/tmp/nextli-datasette")
repo_path = os.path.abspath(repo_dir)
print(f"Repo Path: {repo_path}")
if not os.path.exists(repo_path):
os.makedirs(repo_path, exist_ok=True)
repo = Repo.init(repo_path, mkdir=True)
if not len(repo.remotes):
print(f"Setting up origin => {url}")
repo.remotes.append(repo.create_remote("origin", url))
assert not repo.bare
print("Fetching origin")
repo.remotes.origin.fetch()
print("Pulling origin")
repo.remotes.origin.pull("main")
print("Checking out main")
repo.git.checkout("main")
print("Hard resetting head")
repo.git.reset("origin/main", hard=True)
# now = datetime.now().strftime("%Y-%m-%d-%H%M")
# branch_name = f"{filename}-{now}"
# repo.git.checkout(b=branch_name)
# print(f"Checked out new branch: {branch_name}")
assert not repo.is_dirty()
# newpath = os.path.join(repo_path, "config")
# os.makedirs(newpath)
print("Writing CSV")
newfilepath = os.path.join(repo_path, "csvs", filename)
print(f"Writing file {newfilepath}")
with open(newfilepath, "w") as f:
f.write(data.decode("utf-8"))
# if not repo.is_dirty() and not len(repo.untracked_files):
# print("No changes! Exiting.")
# return
print("We have unstaged changes, creating commit")
repo.index.add([newfilepath])
commit = repo.index.commit(f"Git importer: {filename}")
repo.git.push("origin", "main")
print("Commit SHA", commit.hexsha)
return commit.hexsha
if __name__ == "__main__":
head_sha = write_csv_to_repo("test.csv", "name,species\nbrandon,human\nkai,human")
print(f"HEAD SHA: {head_sha}")
| 30.068493 | 86 | 0.66287 |
ad0de2076d5a3964b148c2945cf1ca2f84f52905
| 16,669 |
py
|
Python
|
indico/modules/events/registration/controllers/display.py
|
uxmaster/indico
|
ecd19f17ef6fdc9f5584f59c87ec647319ce5d31
|
[
"MIT"
] | 1 |
2019-11-03T11:34:16.000Z
|
2019-11-03T11:34:16.000Z
|
indico/modules/events/registration/controllers/display.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
indico/modules/events/registration/controllers/display.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from operator import attrgetter
from uuid import UUID
from flask import flash, jsonify, redirect, request, session
from sqlalchemy.orm import contains_eager, subqueryload
from werkzeug.exceptions import Forbidden, NotFound
from indico.modules.auth.util import redirect_to_login
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.models.events import EventType
from indico.modules.events.payment import payment_event_settings
from indico.modules.events.registration import registration_settings
from indico.modules.events.registration.controllers import RegistrationEditMixin, RegistrationFormMixin
from indico.modules.events.registration.models.forms import RegistrationForm
from indico.modules.events.registration.models.invitations import InvitationState, RegistrationInvitation
from indico.modules.events.registration.models.items import PersonalDataType
from indico.modules.events.registration.models.registrations import Registration, RegistrationState
from indico.modules.events.registration.util import (check_registration_email, create_registration, generate_ticket,
get_event_regforms, get_event_section_data, get_title_uuid,
make_registration_form)
from indico.modules.events.registration.views import (WPDisplayRegistrationFormConference,
WPDisplayRegistrationFormSimpleEvent,
WPDisplayRegistrationParticipantList)
from indico.util.fs import secure_filename
from indico.util.i18n import _
from indico.web.flask.util import send_file, url_for
class RHRegistrationFormDisplayBase(RHDisplayEventBase):
@property
def view_class(self):
return (WPDisplayRegistrationFormConference
if self.event.type_ == EventType.conference
else WPDisplayRegistrationFormSimpleEvent)
class RHRegistrationFormBase(RegistrationFormMixin, RHRegistrationFormDisplayBase):
def _process_args(self):
RHRegistrationFormDisplayBase._process_args(self)
RegistrationFormMixin._process_args(self)
class RHRegistrationFormRegistrationBase(RHRegistrationFormBase):
"""Base for RHs handling individual registrations"""
REGISTRATION_REQUIRED = True
def _process_args(self):
RHRegistrationFormBase._process_args(self)
self.token = request.args.get('token')
if self.token:
self.registration = self.regform.get_registration(uuid=self.token)
if not self.registration:
raise NotFound
else:
self.registration = self.regform.get_registration(user=session.user) if session.user else None
if self.REGISTRATION_REQUIRED and not self.registration:
raise Forbidden
class RHRegistrationFormList(RHRegistrationFormDisplayBase):
"""List of all registration forms in the event"""
def _process(self):
all_regforms = get_event_regforms(self.event, session.user)
scheduled_and_registered_regforms = [regform[0] for regform in all_regforms
if regform[0].is_scheduled or regform[1]]
user_registrations = [regform[0].id for regform in all_regforms if regform[1]]
if len(scheduled_and_registered_regforms) == 1:
return redirect(url_for('.display_regform', scheduled_and_registered_regforms[0]))
return self.view_class.render_template('display/regform_list.html', self.event,
regforms=scheduled_and_registered_regforms,
user_registrations=user_registrations)
class RHParticipantList(RHRegistrationFormDisplayBase):
"""List of all public registrations"""
view_class = WPDisplayRegistrationParticipantList
@staticmethod
def _is_checkin_visible(reg):
return reg.registration_form.publish_checkin_enabled and reg.checked_in
def _merged_participant_list_table(self):
def _process_registration(reg, column_names):
personal_data = reg.get_personal_data()
columns = [{'text': personal_data.get(column_name, '')} for column_name in column_names]
return {'checked_in': self._is_checkin_visible(reg), 'columns': columns}
def _deduplicate_reg_data(reg_data_iter):
used = set()
for reg_data in reg_data_iter:
reg_data_hash = tuple(tuple(sorted(x.items())) for x in reg_data['columns'])
if reg_data_hash not in used:
used.add(reg_data_hash)
yield reg_data
column_names = registration_settings.get(self.event, 'participant_list_columns')
headers = [PersonalDataType[column_name].get_title() for column_name in column_names]
query = (Registration.query.with_parent(self.event)
.filter(Registration.is_publishable,
RegistrationForm.publish_registrations_enabled,
~RegistrationForm.is_deleted,
~Registration.is_deleted)
.join(Registration.registration_form)
.options(subqueryload('data').joinedload('field_data'),
contains_eager('registration_form')))
registrations = sorted(_deduplicate_reg_data(_process_registration(reg, column_names) for reg in query),
key=lambda reg: tuple(x['text'].lower() for x in reg['columns']))
return {'headers': headers,
'rows': registrations,
'show_checkin': any(registration['checked_in'] for registration in registrations)}
def _participant_list_table(self, regform):
def _process_registration(reg, column_ids, active_fields):
data_by_field = reg.data_by_field
def _content(column_id):
if column_id in data_by_field:
return data_by_field[column_id].get_friendly_data(for_humans=True)
elif (column_id in active_fields and active_fields[column_id].personal_data_type is not None and
active_fields[column_id].personal_data_type.column is not None):
# some legacy registrations have no data in the firstname/lastname/email field
# so we need to get it from the registration object itself
return getattr(reg, active_fields[column_id].personal_data_type.column)
else:
# no data available for the field
return ''
def _sort_key_date(column_id):
data = data_by_field.get(column_id)
if data and data.field_data.field.input_type == 'date':
return data.data
else:
return None
columns = [{'text': _content(column_id), 'sort_key': _sort_key_date(column_id)} for column_id in column_ids]
return {'checked_in': self._is_checkin_visible(reg), 'columns': columns}
active_fields = {field.id: field for field in regform.active_fields}
column_ids = [column_id
for column_id in registration_settings.get_participant_list_columns(self.event, regform)
if column_id in active_fields]
headers = [active_fields[column_id].title.title() for column_id in column_ids]
active_registrations = sorted(regform.active_registrations, key=attrgetter('last_name', 'first_name', 'id'))
registrations = [_process_registration(reg, column_ids, active_fields) for reg in active_registrations
if reg.is_publishable]
return {'headers': headers,
'rows': registrations,
'title': regform.title,
'show_checkin': any(registration['checked_in'] for registration in registrations)}
def _process(self):
regforms = (RegistrationForm.query.with_parent(self.event)
.filter(RegistrationForm.publish_registrations_enabled,
~RegistrationForm.is_deleted)
.options(subqueryload('registrations').subqueryload('data').joinedload('field_data'))
.all())
if registration_settings.get(self.event, 'merge_registration_forms'):
tables = [self._merged_participant_list_table()]
else:
tables = []
regforms_dict = {regform.id: regform for regform in regforms if regform.publish_registrations_enabled}
for form_id in registration_settings.get_participant_list_form_ids(self.event):
try:
regform = regforms_dict.pop(form_id)
except KeyError:
# The settings might reference forms that are not available
# anymore (publishing was disabled, etc.)
continue
tables.append(self._participant_list_table(regform))
# There might be forms that have not been sorted by the user yet
tables += map(self._participant_list_table, regforms_dict.viewvalues())
published = (RegistrationForm.query.with_parent(self.event)
.filter(RegistrationForm.publish_registrations_enabled)
.has_rows())
num_participants = sum(len(table['rows']) for table in tables)
return self.view_class.render_template(
'display/participant_list.html',
self.event,
regforms=regforms,
tables=tables,
published=published,
num_participants=num_participants
)
class InvitationMixin:
"""Mixin for RHs that accept an invitation token"""
def _process_args(self):
self.invitation = None
try:
token = request.args['invitation']
except KeyError:
return
try:
UUID(hex=token)
except ValueError:
flash(_("Your invitation code is not valid."), 'warning')
return
self.invitation = RegistrationInvitation.find(uuid=token).with_parent(self.regform).first()
if self.invitation is None:
flash(_("This invitation does not exist or has been withdrawn."), 'warning')
class RHRegistrationFormCheckEmail(RHRegistrationFormBase):
"""Checks how an email will affect the registration"""
def _process(self):
email = request.args['email'].lower().strip()
update = request.args.get('update')
management = request.args.get('management') == '1'
if update:
existing = self.regform.get_registration(uuid=update)
return jsonify(check_registration_email(self.regform, email, existing, management=management))
else:
return jsonify(check_registration_email(self.regform, email, management=management))
class RHRegistrationForm(InvitationMixin, RHRegistrationFormRegistrationBase):
"""Display a registration form and registrations, and process submissions"""
REGISTRATION_REQUIRED = False
normalize_url_spec = {
'locators': {
lambda self: self.regform
}
}
def _check_access(self):
RHRegistrationFormRegistrationBase._check_access(self)
if self.regform.require_login and not session.user and request.method != 'GET':
raise Forbidden(response=redirect_to_login(reason=_('You are trying to register with a form '
'that requires you to be logged in')))
def _process_args(self):
RHRegistrationFormRegistrationBase._process_args(self)
InvitationMixin._process_args(self)
if self.invitation and self.invitation.state == InvitationState.accepted and self.invitation.registration:
return redirect(url_for('.display_regform', self.invitation.registration.locator.registrant))
def _can_register(self):
return not self.regform.limit_reached and (self.regform.is_active or self.invitation)
def _process(self):
form = make_registration_form(self.regform)()
if self._can_register() and form.validate_on_submit():
registration = create_registration(self.regform, form.data, self.invitation)
return redirect(url_for('.display_regform', registration.locator.registrant))
elif form.is_submitted():
# not very pretty but usually this never happens thanks to client-side validation
for error in form.error_list:
flash(error, 'error')
user_data = {t.name: getattr(session.user, t.name, None) if session.user else '' for t in PersonalDataType}
if self.invitation:
user_data.update((attr, getattr(self.invitation, attr)) for attr in ('first_name', 'last_name', 'email'))
user_data['title'] = get_title_uuid(self.regform, user_data['title'])
return self.view_class.render_template('display/regform_display.html', self.event,
regform=self.regform,
sections=get_event_section_data(self.regform),
payment_conditions=payment_event_settings.get(self.event, 'conditions'),
payment_enabled=self.event.has_feature('payment'),
user_data=user_data,
invitation=self.invitation,
registration=self.registration,
management=False,
login_required=self.regform.require_login and not session.user)
class RHRegistrationDisplayEdit(RegistrationEditMixin, RHRegistrationFormRegistrationBase):
"""Submit a registration form"""
template_file = 'display/registration_modify.html'
management = False
REGISTRATION_REQUIRED = False
def _process_args(self):
RHRegistrationFormRegistrationBase._process_args(self)
if self.registration is None:
if session.user:
flash(_("We could not find a registration for you. If have already registered, please use the "
"direct access link from the email you received after registering."), 'warning')
else:
flash(_("We could not find a registration for you. If have already registered, please use the "
"direct access link from the email you received after registering or log in to your Indico "
"account."), 'warning')
return redirect(url_for('.display_regform', self.regform))
@property
def success_url(self):
return url_for('.display_regform', self.registration.locator.registrant)
class RHRegistrationFormDeclineInvitation(InvitationMixin, RHRegistrationFormBase):
"""Decline an invitation to register"""
def _process_args(self):
RHRegistrationFormBase._process_args(self)
InvitationMixin._process_args(self)
def _process(self):
if self.invitation.state == InvitationState.pending:
self.invitation.state = InvitationState.declined
flash(_("You declined the invitation to register."))
return redirect(self.event.url)
class RHTicketDownload(RHRegistrationFormRegistrationBase):
"""Generate ticket for a given registration"""
def _check_access(self):
RHRegistrationFormRegistrationBase._check_access(self)
if self.registration.state != RegistrationState.complete:
raise Forbidden
if not self.regform.tickets_enabled:
raise Forbidden
if (not self.regform.ticket_on_event_page and not self.regform.ticket_on_summary_page
and not self.regform.event.can_manage(session.user, 'registration')):
raise Forbidden
if self.registration.is_ticket_blocked:
raise Forbidden
def _process(self):
filename = secure_filename('{}-Ticket.pdf'.format(self.event.title), 'ticket.pdf')
return send_file(filename, generate_ticket(self.registration), 'application/pdf')
| 48.597668 | 120 | 0.655348 |
b58fbb5c7db7407f9bf00286d00aac34cb085c39
| 1,748 |
py
|
Python
|
hikari/urls.py
|
81CuongVn/hikaki
|
5e4ccffaccf411ea5c13fd64264cadda72d197fb
|
[
"MIT"
] | 2 |
2021-09-18T18:43:11.000Z
|
2021-12-30T11:54:26.000Z
|
hikari/urls.py
|
81CuongVn/hikaki
|
5e4ccffaccf411ea5c13fd64264cadda72d197fb
|
[
"MIT"
] | null | null | null |
hikari/urls.py
|
81CuongVn/hikaki
|
5e4ccffaccf411ea5c13fd64264cadda72d197fb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""API-wide URLs."""
from __future__ import annotations
__all__: typing.List[str] = ["BASE_URL", "REST_API_URL", "OAUTH2_API_URL", "CDN_URL"]
import typing
BASE_URL: typing.Final[str] = "https://discord.com"
"""The base URL."""
REST_API_URL: typing.Final[str] = f"{BASE_URL}/api/v8"
"""The REST API URL."""
OAUTH2_API_URL: typing.Final[str] = f"{REST_API_URL}/oauth2"
"""The OAUTH2 API URL."""
CDN_URL: typing.Final[str] = "https://cdn.discordapp.com"
"""The CDN URL."""
MEDIA_PROXY_URL: typing.Final[str] = "https://media.discordapp.net"
"""The media proxy URL."""
| 38.844444 | 85 | 0.744851 |
404d5a5e1d824652360c9dda569e73eb7c0b7fb6
| 1,201 |
py
|
Python
|
pipeline/scripts/ttest.py
|
SherineAwad/ribofilio
|
4dea38692e7715f07df3ee074e2adc5380f4d6e9
|
[
"MIT"
] | null | null | null |
pipeline/scripts/ttest.py
|
SherineAwad/ribofilio
|
4dea38692e7715f07df3ee074e2adc5380f4d6e9
|
[
"MIT"
] | null | null | null |
pipeline/scripts/ttest.py
|
SherineAwad/ribofilio
|
4dea38692e7715f07df3ee074e2adc5380f4d6e9
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import argparse
import numpy as np
from scipy.stats import t
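# Note added for clarity: getstats() below expects two tab-separated result files whose last
# data row (the header line is skipped) carries the fitted rate in column 1, its standard
# error in column 5 and the sample size in column 9. It compares the two rates with a
# Welch-style statistic t = (dr1 - dr2) / sqrt(SE1^2 + SE2^2) on (n1 - 2) + (n2 - 2)
# degrees of freedom and reports the two-sided p-value. This description is inferred from
# the parsing code, not from external documentation.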
def getstats(infile1, infile2):
sample1 = []
sample2 = []
count = 0
for line in open(infile1):
if count == 0:
count+=1
continue
dr1, _, _, _, SE1, _, _, _, n1 = line.split('\t')
count = 0
for line in open(infile2):
if count == 0:
count+=1
continue
dr2, _, _, _, SE2, _, _, _, n2 = line.split('\t')
df = ( float(n1) + float(n2) )-4
tscore = (float(dr1) - float(dr2)) / np.sqrt(np.square(float(SE1)) +np.square(float(SE2)) )
pvalue = 2 * (t.sf(abs(tscore),df= df))
pvalue = np.round(pvalue, decimals=4)
print(infile1,'\t', infile2)
print("tscore",'\t', "pvalue")
print(tscore,'\t', pvalue)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('infile1', default=False)
parser.add_argument('infile2', default=False)
args = parser.parse_args()
getstats(args.infile1, args.infile2)
if __name__ == '__main__':
main()
| 25.553191 | 96 | 0.586178 |
306d8a23394d2f1aa4f800e6a39a39a9416308e6
| 6,816 |
py
|
Python
|
tests/unit/providers/test_dict_py2_py3.py
|
YelloFam/python-dependency-injector
|
541131e33858ee1b8b5a7590d2bb9f929740ea1e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/providers/test_dict_py2_py3.py
|
YelloFam/python-dependency-injector
|
541131e33858ee1b8b5a7590d2bb9f929740ea1e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/providers/test_dict_py2_py3.py
|
YelloFam/python-dependency-injector
|
541131e33858ee1b8b5a7590d2bb9f929740ea1e
|
[
"BSD-3-Clause"
] | null | null | null |
"""Dict provider tests."""
import sys
from dependency_injector import providers
def test_is_provider():
assert providers.is_provider(providers.Dict()) is True
def test_provided_instance_provider():
provider = providers.Dict()
assert isinstance(provider.provided, providers.ProvidedInstance)
def test_init_with_non_string_keys():
a1 = object()
a2 = object()
provider = providers.Dict({a1: "i1", a2: "i2"})
dict1 = provider()
dict2 = provider()
assert dict1 == {a1: "i1", a2: "i2"}
assert dict2 == {a1: "i1", a2: "i2"}
assert dict1 is not dict2
def test_init_with_string_and_non_string_keys():
a1 = object()
provider = providers.Dict({a1: "i1"}, a2="i2")
dict1 = provider()
dict2 = provider()
assert dict1 == {a1: "i1", "a2": "i2"}
assert dict2 == {a1: "i1", "a2": "i2"}
assert dict1 is not dict2
def test_call_with_init_keyword_args():
provider = providers.Dict(a1="i1", a2="i2")
dict1 = provider()
dict2 = provider()
assert dict1 == {"a1": "i1", "a2": "i2"}
assert dict2 == {"a1": "i1", "a2": "i2"}
assert dict1 is not dict2
def test_call_with_context_keyword_args():
provider = providers.Dict(a1="i1", a2="i2")
assert provider(a3="i3", a4="i4") == {"a1": "i1", "a2": "i2", "a3": "i3", "a4": "i4"}
def test_call_with_provider():
provider = providers.Dict(
a1=providers.Factory(str, "i1"),
a2=providers.Factory(str, "i2"),
)
assert provider() == {"a1": "i1", "a2": "i2"}
def test_fluent_interface():
provider = providers.Dict() \
.add_kwargs(a1="i1", a2="i2")
assert provider() == {"a1": "i1", "a2": "i2"}
def test_add_kwargs():
provider = providers.Dict() \
.add_kwargs(a1="i1") \
.add_kwargs(a2="i2")
assert provider.kwargs == {"a1": "i1", "a2": "i2"}
def test_add_kwargs_non_string_keys():
a1 = object()
a2 = object()
provider = providers.Dict() \
.add_kwargs({a1: "i1"}) \
.add_kwargs({a2: "i2"})
assert provider.kwargs == {a1: "i1", a2: "i2"}
def test_add_kwargs_string_and_non_string_keys():
a2 = object()
provider = providers.Dict() \
.add_kwargs(a1="i1") \
.add_kwargs({a2: "i2"})
assert provider.kwargs == {"a1": "i1", a2: "i2"}
def test_set_kwargs():
provider = providers.Dict() \
.add_kwargs(a1="i1", a2="i2") \
.set_kwargs(a3="i3", a4="i4")
assert provider.kwargs == {"a3": "i3", "a4": "i4"}
def test_set_kwargs_non_string_keys():
a3 = object()
a4 = object()
provider = providers.Dict() \
.add_kwargs(a1="i1", a2="i2") \
.set_kwargs({a3: "i3", a4: "i4"})
assert provider.kwargs == {a3: "i3", a4: "i4"}
def test_set_kwargs_string_and_non_string_keys():
a3 = object()
provider = providers.Dict() \
.add_kwargs(a1="i1", a2="i2") \
.set_kwargs({a3: "i3"}, a4="i4")
assert provider.kwargs == {a3: "i3", "a4": "i4"}
def test_clear_kwargs():
provider = providers.Dict() \
.add_kwargs(a1="i1", a2="i2") \
.clear_kwargs()
assert provider.kwargs == {}
def test_call_overridden():
provider = providers.Dict(a1="i1", a2="i2")
overriding_provider1 = providers.Dict(a2="i2", a3="i3")
overriding_provider2 = providers.Dict(a3="i3", a4="i4")
provider.override(overriding_provider1)
provider.override(overriding_provider2)
instance1 = provider()
instance2 = provider()
assert instance1 is not instance2
assert instance1 == {"a3": "i3", "a4": "i4"}
assert instance2 == {"a3": "i3", "a4": "i4"}
def test_deepcopy():
provider = providers.Dict(a1="i1", a2="i2")
provider_copy = providers.deepcopy(provider)
assert provider is not provider_copy
assert provider.kwargs == provider_copy.kwargs
assert isinstance(provider, providers.Dict)
def test_deepcopy_from_memo():
provider = providers.Dict(a1="i1", a2="i2")
provider_copy_memo = providers.Dict(a1="i1", a2="i2")
provider_copy = providers.deepcopy(
provider,
memo={id(provider): provider_copy_memo},
)
assert provider_copy is provider_copy_memo
def test_deepcopy_kwargs():
provider = providers.Dict()
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider.add_kwargs(d1=dependent_provider1, d2=dependent_provider2)
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.kwargs["d1"]
dependent_provider_copy2 = provider_copy.kwargs["d2"]
assert provider.kwargs != provider_copy.kwargs
assert dependent_provider1.cls is dependent_provider_copy1.cls
assert dependent_provider1 is not dependent_provider_copy1
assert dependent_provider2.cls is dependent_provider_copy2.cls
assert dependent_provider2 is not dependent_provider_copy2
def test_deepcopy_kwargs_non_string_keys():
a1 = object()
a2 = object()
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider = providers.Dict({a1: dependent_provider1, a2: dependent_provider2})
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.kwargs[a1]
dependent_provider_copy2 = provider_copy.kwargs[a2]
assert provider.kwargs != provider_copy.kwargs
assert dependent_provider1.cls is dependent_provider_copy1.cls
assert dependent_provider1 is not dependent_provider_copy1
assert dependent_provider2.cls is dependent_provider_copy2.cls
assert dependent_provider2 is not dependent_provider_copy2
def test_deepcopy_overridden():
provider = providers.Dict()
object_provider = providers.Object(object())
provider.override(object_provider)
provider_copy = providers.deepcopy(provider)
object_provider_copy = provider_copy.overridden[0]
assert provider is not provider_copy
assert provider.kwargs == provider_copy.kwargs
assert isinstance(provider, providers.Dict)
assert object_provider is not object_provider_copy
assert isinstance(object_provider_copy, providers.Object)
def test_deepcopy_with_sys_streams():
provider = providers.Dict()
provider.add_kwargs(stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
provider_copy = providers.deepcopy(provider)
assert provider is not provider_copy
assert isinstance(provider_copy, providers.Dict)
assert provider.kwargs["stdin"] is sys.stdin
assert provider.kwargs["stdout"] is sys.stdout
assert provider.kwargs["stderr"] is sys.stderr
def test_repr():
provider = providers.Dict(a1=1, a2=2)
assert repr(provider) == (
"<dependency_injector.providers."
"Dict({0}) at {1}>".format(repr(provider.kwargs), hex(id(provider)))
)
| 27.707317 | 89 | 0.671215 |
151f12714aa41019071fa480878554d44bbbb0bb
| 5,727 |
py
|
Python
|
contrib/seeds/makeseeds.py
|
PERSHYANCOIN/PERSHYANCOIN
|
bbadf90495732ecdbf5ab9a27e84e1dbdaff117d
|
[
"MIT"
] | 1 |
2018-02-21T07:10:01.000Z
|
2018-02-21T07:10:01.000Z
|
contrib/seeds/makeseeds.py
|
pershyancoin/pershyancoin
|
bbadf90495732ecdbf5ab9a27e84e1dbdaff117d
|
[
"MIT"
] | 2 |
2018-02-12T22:00:38.000Z
|
2018-02-12T22:01:03.000Z
|
contrib/seeds/makeseeds.py
|
PERSHYANCOIN/PERSHYANCOIN
|
bbadf90495732ecdbf5ab9a27e84e1dbdaff117d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Pershyancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Satoshi:0.13.(1|2|99)/|/Satoshi:0.14.(0|1|2|99)/)$")
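# Note added for clarity: parseline() below assumes each input line is a whitespace-separated
# seeder dump row with the address:port in field 0, the last-success timestamp in field 2,
# the 30-day uptime percentage in field 7, the block height in field 8, the service flags
# (hex) in field 9, the protocol version in field 10 and the quoted user agent in field 11.
# This layout is inferred from the parsing code, not from seeder documentation.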
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
    '''Filter out hosts with more than one node per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries with an invalid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple pershyancoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 33.104046 | 186 | 0.570107 |
5ea0345f63bc57baa0d0dcc554ff3fbf143ca5f2
| 7,914 |
py
|
Python
|
storage/emulated/0/qpython/lib/python3.2/site-packages/aip/base.py
|
wangkaibiao/SettlersFinancialData3
|
498249e14f24bfa3186f07e8f66ee624d08c6ff1
|
[
"MIT"
] | null | null | null |
storage/emulated/0/qpython/lib/python3.2/site-packages/aip/base.py
|
wangkaibiao/SettlersFinancialData3
|
498249e14f24bfa3186f07e8f66ee624d08c6ff1
|
[
"MIT"
] | null | null | null |
storage/emulated/0/qpython/lib/python3.2/site-packages/aip/base.py
|
wangkaibiao/SettlersFinancialData3
|
498249e14f24bfa3186f07e8f66ee624d08c6ff1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
AipBase
"""
import hmac
import json
import hashlib
import datetime
import base64
import time
import sys
import requests
requests.packages.urllib3.disable_warnings()
if sys.version_info.major == 2:
from urllib import urlencode
from urllib import quote
from urlparse import urlparse
else:
from urllib.parse import urlencode
from urllib.parse import quote
from urllib.parse import urlparse
class AipBase(object):
"""
AipBase
"""
__accessTokenUrl = 'https://aip.baidubce.com/oauth/2.0/token'
__reportUrl = 'https://aip.baidubce.com/rpc/2.0/feedback/v1/report'
__scope = 'brain_all_scope'
def __init__(self, appId, apiKey, secretKey):
"""
AipBase(appId, apiKey, secretKey)
"""
self._appId = appId.strip()
self._apiKey = apiKey.strip()
self._secretKey = secretKey.strip()
self._authObj = {}
self._isCloudUser = None
self.__client = requests
self.__connectTimeout = 60.0
self.__socketTimeout = 60.0
self._proxies = {}
self.__version = '2_2_10'
def getVersion(self):
"""
version
"""
return self.__version
def setConnectionTimeoutInMillis(self, ms):
"""
setConnectionTimeoutInMillis
"""
self.__connectTimeout = ms / 1000.0
def setSocketTimeoutInMillis(self, ms):
"""
setSocketTimeoutInMillis
"""
self.__socketTimeout = ms / 1000.0
def setProxies(self, proxies):
"""
proxies
"""
self._proxies = proxies
def _request(self, url, data, headers=None):
"""
self._request('', {})
"""
try:
result = self._validate(url, data)
if result != True:
return result
authObj = self._auth()
params = self._getParams(authObj)
data = self._proccessRequest(url, params, data, headers)
headers = self._getAuthHeaders('POST', url, params, headers)
response = self.__client.post(url, data=data, params=params,
headers=headers, verify=False, timeout=(
self.__connectTimeout,
self.__socketTimeout,
), proxies=self._proxies
)
obj = self._proccessResult(response.content)
if not self._isCloudUser and obj.get('error_code', '') == 110:
authObj = self._auth(True)
params = self._getParams(authObj)
response = self.__client.post(url, data=data, params=params,
headers=headers, verify=False, timeout=(
self.__connectTimeout,
self.__socketTimeout,
), proxies=self._proxies
)
obj = self._proccessResult(response.content)
except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectTimeout) as e:
return {
'error_code': 'SDK108',
'error_msg': 'connection or read data timeout',
}
return obj
def _validate(self, url, data):
"""
validate
"""
return True
def _proccessRequest(self, url, params, data, headers):
"""
        Process request parameters.
"""
params['aipSdk'] = 'python'
params['aipVersion'] = self.__version
return data
def _proccessResult(self, content):
"""
        Format the raw response content into a dict.
"""
if sys.version_info.major == 2:
return json.loads(content) or {}
else:
return json.loads(content.decode()) or {}
def _auth(self, refresh=False):
"""
api access auth
"""
        # Not expired yet: reuse the cached auth object
if not refresh:
tm = self._authObj.get('time', 0) + int(self._authObj.get('expires_in', 0)) - 30
if tm > int(time.time()):
return self._authObj
obj = self.__client.get(self.__accessTokenUrl, verify=False, params={
'grant_type': 'client_credentials',
'client_id': self._apiKey,
'client_secret': self._secretKey,
}, timeout=(
self.__connectTimeout,
self.__socketTimeout,
), proxies=self._proxies).json()
self._isCloudUser = not self._isPermission(obj)
obj['time'] = int(time.time())
self._authObj = obj
return obj
def _isPermission(self, authObj):
"""
check whether permission
"""
scopes = authObj.get('scope', '')
return self.__scope in scopes.split(' ')
def _getParams(self, authObj):
"""
api request http url params
"""
params = {}
if self._isCloudUser == False:
params['access_token'] = authObj['access_token']
return params
def _getAuthHeaders(self, method, url, params=None, headers=None):
"""
api request http headers
"""
headers = headers or {}
params = params or {}
if self._isCloudUser == False:
return headers
urlResult = urlparse(url)
for kv in urlResult.query.strip().split('&'):
if kv:
k, v = kv.split('=')
params[k] = v
# UTC timestamp
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
headers['Host'] = urlResult.hostname
headers['x-bce-date'] = timestamp
version, expire = '1', '1800'
# 1 Generate SigningKey
val = "bce-auth-v%s/%s/%s/%s" % (version, self._apiKey, timestamp, expire)
signingKey = hmac.new(self._secretKey.encode('utf-8'), val.encode('utf-8'),
hashlib.sha256
).hexdigest()
# 2 Generate CanonicalRequest
        # 2.1 Generate CanonicalURI
canonicalUri = quote(urlResult.path)
        # 2.2 CanonicalQueryString: built inline in step 2.4 below
# 2.3 Generate CanonicalHeaders: only include host here
canonicalHeaders = []
for header, val in headers.items():
canonicalHeaders.append(
'%s:%s' % (
quote(header.strip(), '').lower(),
quote(val.strip(), '')
)
)
canonicalHeaders = '\n'.join(sorted(canonicalHeaders))
# 2.4 Generate CanonicalRequest
canonicalRequest = '%s\n%s\n%s\n%s' % (
method.upper(),
canonicalUri,
'&'.join(sorted(urlencode(params).split('&'))),
canonicalHeaders
)
# 3 Generate Final Signature
signature = hmac.new(signingKey.encode('utf-8'), canonicalRequest.encode('utf-8'),
hashlib.sha256
).hexdigest()
headers['authorization'] = 'bce-auth-v%s/%s/%s/%s/%s/%s' % (
version,
self._apiKey,
timestamp,
expire,
';'.join(headers.keys()).lower(),
signature
)
return headers
def report(self, feedback):
"""
        Submit feedback data.
"""
data = {}
data['feedback'] = feedback
return self._request(self.__reportUrl, data)
def post(self, url, data, headers=None):
"""
self.post('', {})
"""
return self._request(url, data, headers)
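# Minimal usage sketch (added; not part of the original module). The credentials and the
# endpoint below are placeholders; a real call needs valid Baidu AIP keys and one of the
# service URLs handled by the concrete SDK clients built on top of AipBase.
if __name__ == '__main__':
    client = AipBase('your-app-id', 'your-api-key', 'your-secret-key')
    client.setConnectionTimeoutInMillis(5000)
    client.setSocketTimeoutInMillis(5000)
    # post() signs the request (or appends an access token) and returns the decoded JSON:
    # result = client.post('https://aip.baidubce.com/some/endpoint', {'key': 'value'})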
| 28.365591 | 93 | 0.504296 |
810fd1bdc346c4399f18d1edbe0c8ed683392e38
| 16,418 |
py
|
Python
|
contrastive/models/contrastive_learner_with_labels.py
|
neurospin-projects/2022_jchavas_cingulate_inhibitory_control
|
30e63f0af62fa83abd3858720ce3f3a15a3fbaea
|
[
"MIT"
] | null | null | null |
contrastive/models/contrastive_learner_with_labels.py
|
neurospin-projects/2022_jchavas_cingulate_inhibitory_control
|
30e63f0af62fa83abd3858720ce3f3a15a3fbaea
|
[
"MIT"
] | null | null | null |
contrastive/models/contrastive_learner_with_labels.py
|
neurospin-projects/2022_jchavas_cingulate_inhibitory_control
|
30e63f0af62fa83abd3858720ce3f3a15a3fbaea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This software and supporting documentation are distributed by
# Institut Federatif de Recherche 49
# CEA/NeuroSpin, Batiment 145,
# 91191 Gif-sur-Yvette cedex
# France
#
# This software is governed by the CeCILL license version 2 under
# French law and abiding by the rules of distribution of free software.
# You can use, modify and/or redistribute the software under the
# terms of the CeCILL license version 2 as circulated by CEA, CNRS
# and INRIA at the following URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license version 2 and that you accept its terms.
"""
Some helper functions are taken from:
https://learnopencv.com/tensorboard-with-pytorch-lightning
"""
import numpy as np
import torch
from sklearn.manifold import TSNE
from toolz.itertoolz import first
from contrastive.models.contrastive_learner import ContrastiveLearner
from contrastive.losses import GeneralizedSupervisedNTXenLoss
from contrastive.utils.plots.visualize_images \
import plot_scatter_matrix_with_labels
from contrastive.utils.plots.visualize_tsne import plot_tsne
class SaveOutput:
def __init__(self):
self.outputs = {}
def __call__(self, module, module_in, module_out):
self.outputs[module] = module_out.cpu()
def clear(self):
self.outputs = {}
class ContrastiveLearner_WithLabels(ContrastiveLearner):
def __init__(self, config, sample_data):
super(ContrastiveLearner_WithLabels, self).__init__(
config=config, sample_data=sample_data)
def plot_scatter_matrices_with_labels(self):
"""Plots scatter matrices with label values."""
# Makes scatter matrix of output space
r = self.compute_outputs_skeletons(
self.sample_data.train_dataloader())
X = r[0] # First element of tuple
labels = r[1] # Second element of tuple
# Makes scatter matrix of output space with label values
scatter_matrix_outputs_with_labels = \
plot_scatter_matrix_with_labels(X, labels, buffer=True)
self.logger.experiment.add_image(
'scatter_matrix_outputs_with_labels',
scatter_matrix_outputs_with_labels,
self.current_epoch)
# Makes scatter matrix of representation space with label values
r = self.compute_representations(
self.sample_data.train_dataloader())
X = r[0] # First element of tuple
labels = r[1] # Second element of tuple
scatter_matrix_representations_with_labels = \
plot_scatter_matrix_with_labels(X, labels, buffer=True)
self.logger.experiment.add_image(
'scatter_matrix_representations_with_labels',
scatter_matrix_representations_with_labels,
self.current_epoch)
def generalized_supervised_nt_xen_loss(self, z_i, z_j, labels):
"""Loss function for contrastive"""
temperature = max(self.config.temperature,
self.config.temperature_initial
- self.current_epoch/50. *
(self.config.temperature_initial - self.config.temperature))
loss = GeneralizedSupervisedNTXenLoss(
temperature=temperature,
sigma=self.config.sigma_labels,
proportion_pure_contrastive=self.config.proportion_pure_contrastive,
return_logits=True)
return loss.forward(z_i, z_j, labels)
def training_step(self, train_batch, batch_idx):
"""Training step.
"""
(inputs, labels, filenames) = train_batch
input_i = inputs[:, 0, :]
input_j = inputs[:, 1, :]
z_i = self.forward(input_i)
z_j = self.forward(input_j)
if self.config.mode == "decoder":
sample = inputs[:, 2, :]
batch_loss = self.cross_entropy_loss(sample, z_i, z_j)
else:
batch_loss, sim_zij, sim_zii, sim_zjj, correct_pair, weights = \
self.generalized_supervised_nt_xen_loss(z_i, z_j, labels)
self.log('train_loss', float(batch_loss))
# Only computes graph on first step
if self.global_step == 1:
self.logger.experiment.add_graph(self, inputs[:, 0, :])
# Records sample for first batch of each epoch
if batch_idx == 0:
self.sample_i = inputs[:, 0, :].cpu()
self.sample_j = inputs[:, 1, :].cpu()
if self.config.mode != "decoder":
self.sim_zij = sim_zij * self.config.temperature
self.sim_zii = sim_zii * self.config.temperature
self.sim_zjj = sim_zjj * self.config.temperature
self.weights = weights
# logs - a dictionary
logs = {"train_loss": float(batch_loss)}
batch_dictionary = {
# REQUIRED: It is required for us to return "loss"
"loss": batch_loss,
# optional for batch logging purposes
"log": logs,
}
return batch_dictionary
def compute_outputs_skeletons(self, loader):
"""Computes the outputs of the model for each crop.
This includes the projection head"""
# Initialization
X = torch.zeros([0, self.config.num_outputs]).cpu()
labels_all = torch.zeros([0, 1]).cpu()
filenames_list = []
# Computes embeddings without computing gradient
with torch.no_grad():
for (inputs, labels, filenames) in loader:
# First views of the whole batch
inputs = inputs.cuda()
model = self.cuda()
X_i = model.forward(inputs[:, 0, :])
# Second views of the whole batch
X_j = model.forward(inputs[:, 1, :])
# We now concatenate the embeddings
# First views and second views are put side by side
X_reordered = torch.cat([X_i, X_j], dim=-1)
                # X_i and X_j elements are put in succession by index
# X_i[0], X_j[0], X_i[1], X_j[1],... X_i[N], X_j[N]
# N being the number of samples in the batch
X_reordered = X_reordered.view(-1, X_i.shape[-1])
                # At the end, it is concatenated with the previous X
X = torch.cat((X, X_reordered.cpu()), dim=0)
# We now concatenate the labels
labels_reordered = torch.cat([labels, labels], dim=-1)
labels_reordered = labels_reordered.view(-1, labels.shape[-1])
# At the end, labels are concatenated
labels_all = torch.cat((labels_all, labels_reordered.cpu()),
dim=0)
filenames_duplicate = [
item for item in filenames
for repetitions in range(2)]
filenames_list = filenames_list + filenames_duplicate
del inputs
return X, labels_all, filenames_list
def compute_decoder_outputs_skeletons(self, loader):
"""Computes the outputs of the model for each crop.
This includes the projection head"""
# Initialization
X = torch.zeros([0, 2, 20, 40, 40]).cpu()
filenames_list = []
# Computes embeddings without computing gradient
with torch.no_grad():
for (inputs, labels, filenames) in loader:
# First views of the whole batch
inputs = inputs.cuda()
model = self.cuda()
X_i = model.forward(inputs[:, 0, :])
                # First views are put side by side
X = torch.cat((X, X_i.cpu()), dim=0)
filenames_duplicate = [item
for item in filenames]
filenames_list = filenames_list + filenames_duplicate
del inputs
return X, filenames_list
def compute_representations(self, loader):
"""Computes representations for each crop.
Representation are before the projection head"""
# Initialization
X = torch.zeros([0, self.config.num_representation_features]).cpu()
labels_all = torch.zeros([0, 1]).cpu()
filenames_list = []
# Computes representation (without gradient computation)
with torch.no_grad():
for (inputs, labels, filenames) in loader:
# We first compute the embeddings
# for the first views of the whole batch
inputs = inputs.cuda()
model = self.cuda()
model.forward(inputs[:, 0, :])
X_i = first(self.save_output.outputs.values())
# We then compute the embeddings for the second views
# of the whole batch
model.forward(inputs[:, 1, :])
X_j = first(self.save_output.outputs.values())
# We now concatenate the embeddings
# First views and second views are put side by side
X_reordered = torch.cat([X_i, X_j], dim=-1)
                # X_i and X_j elements are put in succession by index
# X_i[0], X_j[0], X_i[1], X_j[1],... X_i[N], X_j[N]
# N being the number of samples in the batch
X_reordered = X_reordered.view(-1, X_i.shape[-1])
                # At the end, it is concatenated with the previous X
X = torch.cat((X, X_reordered.cpu()), dim=0)
# We now concatenate the labels
labels_reordered = torch.cat([labels, labels], dim=-1)
labels_reordered = labels_reordered.view(-1, labels.shape[-1])
# At the end, labels are concatenated
labels_all = torch.cat((labels_all, labels_reordered.cpu()),
dim=0)
filenames_duplicate = [
item for item in filenames
for repetitions in range(2)]
filenames_list = filenames_list + filenames_duplicate
del inputs
return X, labels_all, filenames_list
def compute_tsne(self, loader, register):
"""Computes t-SNE.
It is computed either in the representation
or in the output space"""
if register == "output":
X, _, _ = self.compute_outputs_skeletons(loader)
elif register == "representation":
X, _, _ = self.compute_representations(loader)
else:
raise ValueError(
"Argument register must be either output or representation")
tsne = TSNE(n_components=2, perplexity=25, init='pca', random_state=50)
Y = X.detach().numpy()
# Makes the t-SNE fit
X_tsne = tsne.fit_transform(Y)
# Returns tsne embeddings
return X_tsne
def training_epoch_end(self, outputs):
"""Computation done at the end of the epoch"""
if self.config.mode == "encoder":
# Computes t-SNE both in representation and output space
if self.current_epoch % self.config.nb_epochs_per_tSNE == 0 \
or self.current_epoch >= self.config.max_epochs:
X_tsne = self.compute_tsne(
self.sample_data.train_dataloader(), "output")
image_TSNE = plot_tsne(X_tsne, buffer=True)
self.logger.experiment.add_image(
'TSNE output image', image_TSNE, self.current_epoch)
X_tsne = self.compute_tsne(
self.sample_data.train_dataloader(), "representation")
image_TSNE = plot_tsne(X_tsne, buffer=True)
self.logger.experiment.add_image(
'TSNE representation image', image_TSNE, self.current_epoch)
# Plots zxx and weights histograms
self.plot_histograms()
# Plots scatter matrices
self.plot_scatter_matrices()
# Plots scatter matrices with label values
self.plot_scatter_matrices_with_labels()
# Plots views
self.plot_views()
# calculates average loss
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
# logs histograms
# self.custom_histogram_adder()
# logging using tensorboard logger
self.logger.experiment.add_scalar(
"Loss/Train",
avg_loss,
self.current_epoch)
def validation_step(self, val_batch, batch_idx):
"""Validation step"""
(inputs, labels, filenames) = val_batch
input_i = inputs[:, 0, :]
input_j = inputs[:, 1, :]
z_i = self.forward(input_i)
z_j = self.forward(input_j)
if self.config.mode == "decoder":
sample = inputs[:, 2, :]
batch_loss = self.cross_entropy_loss(sample, z_i, z_j)
else:
batch_loss, sim_zij, sim_sii, sim_sjj, correct_pairs, weights = \
self.generalized_supervised_nt_xen_loss(z_i, z_j, labels)
self.log('val_loss', float(batch_loss))
# logs- a dictionary
logs = {"val_loss": float(batch_loss)}
batch_dictionary = {
# REQUIRED: It is required for us to return "loss"
"loss": batch_loss,
# optional for batch logging purposes
"log": logs,
}
return batch_dictionary
def validation_epoch_end(self, outputs):
"""Computaion done at the end of each validation epoch"""
# Computes t-SNE
if self.config.mode == "encoder":
if self.current_epoch % self.config.nb_epochs_per_tSNE == 0 \
or self.current_epoch >= self.config.max_epochs:
X_tsne = self.compute_tsne(
self.sample_data.val_dataloader(), "output")
image_TSNE = plot_tsne(X_tsne, buffer=True)
self.logger.experiment.add_image(
'TSNE output validation image', image_TSNE, self.current_epoch)
X_tsne = self.compute_tsne(
self.sample_data.val_dataloader(),
"representation")
image_TSNE = plot_tsne(X_tsne, buffer=True)
self.logger.experiment.add_image(
'TSNE representation validation image',
image_TSNE,
self.current_epoch)
# Makes scatter matrix of representation space
X, labels, _ = self.compute_representations(
self.sample_data.val_dataloader())
# Makes scatter matrix of representation space with label values
scatter_matrix_representations_with_labels = \
plot_scatter_matrix_with_labels(X, labels, buffer=True)
self.logger.experiment.add_image(
'scatter_matrix_representations_with_labels_validation',
scatter_matrix_representations_with_labels,
self.current_epoch)
# calculates average loss
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
# logs losses using tensorboard logger
self.logger.experiment.add_scalar(
"Loss/Validation",
avg_loss,
self.current_epoch)
| 39.561446 | 86 | 0.604459 |
e0a56d468f9b6270babbef3a1eafcd01d629ed1a
| 1,945 |
py
|
Python
|
cauldron/test/cli/commands/test_reload.py
|
JohnnyPeng18/cauldron
|
09120c2a4cef65df46f8c0c94f5d79395b3298cd
|
[
"MIT"
] | 90 |
2016-09-02T15:11:10.000Z
|
2022-01-02T11:37:57.000Z
|
cauldron/test/cli/commands/test_reload.py
|
JohnnyPeng18/cauldron
|
09120c2a4cef65df46f8c0c94f5d79395b3298cd
|
[
"MIT"
] | 86 |
2016-09-23T16:52:22.000Z
|
2022-03-31T21:39:56.000Z
|
cauldron/test/cli/commands/test_reload.py
|
JohnnyPeng18/cauldron
|
09120c2a4cef65df46f8c0c94f5d79395b3298cd
|
[
"MIT"
] | 261 |
2016-12-22T05:36:48.000Z
|
2021-11-26T12:40:42.000Z
|
from unittest.mock import patch
from cauldron.test import support
from cauldron.test.support import scaffolds
class TestReload(scaffolds.ResultsTest):
"""..."""
def test_reload(self):
"""Should reload the currently opened project."""
support.run_command('open @examples:hello_cauldron --forget')
r = support.run_command('reload')
self.assertFalse(r.failed, 'should not have failed')
def test_no_open_project(self):
"""Should fail when no project is open."""
r = support.run_command('reload')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'NO_PROJECT_FOUND')
@patch('time.sleep')
def test_missing_project_path(self, *args):
"""Should fail if the project directory does not exist."""
support.run_command('open @examples:hello_cauldron --forget')
with patch('os.path.exists') as path_exists:
path_exists.return_value = False
r = support.run_command('reload')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'MISSING_PROJECT_PATH')
@patch('time.sleep')
def test_initialize_failure(self, *args):
"""Should fail if cannot initialize project."""
support.run_command('open @examples:hello_cauldron --forget')
with patch('cauldron.runner.initialize') as runner_initialize:
runner_initialize.side_effect = FileNotFoundError('Fake Error')
r = support.run_command('reload')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'PROJECT_INIT_FAILURE')
def test_reload_remote(self):
"""Should reload the currently opened project."""
support.run_command('open @examples:hello_cauldron --forget')
r = support.run_remote_command('reload')
self.assertFalse(r.failed, 'should not have failed')
| 38.137255 | 75 | 0.672494 |
105ea140070ba362676f80d83f3968a9d3e05a21
| 491 |
py
|
Python
|
vilya/views/api/v1/projects/commits.py
|
mubashshirjamal/code
|
d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382
|
[
"BSD-3-Clause"
] | 1,582 |
2015-01-05T02:41:44.000Z
|
2022-03-30T20:03:22.000Z
|
vilya/views/api/v1/projects/commits.py
|
mubashshirjamal/code
|
d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382
|
[
"BSD-3-Clause"
] | 66 |
2015-01-23T07:58:04.000Z
|
2021-11-12T02:23:27.000Z
|
vilya/views/api/v1/projects/commits.py
|
mubashshirjamal/code
|
d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382
|
[
"BSD-3-Clause"
] | 347 |
2015-01-05T07:47:07.000Z
|
2021-09-20T21:22:32.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from vilya.views.api.utils import RestAPIUI
class CommitsUI(RestAPIUI):
_q_exports = []
_q_methods = ['get']
def __init__(self, project):
self.project = project
def get(self, request):
repo = self.project.repo
commits = repo.get_commits('HEAD', 'HEAD~5')
if not commits:
return {'commits':[]}
return dict(commits=[commit.as_dict() for commit in commits])
| 24.55 | 69 | 0.627291 |
4fe8cc91954aaeb34bbe8aaed64c6a646d649096
| 3,926 |
py
|
Python
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/env/test.py
|
jessehub/integrations-core
|
76955b6e55beae7bc5c2fd25867955d2a3c8d5ef
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/commands/env/test.py
|
jessehub/integrations-core
|
76955b6e55beae7bc5c2fd25867955d2a3c8d5ef
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/commands/env/test.py
|
jessehub/integrations-core
|
76955b6e55beae7bc5c2fd25867955d2a3c8d5ef
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import click
from .... import EnvVars
from ...e2e import create_interface, get_configured_envs
from ...e2e.agent import DEFAULT_PYTHON_VERSION
from ...testing import get_tox_envs
from ..console import CONTEXT_SETTINGS, echo_info, echo_warning
from ..test import test as test_command
from .start import start
from .stop import stop
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Test an environment')
@click.argument('checks', nargs=-1)
@click.option(
'--agent',
'-a',
default='6',
help=(
'The agent build to use e.g. a Docker image like `datadog/agent:6.5.2`. For '
'Docker environments you can use an integer corresponding to fields in the '
'config (agent5, agent6, etc.)'
),
)
@click.option(
'--python',
'-py',
type=click.INT,
help='The version of Python to use. Defaults to {} if no tox Python is specified.'.format(DEFAULT_PYTHON_VERSION),
)
@click.option('--dev/--prod', default=None, help='Whether to use the latest version of a check or what is shipped')
@click.option('--base', is_flag=True, help='Whether to use the latest version of the base check or what is shipped')
@click.option(
'--env-vars',
'-e',
multiple=True,
help=(
'ENV Variable that should be passed to the Agent container. '
'Ex: -e DD_URL=app.datadoghq.com -e DD_API_KEY=123456'
),
)
@click.option('--new-env', '-ne', is_flag=True, help='Execute setup and tear down actions')
@click.option('--profile-memory', '-pm', is_flag=True, help='Whether to collect metrics about memory usage')
@click.pass_context
def test(ctx, checks, agent, python, dev, base, env_vars, new_env, profile_memory):
"""Test an environment."""
check_envs = get_tox_envs(checks, e2e_tests_only=True)
tests_ran = False
# If no checks are specified it means we're testing what has changed compared
# to master, probably on CI rather than during local development. In this case,
# ensure environments and Agents are spun up and down.
if not checks:
new_env = True
# Default to testing the local development version.
if dev is None:
dev = True
if profile_memory and not new_env:
echo_warning('Ignoring --profile-memory, to utilize that you must also select --new-env')
for check, envs in check_envs:
if not envs:
echo_warning('No end-to-end environments found for `{}`'.format(check))
continue
config_envs = get_configured_envs(check)
# For performance reasons we're generating what to test on the fly and therefore
# need a way to tell if anything ran since we don't know anything upfront.
tests_ran = True
for env in envs:
if new_env:
ctx.invoke(
start,
check=check,
env=env,
agent=agent,
python=python,
dev=dev,
base=base,
env_vars=env_vars,
profile_memory=profile_memory,
)
elif env not in config_envs:
continue
environment = create_interface(check, env)
persisted_env_vars = environment.metadata.get('env_vars', {})
try:
with EnvVars(persisted_env_vars):
ctx.invoke(
test_command,
checks=['{}:{}'.format(check, env)],
e2e=True,
passenv=' '.join(persisted_env_vars) if persisted_env_vars else None,
)
finally:
if new_env:
ctx.invoke(stop, check=check, env=env)
if not tests_ran:
echo_info('Nothing to test!')
| 35.690909 | 118 | 0.611055 |
4ea25bea64693efbd67e1d8d1323dd7e269f115f
| 4,472 |
py
|
Python
|
advent_of_code/year2019/day7/intcode.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | 1 |
2020-04-21T11:39:25.000Z
|
2020-04-21T11:39:25.000Z
|
advent_of_code/year2019/day7/intcode.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | 7 |
2020-02-12T01:08:01.000Z
|
2022-02-10T11:56:56.000Z
|
advent_of_code/year2019/day7/intcode.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import List, Dict, Callable
class Operation(ABC):
@abstractmethod
def execute(self, computer: 'IntcodeComputer', parameter_modes: str):
...
class ReadReadWriteOperation(Operation, ABC):
def execute(self, computer: 'IntcodeComputer', parameter_modes: str):
parameter1 = computer._get_parameter(parameter_modes, 1)
parameter2 = computer._get_parameter(parameter_modes, 2)
computer._set_parameter(3, self.get_value(parameter1, parameter2))
computer.index += 4
@abstractmethod
def get_value(self, parameter1: int, parameter2: int):
...
class AddOperation(ReadReadWriteOperation):
def get_value(self, parameter1: int, parameter2: int):
return parameter1 + parameter2
class MultiplyOperation(ReadReadWriteOperation):
def get_value(self, parameter1: int, parameter2: int):
return parameter1 * parameter2
class InputOperation(Operation):
def execute(self, computer: 'IntcodeComputer', parameter_modes: str):
if not computer.inputs:
raise StopIteration() # Stop execution until input is added
computer._set_parameter(1, computer.inputs.pop(0))
computer.index += 2
class OutputOperation(Operation):
def execute(self, computer: 'IntcodeComputer', parameter_modes: str):
parameter1 = computer._get_parameter(parameter_modes, 1)
computer.outputs.append(parameter1)
computer.index += 2
class ConditionalJumpOperation(Operation, ABC):
def execute(self, computer: 'IntcodeComputer', parameter_modes: str):
parameter1 = computer._get_parameter(parameter_modes, 1)
parameter2 = computer._get_parameter(parameter_modes, 2)
if self.condition(parameter1):
computer.index = parameter2
else:
computer.index += 3
@abstractmethod
def condition(self, parameter1: int) -> bool:
...
class JumpIfNonZeroOperation(ConditionalJumpOperation):
def condition(self, parameter1: int) -> bool:
return parameter1 != 0
class JumpIfZeroOperation(ConditionalJumpOperation):
def condition(self, parameter1: int) -> bool:
return parameter1 == 0
class LessThanOperation(ReadReadWriteOperation):
def get_value(self, parameter1: int, parameter2: int):
return 1 if parameter1 < parameter2 else 0
class EqualsOperation(ReadReadWriteOperation):
def get_value(self, parameter1: int, parameter2: int):
return 1 if parameter1 == parameter2 else 0
class StopOperation(Operation):
def execute(self, computer: 'IntcodeComputer', parameter_modes: str):
computer.done = True
raise StopIteration()
class IntcodeComputer:
@classmethod
def from_string(cls, program: str):
return IntcodeComputer([int(value) for value in program.split(',')])
def __init__(self, program: List[int]):
self.program: List[int] = program
self.inputs: List[int] = []
self.outputs: List[int] = []
self.index: int = 0
self.done = False
self.parameter_modes: Dict[str, Callable[[int], int]] = {
'0': lambda value: self.program[value],
'1': lambda value: value
}
self.operations: Dict[int, Operation] = {
1: AddOperation(),
2: MultiplyOperation(),
3: InputOperation(),
4: OutputOperation(),
5: JumpIfNonZeroOperation(),
6: JumpIfZeroOperation(),
7: LessThanOperation(),
8: EqualsOperation(),
99: StopOperation()
}
def add_input(self, value: int) -> None:
self.inputs.append(value)
def get_output(self) -> int:
return self.outputs.pop(0)
def _get_parameter(self, parameter_modes, parameter_number):
value = self.program[self.index + parameter_number]
return self.parameter_modes[parameter_modes[parameter_number - 1]](value)
def _set_parameter(self, parameter_number, value):
self.program[self.program[self.index + parameter_number]] = value
def run(self):
try:
while True:
op_code = self.program[self.index] % 100
parameter_modes = f'000{self.program[self.index] // 100}'[::-1] # Zero-padded for convenience
self.operations[op_code].execute(self, parameter_modes)
except StopIteration:
pass
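# Minimal usage sketch (added for illustration; not part of the original module). The tiny
# program below is an assumed example: opcode 3 reads an input into position 0, opcode 4
# echoes position 0 back, and 99 halts, so the computer should output whatever was fed in.
if __name__ == '__main__':
    computer = IntcodeComputer.from_string('3,0,4,0,99')
    computer.add_input(42)
    computer.run()
    print(computer.get_output())  # expected: 42
    print(computer.done)          # expected: True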
| 30.841379 | 110 | 0.658318 |
df0b2e4277553e0202d0deafaca1109e8d615173
| 328 |
py
|
Python
|
davidgoliath/project/modelling/21_geometric.py
|
spideynolove/Other-repo
|
34066f177994415d031183ab9dd219d787e6e13a
|
[
"MIT"
] | null | null | null |
davidgoliath/project/modelling/21_geometric.py
|
spideynolove/Other-repo
|
34066f177994415d031183ab9dd219d787e6e13a
|
[
"MIT"
] | null | null | null |
davidgoliath/project/modelling/21_geometric.py
|
spideynolove/Other-repo
|
34066f177994415d031183ab9dd219d787e6e13a
|
[
"MIT"
] | null | null | null |
# Geometric distribution python
# https://www.google.com/search?q=Geometric+distribution+python&oq=Geometric+distribution+python&aqs=chrome..69i57j0l2j0i22i30l7.2540j0j4&sourceid=chrome&ie=UTF-8
''' Discrete distributions
# https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.geom.html#scipy.stats.geom
'''
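# The note above only links to the scipy docs; below is a minimal sketch (not part of the
# original file) showing the scipy.stats.geom API it refers to. The success probability
# p = 0.3 and the sample size are arbitrary illustration values.
from scipy.stats import geom
p = 0.3
# P(X = 3): probability that the first success occurs on the third trial.
print(geom.pmf(3, p))
# P(X <= 3): probability of a success within the first three trials.
print(geom.cdf(3, p))
# Mean number of trials until the first success (equals 1 / p).
print(geom.mean(p))
# Draw a few random variates from the distribution.
print(geom.rvs(p, size=5))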
| 46.857143 | 162 | 0.801829 |
4a291b8e70dede11468340319d091a45004a5d19
| 849 |
py
|
Python
|
FPLManager/caching.py
|
twhi/fpl_price_change_predictor
|
0cf0de11af7637d6cd83fdadf9ff381fc6d172f5
|
[
"MIT"
] | 1 |
2019-02-15T13:48:48.000Z
|
2019-02-15T13:48:48.000Z
|
FPLManager/caching.py
|
twhi/fpl_price_change_predictor
|
0cf0de11af7637d6cd83fdadf9ff381fc6d172f5
|
[
"MIT"
] | 1 |
2021-06-01T23:15:32.000Z
|
2021-06-01T23:15:32.000Z
|
FPLManager/caching.py
|
twhi/FPLManager
|
0cf0de11af7637d6cd83fdadf9ff381fc6d172f5
|
[
"MIT"
] | null | null | null |
import pickle
def save_to_pickle(variable, filename):
with open(filename, 'wb') as handle:
pickle.dump(variable, handle)
def open_pickle(path_to_file):
with open(path_to_file, 'rb') as handle:
f = pickle.load(handle)
return f
class Caching:
def __init__(self):
self.access_list = [
'account_data',
'master_table',
'team_list',
'team_info',
'player_price_data',
'player_stats_data',
'player_top_50_data',
'team_ids',
'username_hash',
]
@staticmethod
def get_cached_data(fname):
return open_pickle('./data/' + fname + '.pickle')
def cache_data(self, fname):
out = {}
for d in self.access_list:
out[d] = getattr(self, d)
save_to_pickle(out, './data/' + fname + '.pickle')
| 24.970588 | 58 | 0.580683 |
7cdba495eeabbf8e4018da3efd9849838949ee9e
| 2,231 |
py
|
Python
|
tools/copy_partitions.py
|
yuzi40277738/openHASP
|
e5332a3aad19a399194bcf31add3bcacf3e2c130
|
[
"MIT"
] | 191 |
2021-04-02T18:20:34.000Z
|
2022-03-27T23:37:22.000Z
|
tools/copy_partitions.py
|
yuzi40277738/openHASP
|
e5332a3aad19a399194bcf31add3bcacf3e2c130
|
[
"MIT"
] | 82 |
2021-04-02T14:37:32.000Z
|
2022-03-31T23:33:37.000Z
|
tools/copy_partitions.py
|
yuzi40277738/openHASP
|
e5332a3aad19a399194bcf31add3bcacf3e2c130
|
[
"MIT"
] | 72 |
2021-04-11T14:46:02.000Z
|
2022-03-31T14:33:15.000Z
|
#This script is based on the Tasmota rename-firmware.py script. https://github.com/arendst/Tasmota
Import('env')
import os
import shutil
buildFlags = env.ParseFlags(env['BUILD_FLAGS'])
OUTPUT_DIR = "build_output{}".format(os.path.sep)
platform = env.PioPlatform()
FRAMEWORK_DIR = platform.get_package_dir("framework-arduinoespressif32")
FRAMEWORK_DIR = "{}{}".format(FRAMEWORK_DIR, os.path.sep)
def copy_boot_partitions(source, target, env):
# check if output directories exist and create if necessary
if not os.path.isdir(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
for d in ['firmware', 'map']:
if not os.path.isdir("{}{}".format(OUTPUT_DIR, d)):
os.mkdir("{}{}".format(OUTPUT_DIR, d))
# create string with location and file names based on variant
src = str(target[0])
dst = "{}firmware{}{}".format(OUTPUT_DIR, os.path.sep, "partitions.bin")
print(src)
print(dst)
# check if new target files exist and remove if necessary
for f in [dst]:
if os.path.isfile(f):
os.remove(f)
    # copy the built partition table image to firmware/partitions.bin
shutil.copy(src,dst)
    # source and destination paths for boot_app0.bin shipped with the framework
    src = "{}tools{}partitions{}boot_app0.bin".format(FRAMEWORK_DIR, os.path.sep, os.path.sep)
dst = "{}firmware{}{}".format(OUTPUT_DIR, os.path.sep, "boot_app0.bin")
print(src)
print(dst)
# check if new target files exist and remove if necessary
for f in [dst]:
if os.path.isfile(f):
os.remove(f)
# copy firmware.bin to firmware/<variant>.bin
shutil.copy(src,dst)
    # source and destination paths for the DIO/40MHz bootloader shipped with the framework
    src = "{}tools{}sdk{}bin{}bootloader_dio_40m.bin".format(FRAMEWORK_DIR, os.path.sep, os.path.sep, os.path.sep)
dst = "{}firmware{}{}".format(OUTPUT_DIR, os.path.sep, "bootloader_dio_40m.bin")
print(src)
print(dst)
# check if new target files exist and remove if necessary
for f in [dst]:
if os.path.isfile(f):
os.remove(f)
# copy firmware.bin to firmware/<variant>.bin
shutil.copy(src,dst)
env.AddPostAction("$BUILD_DIR/partitions.bin", [copy_boot_partitions])
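# This script is typically registered in platformio.ini so the copies run after
# each build (environment name below is illustrative):
#   [env:my_board]
#   extra_scripts = tools/copy_partitions.py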
| 31.871429 | 127 | 0.66831 |
f75b3271013111802187c197e90c5479823898ca
| 8,262 |
py
|
Python
|
pymagnitude/third_party/allennlp/semparse/worlds/atis_world.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 1,520 |
2018-03-01T13:37:49.000Z
|
2022-03-25T11:40:20.000Z
|
pymagnitude/third_party/allennlp/semparse/worlds/atis_world.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 87 |
2018-03-03T15:12:50.000Z
|
2022-02-21T15:24:12.000Z
|
pymagnitude/third_party/allennlp/semparse/worlds/atis_world.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 121 |
2018-03-03T08:40:53.000Z
|
2022-03-16T05:19:38.000Z
|
from __future__ import absolute_import
from copy import deepcopy
#typing
import numpy
from parsimonious.grammar import Grammar
from allennlp.semparse.contexts.atis_tables import * # pylint: disable=wildcard-import,unused-wildcard-import
from allennlp.semparse.contexts.sql_table_context import\
SqlTableContext, SqlVisitor, generate_one_of_string, format_action
from allennlp.data.tokenizers import Token, WordTokenizer
try:
from itertools import izip
except ImportError:
izip = zip
def get_strings_from_utterance(tokenized_utterance ) :
u"""
Based on the current utterance, return a dictionary where the keys are the strings in the utterance
that map to lists of the token indices that they are linked to.
"""
string_linking_scores = defaultdict(list)
for index, (first_token, second_token) in enumerate(izip(tokenized_utterance, tokenized_utterance[1:])):
for string in ATIS_TRIGGER_DICT.get(first_token.text.lower(), []):
string_linking_scores[string].append(index)
        bigram = "{} {}".format(first_token.text, second_token.text).lower()
for string in ATIS_TRIGGER_DICT.get(bigram, []):
string_linking_scores[string].extend([index, index + 1])
if tokenized_utterance[-1].text.lower() in ATIS_TRIGGER_DICT:
for string in ATIS_TRIGGER_DICT[tokenized_utterance[-1].text.lower()]:
string_linking_scores[string].append(len(tokenized_utterance)-1)
date = get_date_from_utterance(tokenized_utterance)
if date:
for day in DAY_OF_WEEK_INDEX[date.weekday()]:
string_linking_scores[day] = []
return string_linking_scores
class AtisWorld(object):
u"""
World representation for the Atis SQL domain. This class has a ``SqlTableContext`` which holds the base
grammars, it then augments this grammar with the entities that are detected from utterances.
Parameters
----------
utterances: ``List[str]``
A list of utterances in the interaction, the last element in this list is the
current utterance that we are interested in.
"""
sql_table_context = SqlTableContext(TABLES)
def __init__(self, utterances , tokenizer=None) :
self.utterances = utterances
self.tokenizer = tokenizer if tokenizer else WordTokenizer()
self.tokenized_utterances = [self.tokenizer.tokenize(utterance) for utterance in self.utterances]
valid_actions, linking_scores = self.init_all_valid_actions()
self.valid_actions = valid_actions
# This has shape (num_entities, num_utterance_tokens).
self.linking_scores: numpy.ndarray = linking_scores
        self.grammar_str = self.get_grammar_str()  # type: unicode
self.grammar_with_context: Grammar = Grammar(self.grammar_str)
def get_valid_actions(self) :
return self.valid_actions
def init_all_valid_actions(self) :
u"""
We initialize the valid actions with the global actions. We then iterate through the
utterances up to and including the current utterance and add the valid strings.
"""
valid_actions = deepcopy(self.sql_table_context.valid_actions)
linking_scores = []
current_tokenized_utterance = [] if not self.tokenized_utterances\
else self.tokenized_utterances[-1]
strings = set()
for tokenized_utterance in self.tokenized_utterances:
string_linking_dict = get_strings_from_utterance(tokenized_utterance)
strings.update(list(string_linking_dict.keys()))
# We want to sort things in reverse here to be consistent with the grammar.
# The parser is greedy which means that if we have a rule that has
# multiple options for the right hand side, the first one that succeeds is
# the one that is used. For example, if ``1400`` appears in the query, and
# both ``1400`` and ``1`` are valid numbers, then we want to try to match
# ``1400`` first. Otherwise, ``1`` will succeed but nothing will match ``400``.
# The same applies for strings here.
strings_list = sorted(strings, reverse=True)
# We construct the linking scores for strings from the ``string_linking_dict`` here.
string_linking_scores = []
for string in strings_list:
entity_linking = [0 for token in current_tokenized_utterance]
# string_linking_dict has the strings and linking scores from the last utterance.
# If the string is not in the last utterance, then the linking scores will be all 0.
for token_index in string_linking_dict.get(string, []):
entity_linking[token_index] = 1
string_linking_scores.append(entity_linking)
linking_scores.extend(string_linking_scores)
for string in strings_list:
action = format_action(u'string', string)
if action not in valid_actions[u'string']:
valid_actions[u'string'].append(action)
numbers = set([u'0', u'1'])
number_linking_dict = {}
for utterance, tokenized_utterance in izip(self.utterances, self.tokenized_utterances):
number_linking_dict = get_numbers_from_utterance(utterance, tokenized_utterance)
numbers.update(list(number_linking_dict.keys()))
numbers_list = sorted(numbers, reverse=True)
# We construct the linking scores for numbers from the ``number_linking_dict`` here.
number_linking_scores = []
for number in numbers_list:
entity_linking = [0 for token in current_tokenized_utterance]
# number_linking_scores has the numbers and linking scores from the last utterance.
# If the number is not in the last utterance, then the linking scores will be all 0.
for token_index in number_linking_dict.get(number, []):
entity_linking[token_index] = 1
number_linking_scores.append(entity_linking)
linking_scores.extend(number_linking_scores)
for number in list(numbers_list):
action = format_action(u'number', number)
valid_actions[u'number'].append(action)
return valid_actions, numpy.array(linking_scores)
def get_grammar_str(self) :
u"""
Generate a string that can be used to instantiate a ``Grammar`` object. The string is a sequence of
rules that define the grammar.
"""
grammar_str_with_context = self.sql_table_context.grammar_str
numbers = [number.split(u" -> ")[1].lstrip(u'["').rstrip(u'"]') for\
number in sorted(self.valid_actions[u'number'], reverse=True)]
        strings = [string.split(u" -> ")[1].lstrip(u'["').rstrip(u'"]') for\
string in sorted(self.valid_actions[u'string'], reverse=True)]
grammar_str_with_context += generate_one_of_string(u"number", numbers)
grammar_str_with_context += generate_one_of_string(u"string", strings)
return grammar_str_with_context
def get_action_sequence(self, query ) :
sql_visitor = SqlVisitor(self.grammar_with_context)
if query:
action_sequence = sql_visitor.parse(query)
return action_sequence
return []
def all_possible_actions(self) :
u"""
Return a sorted list of strings representing all possible actions
of the form: nonterminal -> [right_hand_side]
"""
all_actions = set()
for _, action_list in list(self.valid_actions.items()):
for action in action_list:
all_actions.add(action)
return sorted(all_actions)
def __eq__(self, other):
if isinstance(self, other.__class__):
return all([self.valid_actions == other.valid_actions,
numpy.array_equal(self.linking_scores, other.linking_scores),
self.utterances == other.utterances,
self.grammar_str == other.grammar_str])
return False
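# A hedged usage sketch, not part of the original module: build a world from an
# interaction and inspect its grammar-derived actions.
#     world = AtisWorld([u"show me flights from denver to boston"])
#     actions = world.all_possible_actions()
#     sequence = world.get_action_sequence(u"( SELECT ... ) ;")
# The SQL string passed to get_action_sequence must be parseable by
# `grammar_with_context`; the query above is only a placeholder.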
| 46.156425 | 109 | 0.659647 |
03ab7df3acb6025094fae83201ad97007d831665
| 5,900 |
py
|
Python
|
notebooks/forrester2007/function_defs.py
|
sjvrijn/multi-level-co-surrogates
|
04a071eb4360bed6f1a517531690beec7857e3e5
|
[
"MIT"
] | null | null | null |
notebooks/forrester2007/function_defs.py
|
sjvrijn/multi-level-co-surrogates
|
04a071eb4360bed6f1a517531690beec7857e3e5
|
[
"MIT"
] | 2 |
2021-02-25T14:07:50.000Z
|
2021-02-25T14:12:35.000Z
|
notebooks/forrester2007/function_defs.py
|
sjvrijn/multi-level-co-surrogates
|
04a071eb4360bed6f1a517531690beec7857e3e5
|
[
"MIT"
] | null | null | null |
import sys
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.display import clear_output
from matplotlib import colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pyDOE import lhs
from pyprojroot import here
module_path = str(here())
if module_path not in sys.path:
sys.path.append(module_path)
import multiLevelCoSurrogates as mlcs
def low_random_sample(ndim, nlow):
return np.random.rand(nlow, ndim)
def low_lhs_sample(ndim, nlow):
if ndim == 1:
return np.linspace(0,1,nlow).reshape(-1,1)
elif ndim > 1:
return lhs(ndim, nlow)
def create_mse_tracking(func, sample_generator,
max_high=40, max_low=100, num_reps=30,
min_high=2, min_low=3):
ndim = func.ndim
mse_tracking = np.empty((max_high+1, max_low+1, num_reps, 3))
mse_tracking[:] = np.nan
cases = list(product(range(min_high, max_high+1), range(min_low, max_low+1), range(num_reps)))
for idx, case in enumerate(cases):
num_high, num_low, rep = case
if num_high >= num_low:
continue
if idx % 100 == 0:
clear_output()
print(f'{idx}/{len(cases)}')
low_x = sample_generator(ndim, num_low)
high_x = low_x[np.random.choice(num_low, num_high, replace=False)]
archive = mlcs.CandidateArchive(ndim=ndim, fidelities=['high', 'low', 'high-low'])
archive.addcandidates(low_x, func.low(low_x), fidelity='low')
archive.addcandidates(high_x, func.high(high_x), fidelity='high')
mfbo = mlcs.MultiFidelityBO(func, archive)
mse_tracking[num_high, num_low, rep] = mfbo.getMSE()
clear_output()
print(f'{len(cases)}/{len(cases)}')
return mse_tracking
def plot_high_vs_low_num_samples(data, title, vmin=.5, vmax=100, save_as=None):
norm = colors.LogNorm(vmin=vmin, vmax=vmax, clip=True)
fig, ax = plt.subplots(figsize=(9,3.5))
ax.set_aspect(1.)
data = data.median(dim='rep')
plt.title(f'Median MSE for high (hierarchical) model - {title}')
img = ax.imshow(data.sel(model='high_hier'), cmap='viridis_r', norm=norm, origin='lower')
divider = make_axes_locatable(ax)
axx = divider.append_axes("bottom", size=.2, pad=0.05, sharex=ax)
axy = divider.append_axes("left", size=.2, pad=0.05, sharey=ax)
ax.xaxis.set_tick_params(labelbottom=False)
ax.yaxis.set_tick_params(labelleft=False)
axy.xaxis.set_tick_params(labelbottom=False)
axx.yaxis.set_tick_params(labelleft=False)
img = axy.imshow(data.sel(model='high').mean(dim='n_low').values.reshape(-1,1), cmap='viridis_r', norm=norm, origin='lower')
img = axx.imshow(data.sel(model='low').mean(dim='n_high').values.reshape(1,-1), cmap='viridis_r', norm=norm, origin='lower')
fig.colorbar(img, ax=ax, orientation='vertical')
axy.set_ylabel('#High-fid samples')
axx.set_xlabel('#Low-fid samples')
plt.tight_layout()
if save_as:
plt.savefig(save_as)
plt.show()
def plot_high_vs_low_num_samples_diff(data, title, max_diff=None, save_as=None):
paired_diffs = data.sel(model='high') - data.sel(model='high_hier')
to_plot = paired_diffs.median(dim='rep')
if max_diff is None:
max_diff = 2*min(abs(np.nanmin(to_plot)), np.nanmax(to_plot))
norm = colors.SymLogNorm(linthresh=.01, vmin=-max_diff, vmax=max_diff, clip=True)
long_title = f'Median of paired (high (hierarchical) - high (direct)) MSE - {title}'
plot_high_v_low(long_title, norm, save_as, to_plot)
def plot_inter_method_diff(data_A, data_B, name, max_diff=None, save_as=None):
to_plot = np.nanmedian(data_A.sel(model='high_hier') - data_B.sel(model='high_hier'), axis=2)
if max_diff is None:
max_diff = 2*min(abs(np.nanmin(to_plot)), np.nanmax(to_plot))
norm = colors.Normalize(vmin=-max_diff, vmax=max_diff, clip=True)
long_title = f'high (hierarchical) MSE: {name}'
plot_high_v_low(long_title, norm, save_as, to_plot)
def plot_high_v_low(long_title, norm, save_as, to_plot):
fig, ax = plt.subplots(figsize=(9, 3.5))
img = ax.imshow(to_plot, cmap='RdYlGn', norm=norm, origin='lower')
fig.colorbar(img, ax=ax, orientation='vertical')
ax.set_ylabel('#High-fid samples')
ax.set_xlabel('#Low-fid samples')
plt.title(long_title)
plt.tight_layout()
if save_as:
plt.savefig(save_as)
plt.show()
# defining some point styles
red_dot = {'marker': '.', 'color': 'red'}
blue_circle = {'marker': 'o', 'facecolors': 'none', 'color': 'blue'}
def create_models_and_compare(func, low, high, steps=None, save_as=None):
archive = mlcs.CandidateArchive(ndim=2, fidelities=['high', 'low', 'high-low'])
archive.addcandidates(low, func.low(low), fidelity='low')
archive.addcandidates(high, func.high(high), fidelity='high')
mfbo = mlcs.MultiFidelityBO(func, archive, schema=[1,1])
surf_high = mlcs.createsurface(func.high, u_bound=func.u_bound, l_bound=func.l_bound, step=steps)
surf_low = mlcs.createsurface(func.low, u_bound=func.u_bound, l_bound=func.l_bound, step=steps)
surf_high_model = mlcs.createsurface(mfbo.models['high'].predict, u_bound=func.u_bound, l_bound=func.l_bound, step=steps)
surf_low_model = mlcs.createsurface(mfbo.models['low'].predict, u_bound=func.u_bound, l_bound=func.l_bound, step=steps)
points_high = [mlcs.ScatterPoints(*archive.getcandidates(fidelity='high'), red_dot)]
points_low = [mlcs.ScatterPoints(*archive.getcandidates(fidelity='low'), blue_circle)]
points = [
points_high, points_low,
points_high, points_low,
]
mlcs.plotsurfaces([surf_high, surf_low, surf_high_model, surf_low_model], shape=(2,2),
titles=['high', 'low', 'high (hierarchical model)', 'low (model)'], all_points=points,
save_as=save_as)
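# Minimal sketch exercising only the two samplers defined above; shapes come out
# as (num_samples, ndim).
if __name__ == '__main__':
    print(low_random_sample(2, 5).shape)   # (5, 2), uniform random points
    print(low_lhs_sample(2, 5).shape)      # (5, 2), Latin Hypercube points
    print(low_lhs_sample(1, 5))            # evenly spaced column vector on [0, 1]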
| 35.97561 | 128 | 0.682373 |
79d9b1d5bd657c57090e7baddbc06970988e428d
| 661 |
py
|
Python
|
setup.py
|
apparentlymart/python-tfplugin
|
9f1e5c463df9368928bb79188058bc474386b5ba
|
[
"MIT"
] | 2 |
2019-09-08T23:33:56.000Z
|
2022-01-19T01:29:20.000Z
|
setup.py
|
apparentlymart/python-tfplugin
|
9f1e5c463df9368928bb79188058bc474386b5ba
|
[
"MIT"
] | null | null | null |
setup.py
|
apparentlymart/python-tfplugin
|
9f1e5c463df9368928bb79188058bc474386b5ba
|
[
"MIT"
] | null | null | null |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="tfplugin",
version="dev",
author="Martin Atkins",
author_email="[email protected]",
description="Implement Terraform plugins in Python",
packages=['tfplugin'],
install_requires=[
'protobuf>=3.6.1',
],
setup_requires=[
'nose>=1.0',
],
tests_require=[
'nose>=1.0',
'coverage',
'mock',
'pep8',
],
test_suite='nose.collector',
classifiers=[
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
],
)
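# Common invocations for this setup script (a sketch; adjust to your workflow):
#   pip install -e .          # editable install of the tfplugin package
#   python setup.py test      # runs the nose-based suite declared in test_suite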
| 19.441176 | 56 | 0.574887 |
5e4217ecb1548dfbf5f3509dd6285b89e9baada5
| 3,457 |
py
|
Python
|
main_pretraining.py
|
cypressd1999/FYP_2021
|
d836a355b1513bbca1f1429650ddf670f7b13994
|
[
"Apache-2.0"
] | null | null | null |
main_pretraining.py
|
cypressd1999/FYP_2021
|
d836a355b1513bbca1f1429650ddf670f7b13994
|
[
"Apache-2.0"
] | null | null | null |
main_pretraining.py
|
cypressd1999/FYP_2021
|
d836a355b1513bbca1f1429650ddf670f7b13994
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 27 11:16:26 2019
@author: weetee
"""
from src.preprocessing_funcs import load_dataloaders
from src.trainer import train_and_fit
import logging
from argparse import ArgumentParser
'''
This trains the BERT model on matching the blanks
'''
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger('__file__')
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--pretrain_data", type=str, default="./original_data/cnn.txt", \
help="pre-training data .txt file path")
parser.add_argument("--batch_size", type=int, default=32, help="Training batch size")
parser.add_argument("--freeze", type=int, default=0, help='''1: Freeze most layers until classifier layers\
\n0: Don\'t freeze \
(Probably best not to freeze if GPU memory is sufficient)''')
parser.add_argument("--gradient_acc_steps", type=int, default=2, help="No. of steps of gradient accumulation")
parser.add_argument("--max_norm", type=float, default=1.0, help="Clipped gradient norm")
parser.add_argument("--fp16", type=int, default=0, help="1: use mixed precision ; 0: use floating point 32") # mixed precision doesn't seem to train well
parser.add_argument("--num_epochs", type=int, default=18, help="No of epochs")
parser.add_argument("--lr", type=float, default=0.0001, help="learning rate")
parser.add_argument("--model_no", type=int, default=0, help='''Model ID: 0 - BERT\n
1 - ALBERT\n
2 - BioBERT''')
parser.add_argument("--model_size", type=str, default='bert-base-uncased', help="For BERT: 'bert-base-uncased', \
'bert-large-uncased',\
For ALBERT: 'albert-base-v2',\
'albert-large-v2',\
For BioBERT: 'bert-base-uncased' (biobert_v1.1_pubmed)")
args = parser.parse_args()
output = train_and_fit(args)
'''
# For testing additional models
from src.model.BERT.modeling_bert import BertModel, BertConfig
from src.model.BERT.tokenization_bert import BertTokenizer as Tokenizer
config = BertConfig.from_pretrained('./additional_models/biobert_v1.1_pubmed/bert_config.json')
model = BertModel.from_pretrained(pretrained_model_name_or_path='./additional_models/biobert_v1.1_pubmed.bin',
config=config,
force_download=False, \
model_size='bert-base-uncased',
task='classification',\
n_classes_=12)
tokenizer = Tokenizer(vocab_file='./additional_models/biobert_v1.1_pubmed/vocab.txt',
do_lower_case=False)
'''
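# Example invocation (a sketch assembled from the arguments defined above;
# adjust paths and hyperparameters to your data and hardware):
#   python main_pretraining.py --pretrain_data ./original_data/cnn.txt \
#       --batch_size 32 --gradient_acc_steps 2 --num_epochs 18 \
#       --model_no 0 --model_size bert-base-uncased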
| 56.672131 | 157 | 0.534278 |
dc63cd77835e1c34e3162b25c11d58ac3fa88ed4
| 2,847 |
py
|
Python
|
Halloween_Countdown_Matrix/code.py
|
albinger/Adafruit_Learning_System_Guides
|
4fe2da261fe5d1ca282b86bd3b93ee1466346fa7
|
[
"MIT"
] | null | null | null |
Halloween_Countdown_Matrix/code.py
|
albinger/Adafruit_Learning_System_Guides
|
4fe2da261fe5d1ca282b86bd3b93ee1466346fa7
|
[
"MIT"
] | null | null | null |
Halloween_Countdown_Matrix/code.py
|
albinger/Adafruit_Learning_System_Guides
|
4fe2da261fe5d1ca282b86bd3b93ee1466346fa7
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2020 John Park for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import board
from adafruit_matrixportal.matrixportal import MatrixPortal
EVENT_YEAR = 2021
EVENT_MONTH = 10
EVENT_DAY = 31
EVENT_HOUR = 17
EVENT_MINUTE = 0
FRAME_DURATION = 3
FRAMES = (
"bmps/jack.bmp",
"DAYS",
"bmps/ghost.bmp",
"HOURS",
"bmps/bats.bmp",
"MINUTES",
"bmps/skull.bmp",
"bmps/halloween.bmp",
)
EVENT_DAY_IMAGE = "bmps/happy_halloween.bmp"
SYNCHRONIZE_CLOCK = True
# --- Display setup ---
matrixportal = MatrixPortal(status_neopixel=board.NEOPIXEL, debug=True)
current_frame = None
# Create a new label with the color and text selected
matrixportal.add_text(
text_font="fonts/Arial-12.bdf",
text_position=(4, (matrixportal.graphics.display.height // 2) - 1),
text_color=0xEF7F31,
)
def set_time_until(unit=None):
event_time = time.struct_time(
(
EVENT_YEAR,
EVENT_MONTH,
EVENT_DAY,
EVENT_HOUR,
EVENT_MINUTE,
0, # we don't track seconds
-1,
-1,
False,
)
)
remaining = time.mktime(event_time) - time.mktime(time.localtime())
if remaining <= 0:
# oh, its event time!
matrixportal.set_background(EVENT_DAY_IMAGE)
return
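    # remaining is in seconds here; peel off minutes, then hours, then days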
remaining //= 60
mins_remaining = remaining % 60
remaining //= 60
hours_remaining = remaining % 24
remaining //= 24
days_remaining = remaining
if unit == "DAYS":
text = "{} day".format(days_remaining)
if days_remaining != 1:
text += "s"
if unit == "HOURS":
text = "{} hour".format(hours_remaining)
if hours_remaining != 1:
text += "s"
if unit == "MINUTES":
text = "{} min".format(mins_remaining)
if mins_remaining != 1:
text += "s"
matrixportal.set_text(text)
matrixportal.set_background(0)
def set_next_frame():
# pylint: disable=global-statement
global current_frame
# Advance to next frame if we already have one
if current_frame is not None:
current_frame += 1
# Loop back or set initial frame
if current_frame is None or current_frame >= len(FRAMES):
current_frame = 0
# Check if Picture or Text
print(FRAMES[current_frame])
if FRAMES[current_frame][-4:] == ".bmp":
matrixportal.set_background(FRAMES[current_frame])
matrixportal.set_text("")
else:
set_time_until(FRAMES[current_frame])
# Simulate the delay in case fetching time is fast
set_next_frame()
start_time = time.monotonic()
if SYNCHRONIZE_CLOCK:
matrixportal.get_local_time()
while time.monotonic() < start_time + FRAME_DURATION:
pass
while True:
set_next_frame()
time.sleep(FRAME_DURATION)
| 24.333333 | 71 | 0.638918 |
6adaf398cefef7ea1034a99b20566fee93459175
| 1,298 |
py
|
Python
|
modelzoo/DIEN/data/script/history_behavior_list.py
|
aalbersk/DeepRec
|
f673a950780959b44dcda99398880a1d883ab338
|
[
"Apache-2.0"
] | 292 |
2021-12-24T03:24:33.000Z
|
2022-03-31T15:41:05.000Z
|
modelzoo/DIEN/data/script/history_behavior_list.py
|
aalbersk/DeepRec
|
f673a950780959b44dcda99398880a1d883ab338
|
[
"Apache-2.0"
] | 54 |
2021-12-24T06:40:09.000Z
|
2022-03-30T07:57:24.000Z
|
modelzoo/DIEN/data/script/history_behavior_list.py
|
aalbersk/DeepRec
|
f673a950780959b44dcda99398880a1d883ab338
|
[
"Apache-2.0"
] | 75 |
2021-12-24T04:48:21.000Z
|
2022-03-29T10:13:39.000Z
|
item_to_cate_map = {}
with open('item2catmap.txt', 'r') as f:
for line in f:
linelist = line.strip().split('\t')
item = linelist[0]
cate = linelist[1]
item_to_cate_map[item] = cate
user_history_behavior = {}
with open('reviews-info', 'r') as f:
for line in f:
linelist = line.strip().split('\t')
uid = linelist[0]
item = linelist[1]
if uid not in user_history_behavior:
user_history_behavior[uid] = [item]
else:
if item not in user_history_behavior[uid]:
user_history_behavior[uid].append(item)
FirstLine = True
with open('user_history_behavior.txt', 'w') as f:
for uid, items in user_history_behavior.items():
itemstr = ''
catestr = ''
for i in items:
if i in item_to_cate_map:
c = item_to_cate_map[i]
else:
c = 'Unknown'
if not itemstr:
itemstr += i
catestr += c
else:
                itemstr += ('\x02' + i)  # '\x02' separates entries within a field (DIEN list convention)
                catestr += ('\x02' + c)
if FirstLine:
f.write(uid + '\t' + itemstr + '\t' + catestr)
FirstLine = False
else:
f.write('\n' + uid + '\t' + itemstr + '\t' + catestr)
| 30.904762 | 65 | 0.501541 |
d5d9223ed0e9922501535057cea298170d33ec43
| 33,184 |
py
|
Python
|
tests/test_secretsmanager/test_server.py
|
thomassross/moto
|
407d5c853dbee9b9e132d97b41414b7dca475765
|
[
"Apache-2.0"
] | null | null | null |
tests/test_secretsmanager/test_server.py
|
thomassross/moto
|
407d5c853dbee9b9e132d97b41414b7dca475765
|
[
"Apache-2.0"
] | 4 |
2017-09-30T07:52:52.000Z
|
2021-12-13T06:56:55.000Z
|
tests/test_secretsmanager/test_server.py
|
thomassross/moto
|
407d5c853dbee9b9e132d97b41414b7dca475765
|
[
"Apache-2.0"
] | 2 |
2021-11-24T08:05:43.000Z
|
2021-11-25T16:18:48.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import boto3
import pytest
import sure # noqa
import moto.server as server
from moto import mock_secretsmanager, mock_lambda, mock_iam, mock_logs, settings
from tests.test_awslambda.test_lambda import get_test_zip_file1
"""
Test the different server responses for secretsmanager
"""
DEFAULT_SECRET_NAME = "test-secret"
@mock_secretsmanager
def test_get_secret_value():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foo-secret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
get_secret = test_client.post(
"/",
data={"SecretId": DEFAULT_SECRET_NAME, "VersionStage": "AWSCURRENT"},
headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
)
json_data = json.loads(get_secret.data.decode("utf-8"))
assert json_data["SecretString"] == "foo-secret"
@mock_secretsmanager
def test_get_secret_that_does_not_exist():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
get_secret = test_client.post(
"/",
data={"SecretId": "i-dont-exist", "VersionStage": "AWSCURRENT"},
headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
)
json_data = json.loads(get_secret.data.decode("utf-8"))
assert json_data["message"] == "Secrets Manager can't find the specified secret."
assert json_data["__type"] == "ResourceNotFoundException"
@mock_secretsmanager
def test_get_secret_that_does_not_match():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foo-secret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
get_secret = test_client.post(
"/",
data={"SecretId": "i-dont-match", "VersionStage": "AWSCURRENT"},
headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
)
json_data = json.loads(get_secret.data.decode("utf-8"))
assert json_data["message"] == "Secrets Manager can't find the specified secret."
assert json_data["__type"] == "ResourceNotFoundException"
@mock_secretsmanager
def test_get_secret_that_has_no_value():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
get_secret = test_client.post(
"/",
data={"SecretId": DEFAULT_SECRET_NAME},
headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
)
json_data = json.loads(get_secret.data.decode("utf-8"))
assert (
json_data["message"]
== "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT"
)
assert json_data["__type"] == "ResourceNotFoundException"
@mock_secretsmanager
def test_create_secret():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
res = test_client.post(
"/",
data={"Name": "test-secret", "SecretString": "foo-secret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
res_2 = test_client.post(
"/",
data={"Name": "test-secret-2", "SecretString": "bar-secret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
json_data = json.loads(res.data.decode("utf-8"))
assert json_data["ARN"] != ""
assert json_data["Name"] == "test-secret"
json_data_2 = json.loads(res_2.data.decode("utf-8"))
assert json_data_2["ARN"] != ""
assert json_data_2["Name"] == "test-secret-2"
@mock_secretsmanager
def test_describe_secret():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": "test-secret", "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
describe_secret = test_client.post(
"/",
data={"SecretId": "test-secret"},
headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
)
create_secret_2 = test_client.post(
"/",
data={"Name": "test-secret-2", "SecretString": "barsecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
describe_secret_2 = test_client.post(
"/",
data={"SecretId": "test-secret-2"},
headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
)
json_data = json.loads(describe_secret.data.decode("utf-8"))
assert json_data # Returned dict is not empty
assert json_data["ARN"] != ""
assert json_data["Name"] == "test-secret"
json_data_2 = json.loads(describe_secret_2.data.decode("utf-8"))
assert json_data_2 # Returned dict is not empty
assert json_data_2["ARN"] != ""
assert json_data_2["Name"] == "test-secret-2"
@mock_secretsmanager
def test_describe_secret_that_does_not_exist():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
describe_secret = test_client.post(
"/",
data={"SecretId": "i-dont-exist"},
headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
)
json_data = json.loads(describe_secret.data.decode("utf-8"))
assert json_data["message"] == "Secrets Manager can't find the specified secret."
assert json_data["__type"] == "ResourceNotFoundException"
@mock_secretsmanager
def test_describe_secret_that_does_not_match():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
describe_secret = test_client.post(
"/",
data={"SecretId": "i-dont-match"},
headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
)
json_data = json.loads(describe_secret.data.decode("utf-8"))
assert json_data["message"] == "Secrets Manager can't find the specified secret."
assert json_data["__type"] == "ResourceNotFoundException"
@mock_secretsmanager
def test_rotate_secret():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2"
rotate_secret = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"ClientRequestToken": client_request_token,
},
headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
)
json_data = json.loads(rotate_secret.data.decode("utf-8"))
assert json_data # Returned dict is not empty
assert json_data["ARN"] != ""
assert json_data["Name"] == DEFAULT_SECRET_NAME
assert json_data["VersionId"] == client_request_token
# @mock_secretsmanager
# def test_rotate_secret_enable_rotation():
# backend = server.create_backend_app('secretsmanager')
# test_client = backend.test_client()
# create_secret = test_client.post(
# '/',
# data={
# "Name": "test-secret",
# "SecretString": "foosecret"
# },
# headers={
# "X-Amz-Target": "secretsmanager.CreateSecret"
# },
# )
# initial_description = test_client.post(
# '/',
# data={
# "SecretId": "test-secret"
# },
# headers={
# "X-Amz-Target": "secretsmanager.DescribeSecret"
# },
# )
# json_data = json.loads(initial_description.data.decode("utf-8"))
# assert json_data # Returned dict is not empty
# assert json_data['RotationEnabled'] is False
# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 0
# rotate_secret = test_client.post(
# '/',
# data={
# "SecretId": "test-secret",
# "RotationRules": {"AutomaticallyAfterDays": 42}
# },
# headers={
# "X-Amz-Target": "secretsmanager.RotateSecret"
# },
# )
# rotated_description = test_client.post(
# '/',
# data={
# "SecretId": "test-secret"
# },
# headers={
# "X-Amz-Target": "secretsmanager.DescribeSecret"
# },
# )
# json_data = json.loads(rotated_description.data.decode("utf-8"))
# assert json_data # Returned dict is not empty
# assert json_data['RotationEnabled'] is True
# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 42
@mock_secretsmanager
def test_rotate_secret_that_does_not_exist():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
rotate_secret = test_client.post(
"/",
data={"SecretId": "i-dont-exist"},
headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
)
json_data = json.loads(rotate_secret.data.decode("utf-8"))
assert json_data["message"] == "Secrets Manager can't find the specified secret."
assert json_data["__type"] == "ResourceNotFoundException"
@mock_secretsmanager
def test_rotate_secret_that_does_not_match():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
rotate_secret = test_client.post(
"/",
data={"SecretId": "i-dont-match"},
headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
)
json_data = json.loads(rotate_secret.data.decode("utf-8"))
assert json_data["message"] == "Secrets Manager can't find the specified secret."
assert json_data["__type"] == "ResourceNotFoundException"
@mock_secretsmanager
def test_rotate_secret_that_is_still_rotating():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={
"Name": DEFAULT_SECRET_NAME,
"SecretString": "foosecret",
# "VersionStages": ["AWSPENDING"],
},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
create_secret = json.loads(create_secret.data.decode("utf-8"))
# Get the secret into a broken state.
version_id = create_secret["VersionId"]
test_client.post(
"/",
data={
"SecretId": "test-secret",
"VersionStage": "AWSPENDING",
"MoveToVersionId": version_id,
},
headers={"X-Amz-Target": "secretsmanager.UpdateSecretVersionStage"},
)
describe_secret = test_client.post(
"/",
data={"SecretId": DEFAULT_SECRET_NAME},
headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
)
metadata = json.loads(describe_secret.data.decode("utf-8"))
assert metadata["SecretVersionsToStages"][version_id] == [
"AWSCURRENT",
"AWSPENDING",
]
# Then attempt to rotate it
rotate_secret = test_client.post(
"/",
data={"SecretId": DEFAULT_SECRET_NAME},
headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
)
assert rotate_secret.status_code == 400
@mock_secretsmanager
def test_rotate_secret_client_request_token_too_short():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C"
rotate_secret = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"ClientRequestToken": client_request_token,
},
headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
)
json_data = json.loads(rotate_secret.data.decode("utf-8"))
assert json_data["message"] == "ClientRequestToken must be 32-64 characters long."
assert json_data["__type"] == "InvalidParameterException"
@mock_secretsmanager
def test_rotate_secret_client_request_token_too_long():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
client_request_token = (
"ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-" "ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C"
)
rotate_secret = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"ClientRequestToken": client_request_token,
},
headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
)
json_data = json.loads(rotate_secret.data.decode("utf-8"))
assert json_data["message"] == "ClientRequestToken must be 32-64 characters long."
assert json_data["__type"] == "InvalidParameterException"
@mock_secretsmanager
def test_rotate_secret_rotation_lambda_arn_too_long():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
rotation_lambda_arn = "85B7-446A-B7E4" * 147 # == 2058 characters
rotate_secret = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"RotationLambdaARN": rotation_lambda_arn,
},
headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
)
json_data = json.loads(rotate_secret.data.decode("utf-8"))
assert json_data["message"] == "RotationLambdaARN must <= 2048 characters long."
assert json_data["__type"] == "InvalidParameterException"
if not settings.TEST_SERVER_MODE:
@mock_iam
@mock_lambda
@mock_logs
@mock_secretsmanager
def test_rotate_secret_lambda_invocations():
conn = boto3.client("iam", region_name="us-east-1")
logs_conn = boto3.client("logs", region_name="us-east-1")
role = conn.create_role(
RoleName="role", AssumeRolePolicyDocument="some policy", Path="/my-path/",
)
conn = boto3.client("lambda", region_name="us-east-1")
func = conn.create_function(
FunctionName="testFunction",
Code=dict(ZipFile=get_test_zip_file1()),
Handler="lambda_function.lambda_handler",
Runtime="python2.7",
Role=role["Role"]["Arn"],
)
secretsmanager_backend = server.create_backend_app("secretsmanager")
secretsmanager_client = secretsmanager_backend.test_client()
secretsmanager_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
with pytest.raises(logs_conn.exceptions.ResourceNotFoundException):
# The log group doesn't exist yet
logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction")
secretsmanager_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"RotationLambdaARN": func["FunctionArn"],
},
headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
)
# The log group now exists and has been logged to 4 times (for each invocation)
logs = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction")
assert len(logs["logStreams"]) == 4
@mock_iam
@mock_lambda
@mock_logs
@mock_secretsmanager
def test_rotate_secret_with_incorrect_lambda_arn():
secretsmanager_backend = server.create_backend_app("secretsmanager")
secretsmanager_client = secretsmanager_backend.test_client()
secretsmanager_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
resp = secretsmanager_client.post(
"/",
data={"SecretId": DEFAULT_SECRET_NAME, "RotationLambdaARN": "notarealarn",},
headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
)
json_data = json.loads(resp.data.decode("utf-8"))
assert json_data["message"] == "Resource not found for ARN 'notarealarn'."
assert json_data["__type"] == "ResourceNotFoundException"
assert resp.status_code == 404
@mock_secretsmanager
def test_put_secret_value_puts_new_secret():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"SecretString": "foosecret",
"VersionStages": ["AWSCURRENT"],
},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
put_second_secret_value_json = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"SecretString": "foosecret",
"VersionStages": ["AWSCURRENT"],
},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
second_secret_json_data = json.loads(
put_second_secret_value_json.data.decode("utf-8")
)
version_id = second_secret_json_data["VersionId"]
secret_value_json = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"VersionId": version_id,
"VersionStage": "AWSCURRENT",
},
headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
)
second_secret_json_data = json.loads(secret_value_json.data.decode("utf-8"))
assert second_secret_json_data
assert second_secret_json_data["SecretString"] == "foosecret"
@mock_secretsmanager
def test_put_secret_value_can_get_first_version_if_put_twice():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
first_secret_string = "first_secret"
second_secret_string = "second_secret"
test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
put_first_secret_value_json = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"SecretString": first_secret_string,
"VersionStages": ["AWSCURRENT"],
},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
first_secret_json_data = json.loads(
put_first_secret_value_json.data.decode("utf-8")
)
first_secret_version_id = first_secret_json_data["VersionId"]
test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"SecretString": second_secret_string,
"VersionStages": ["AWSCURRENT"],
},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
get_first_secret_value_json = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"VersionId": first_secret_version_id,
"VersionStage": "AWSCURRENT",
},
headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
)
get_first_secret_json_data = json.loads(
get_first_secret_value_json.data.decode("utf-8")
)
assert get_first_secret_json_data
assert get_first_secret_json_data["SecretString"] == first_secret_string
@mock_secretsmanager
def test_put_secret_value_versions_differ_if_same_secret_put_twice():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
put_first_secret_value_json = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"SecretString": "secret",
"VersionStages": ["AWSCURRENT"],
},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
first_secret_json_data = json.loads(
put_first_secret_value_json.data.decode("utf-8")
)
first_secret_version_id = first_secret_json_data["VersionId"]
put_second_secret_value_json = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"SecretString": "secret",
"VersionStages": ["AWSCURRENT"],
},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
second_secret_json_data = json.loads(
put_second_secret_value_json.data.decode("utf-8")
)
second_secret_version_id = second_secret_json_data["VersionId"]
assert first_secret_version_id != second_secret_version_id
@mock_secretsmanager
def test_can_list_secret_version_ids():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
put_first_secret_value_json = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"SecretString": "secret",
"VersionStages": ["AWSCURRENT"],
},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
first_secret_json_data = json.loads(
put_first_secret_value_json.data.decode("utf-8")
)
first_secret_version_id = first_secret_json_data["VersionId"]
put_second_secret_value_json = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"SecretString": "secret",
"VersionStages": ["AWSCURRENT"],
},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
second_secret_json_data = json.loads(
put_second_secret_value_json.data.decode("utf-8")
)
second_secret_version_id = second_secret_json_data["VersionId"]
list_secret_versions_json = test_client.post(
"/",
data={"SecretId": DEFAULT_SECRET_NAME},
headers={"X-Amz-Target": "secretsmanager.ListSecretVersionIds"},
)
versions_list = json.loads(list_secret_versions_json.data.decode("utf-8"))
returned_version_ids = [v["VersionId"] for v in versions_list["Versions"]]
    assert first_secret_version_id in returned_version_ids
    assert second_secret_version_id in returned_version_ids
@mock_secretsmanager
def test_get_resource_policy_secret():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": "test-secret", "SecretString": "foosecret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
describe_secret = test_client.post(
"/",
data={"SecretId": "test-secret"},
headers={"X-Amz-Target": "secretsmanager.GetResourcePolicy"},
)
json_data = json.loads(describe_secret.data.decode("utf-8"))
assert json_data # Returned dict is not empty
assert json_data["ARN"] != ""
assert json_data["Name"] == "test-secret"
@mock_secretsmanager
def test_update_secret_version_stage():
custom_stage = "CUSTOM_STAGE"
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": "test-secret", "SecretString": "secret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
create_secret = json.loads(create_secret.data.decode("utf-8"))
initial_version = create_secret["VersionId"]
# Create a new version
put_secret = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"SecretString": "secret",
"VersionStages": [custom_stage],
},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
put_secret = json.loads(put_secret.data.decode("utf-8"))
new_version = put_secret["VersionId"]
describe_secret = test_client.post(
"/",
data={"SecretId": "test-secret"},
headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
)
json_data = json.loads(describe_secret.data.decode("utf-8"))
stages = json_data["SecretVersionsToStages"]
assert len(stages) == 2
assert stages[initial_version] == ["AWSPREVIOUS"]
assert stages[new_version] == [custom_stage]
test_client.post(
"/",
data={
"SecretId": "test-secret",
"VersionStage": custom_stage,
"RemoveFromVersionId": new_version,
"MoveToVersionId": initial_version,
},
headers={"X-Amz-Target": "secretsmanager.UpdateSecretVersionStage"},
)
describe_secret = test_client.post(
"/",
data={"SecretId": "test-secret"},
headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
)
json_data = json.loads(describe_secret.data.decode("utf-8"))
stages = json_data["SecretVersionsToStages"]
assert len(stages) == 2
assert stages[initial_version] == ["AWSPREVIOUS", custom_stage]
assert stages[new_version] == []
@mock_secretsmanager
def test_update_secret_version_stage_currentversion_handling():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
create_secret = test_client.post(
"/",
data={"Name": "test-secret", "SecretString": "secret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
create_secret = json.loads(create_secret.data.decode("utf-8"))
initial_version = create_secret["VersionId"]
# Create a new version
put_secret = test_client.post(
"/",
data={"SecretId": DEFAULT_SECRET_NAME, "SecretString": "secret",},
headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
)
put_secret = json.loads(put_secret.data.decode("utf-8"))
new_version = put_secret["VersionId"]
describe_secret = test_client.post(
"/",
data={"SecretId": "test-secret"},
headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
)
json_data = json.loads(describe_secret.data.decode("utf-8"))
stages = json_data["SecretVersionsToStages"]
assert len(stages) == 2
assert stages[initial_version] == ["AWSPREVIOUS"]
assert stages[new_version] == ["AWSCURRENT"]
test_client.post(
"/",
data={
"SecretId": "test-secret",
"VersionStage": "AWSCURRENT",
"RemoveFromVersionId": new_version,
"MoveToVersionId": initial_version,
},
headers={"X-Amz-Target": "secretsmanager.UpdateSecretVersionStage"},
)
describe_secret = test_client.post(
"/",
data={"SecretId": "test-secret"},
headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
)
json_data = json.loads(describe_secret.data.decode("utf-8"))
stages = json_data["SecretVersionsToStages"]
assert len(stages) == 2
assert stages[initial_version] == ["AWSCURRENT"]
assert stages[new_version] == ["AWSPREVIOUS"]
@mock_secretsmanager
def test_update_secret_version_stage_validation():
backend = server.create_backend_app("secretsmanager")
test_client = backend.test_client()
# Secret ID that doesn't exist
resp = test_client.post(
"/",
data={"SecretId": "nonexistent"},
headers={"X-Amz-Target": "secretsmanager.UpdateSecretVersionStage"},
)
assert resp.status_code == 404
# Add a secret so we can run further checks
secret = test_client.post(
"/",
data={"Name": DEFAULT_SECRET_NAME, "SecretString": "secret"},
headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
)
secret = json.loads(secret.data.decode("utf-8"))
# "Remove from" version ID that doesn't exist
resp = test_client.post(
"/",
data={"SecretId": DEFAULT_SECRET_NAME, "RemoveFromVersionId": "nonexistent"},
headers={"X-Amz-Target": "secretsmanager.UpdateSecretVersionStage"},
)
assert resp.status_code == 400
# "Remove from" stage name which isn't attached to the given version
resp = test_client.post(
"/",
data={
"SecretId": DEFAULT_SECRET_NAME,
"RemoveFromVersionId": secret["VersionId"],
"VersionStage": "nonexistent",
},
headers={"X-Amz-Target": "secretsmanager.UpdateSecretVersionStage"},
)
assert resp.status_code == 400
# "Move to" version ID that doesn't exist
resp = test_client.post(
"/",
data={"SecretId": DEFAULT_SECRET_NAME, "MoveToVersionId": "nonexistent",},
headers={"X-Amz-Target": "secretsmanager.UpdateSecretVersionStage"},
)
assert resp.status_code == 400
#
# The following tests should work, but fail on the embedded dict in
# RotationRules. The error message suggests a problem deeper in the code, which
# needs further investigation.
#
# @mock_secretsmanager
# def test_rotate_secret_rotation_period_zero():
# backend = server.create_backend_app('secretsmanager')
# test_client = backend.test_client()
# create_secret = test_client.post('/',
# data={"Name": "test-secret",
# "SecretString": "foosecret"},
# headers={
# "X-Amz-Target": "secretsmanager.CreateSecret"
# },
# )
# rotate_secret = test_client.post('/',
# data={"SecretId": "test-secret",
# "RotationRules": {"AutomaticallyAfterDays": 0}},
# headers={
# "X-Amz-Target": "secretsmanager.RotateSecret"
# },
# )
# json_data = json.loads(rotate_secret.data.decode("utf-8"))
# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
# assert json_data['__type'] == 'InvalidParameterException'
# @mock_secretsmanager
# def test_rotate_secret_rotation_period_too_long():
# backend = server.create_backend_app('secretsmanager')
# test_client = backend.test_client()
# create_secret = test_client.post('/',
# data={"Name": "test-secret",
# "SecretString": "foosecret"},
# headers={
# "X-Amz-Target": "secretsmanager.CreateSecret"
# },
# )
# rotate_secret = test_client.post('/',
# data={"SecretId": "test-secret",
# "RotationRules": {"AutomaticallyAfterDays": 1001}},
# headers={
# "X-Amz-Target": "secretsmanager.RotateSecret"
# },
# )
# json_data = json.loads(rotate_secret.data.decode("utf-8"))
# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
# assert json_data['__type'] == 'InvalidParameterException'
| 33.826707 | 98 | 0.62271 |
169ba1a32e401a532b258c5965c12379f6f58bbe
| 3,359 |
py
|
Python
|
douglas/tests/test_entries.py
|
willkg/douglas
|
7e46919d0baefecba414f41980cbe9c0529a884e
|
[
"MIT"
] | 1 |
2016-02-12T15:26:24.000Z
|
2016-02-12T15:26:24.000Z
|
douglas/tests/test_entries.py
|
willkg/douglas
|
7e46919d0baefecba414f41980cbe9c0529a884e
|
[
"MIT"
] | 1 |
2015-04-20T13:33:39.000Z
|
2015-04-20T13:33:39.000Z
|
douglas/tests/test_entries.py
|
willkg/douglas
|
7e46919d0baefecba414f41980cbe9c0529a884e
|
[
"MIT"
] | null | null | null |
import time
from os import environ
from nose.tools import eq_, raises
from douglas.entries.base import EntryBase, generate_entry
from douglas.tests import req_, UnitTestBase
TIME1 = (2008, 7, 21, 12, 51, 47, 0, 203, 1)
class TestEntryBase(UnitTestBase):
def force_tz(self):
"""
Force time zone to 'US/Eastern'.
Some of the above tests are time zone dependent.
"""
self.__tz = environ.get('TZ')
environ['TZ'] = 'US/Eastern'
time.tzset()
def restore_tz(self):
"""
Restore time zone to what it was before __force_tz() call.
"""
if self.__tz:
environ['TZ'] = self.__tz
self.__tz = None
else:
del environ['TZ']
time.tzset()
def test_time(self):
e = EntryBase(req_())
# set_time takes local time, and results depend on time zone.
self.force_tz()
e.set_time(TIME1)
self.restore_tz()
tests = [
('timetuple', TIME1),
('mtime', 1216659107.0),
('ti', '12:51'),
('mo', 'Jul'),
('mo_num', '07'),
('da', '21'),
('dw', 'Monday'),
('yr', '2008'),
('fulltime', '20080721125147'),
('date', 'Mon, 21 Jul 2008'),
('w3cdate', '2008-07-21T16:51:47Z'),
('rfc822date', 'Mon, 21 Jul 2008 16:51 GMT')
]
for key, expected in tests:
eq_(e[key], expected)
def test_dictlike(self):
e = EntryBase(req_())
e['foo'] = 'bar'
e['body'] = 'entry body'
eq_(sorted(e.keys()), ['body', 'foo'])
eq_(e['foo'], 'bar')
eq_(e.get('foo'), 'bar')
eq_(e.get('foo', 'fickle'), 'bar')
eq_(e['body'], 'entry body', 'e[\'body\']')
eq_(e.get('body'), 'entry body', 'e.get(\'body\')')
eq_(e.get('missing_key', 'default'), 'default')
eq_(e.get('missing_key'), None)
eq_('foo' in e, True)
eq_('foo2' in e, False)
eq_('foo2' not in e, True)
eq_('body' in e, True)
e.update({'foo': 'bah', 'faux': 'pearls'})
eq_(e['foo'], 'bah')
eq_(e['faux'], 'pearls')
e.update({'body': 'new body data'})
eq_(e['body'], 'new body data')
del e['foo']
eq_(e.get('foo'), None)
@raises(KeyError)
def test_delitem_keyerror(self):
e = EntryBase(req_())
del e['missing_key']
@raises(KeyError)
def test_delitem_valueerror(self):
e = EntryBase(req_())
del e['body']
def test_generate_entry(self):
# generate_entry takes local time, and we test the resulting
# rfc822date which is UTC. Result depends on time zone.
self.force_tz()
e = generate_entry(req_(), {'foo': 'bar'}, 'entry body', TIME1)
self.restore_tz()
eq_(e['foo'], 'bar')
eq_(e['body'], 'entry body')
eq_(e['rfc822date'], 'Mon, 21 Jul 2008 16:51 GMT')
e = generate_entry(req_(), {'foo': 'bar'}, 'entry body')
def test_repr(self):
# it doesn't really matter what __repr__ sends back--it's only used
# for logging/debugging. so this test adds coverage for that line to
# make sure it doesn't error out.
e = EntryBase(req_())
repr(e)
| 27.760331 | 77 | 0.51682 |
c3ed367f89be0160137704dcb23dc7d1906f9ed0
| 6,382 |
py
|
Python
|
pymodbus/events.py
|
vmacari/pymodbus
|
ec97e2f2b50c6db0a932f44e550a5dee60bf0970
|
[
"BSD-3-Clause"
] | 1,125 |
2017-05-11T06:11:36.000Z
|
2022-03-31T02:59:45.000Z
|
pymodbus/events.py
|
vmacari/pymodbus
|
ec97e2f2b50c6db0a932f44e550a5dee60bf0970
|
[
"BSD-3-Clause"
] | 575 |
2017-05-12T02:46:55.000Z
|
2022-03-31T16:00:33.000Z
|
pymodbus/events.py
|
vmacari/pymodbus
|
ec97e2f2b50c6db0a932f44e550a5dee60bf0970
|
[
"BSD-3-Clause"
] | 516 |
2017-05-19T14:06:06.000Z
|
2022-03-31T06:10:13.000Z
|
'''
Modbus Remote Events
------------------------------------------------------------
An event byte returned by the Get Communications Event Log function
can be any one of four types. The type is defined by bit 7
(the high-order bit) in each byte. It may be further defined by bit 6.
'''
from pymodbus.exceptions import NotImplementedException
from pymodbus.exceptions import ParameterException
from pymodbus.utilities import pack_bitstring, unpack_bitstring
class ModbusEvent(object):
def encode(self):
''' Encodes the status bits to an event message
:returns: The encoded event message
'''
raise NotImplementedException()
def decode(self, event):
''' Decodes the event message to its status bits
:param event: The event to decode
'''
raise NotImplementedException()
class RemoteReceiveEvent(ModbusEvent):
''' Remote device MODBUS Receive Event
The remote device stores this type of event byte when a query message
is received. It is stored before the remote device processes the message.
This event is defined by bit 7 set to logic '1'. The other bits will be
set to a logic '1' if the corresponding condition is TRUE. The bit layout
is::
Bit Contents
----------------------------------
0 Not Used
2 Not Used
3 Not Used
4 Character Overrun
5 Currently in Listen Only Mode
6 Broadcast Receive
7 1
'''
def __init__(self, **kwargs):
''' Initialize a new event instance
'''
self.overrun = kwargs.get('overrun', False)
self.listen = kwargs.get('listen', False)
self.broadcast = kwargs.get('broadcast', False)
def encode(self):
''' Encodes the status bits to an event message
:returns: The encoded event message
'''
        bits = [False] * 4  # bits 0 - 3 are unused for a receive event (see the bit layout above)
bits += [self.overrun, self.listen, self.broadcast, True]
packet = pack_bitstring(bits)
return packet
def decode(self, event):
''' Decodes the event message to its status bits
:param event: The event to decode
'''
bits = unpack_bitstring(event)
self.overrun = bits[4]
self.listen = bits[5]
self.broadcast = bits[6]
class RemoteSendEvent(ModbusEvent):
''' Remote device MODBUS Send Event
The remote device stores this type of event byte when it finishes
processing a request message. It is stored if the remote device
returned a normal or exception response, or no response.
This event is defined by bit 7 set to a logic '0', with bit 6 set to a '1'.
The other bits will be set to a logic '1' if the corresponding
condition is TRUE. The bit layout is::
Bit Contents
-----------------------------------------------------------
0 Read Exception Sent (Exception Codes 1-3)
1 Slave Abort Exception Sent (Exception Code 4)
2 Slave Busy Exception Sent (Exception Codes 5-6)
3 Slave Program NAK Exception Sent (Exception Code 7)
4 Write Timeout Error Occurred
5 Currently in Listen Only Mode
6 1
7 0
'''
def __init__(self, **kwargs):
''' Initialize a new event instance
'''
self.read = kwargs.get('read', False)
self.slave_abort = kwargs.get('slave_abort', False)
self.slave_busy = kwargs.get('slave_busy', False)
self.slave_nak = kwargs.get('slave_nak', False)
self.write_timeout = kwargs.get('write_timeout', False)
self.listen = kwargs.get('listen', False)
def encode(self):
''' Encodes the status bits to an event message
:returns: The encoded event message
'''
bits = [self.read, self.slave_abort, self.slave_busy,
self.slave_nak, self.write_timeout, self.listen]
bits += [True, False]
packet = pack_bitstring(bits)
return packet
def decode(self, event):
''' Decodes the event message to its status bits
:param event: The event to decode
'''
# todo fix the start byte count
bits = unpack_bitstring(event)
self.read = bits[0]
self.slave_abort = bits[1]
self.slave_busy = bits[2]
self.slave_nak = bits[3]
self.write_timeout = bits[4]
self.listen = bits[5]
class EnteredListenModeEvent(ModbusEvent):
''' Remote device Entered Listen Only Mode
The remote device stores this type of event byte when it enters
the Listen Only Mode. The event is defined by a content of 04 hex.
'''
value = 0x04
__encoded = b'\x04'
def encode(self):
''' Encodes the status bits to an event message
:returns: The encoded event message
'''
return self.__encoded
def decode(self, event):
''' Decodes the event message to its status bits
:param event: The event to decode
'''
if event != self.__encoded:
raise ParameterException('Invalid decoded value')
class CommunicationRestartEvent(ModbusEvent):
''' Remote device Initiated Communication Restart
The remote device stores this type of event byte when its communications
port is restarted. The remote device can be restarted by the Diagnostics
function (code 08), with sub-function Restart Communications Option
(code 00 01).
That function also places the remote device into a 'Continue on Error'
or 'Stop on Error' mode. If the remote device is placed into 'Continue on
Error' mode, the event byte is added to the existing event log. If the
remote device is placed into 'Stop on Error' mode, the byte is added to
the log and the rest of the log is cleared to zeros.
The event is defined by a content of zero.
'''
value = 0x00
__encoded = b'\x00'
def encode(self):
''' Encodes the status bits to an event message
:returns: The encoded event message
'''
return self.__encoded
def decode(self, event):
''' Decodes the event message to its status bits
:param event: The event to decode
'''
if event != self.__encoded:
raise ParameterException('Invalid decoded value')
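# --- Illustrative sketch added by the editor; not part of the original pymodbus module ---
# The docstrings above describe how a byte from the communications event log is
# classified: receive events set bit 7, send events clear bit 7 and set bit 6, and the
# two remaining event types use the fixed contents 0x04 and 0x00. The helper below is a
# minimal classifier based only on that description; the name `classify_event_byte` is
# hypothetical and is not provided by pymodbus.
def classify_event_byte(value):
    ''' Map one event-log byte to the event class suggested by the docstrings above
    :param value: The raw event byte (0 - 255)
    :returns: The matching ModbusEvent subclass
    '''
    if value & 0x80:
        return RemoteReceiveEvent         # bit 7 set -> receive event
    if value & 0x40:
        return RemoteSendEvent            # bit 7 clear, bit 6 set -> send event
    if value == EnteredListenModeEvent.value:
        return EnteredListenModeEvent     # fixed content 0x04
    if value == CommunicationRestartEvent.value:
        return CommunicationRestartEvent  # fixed content 0x00
    raise ParameterException('Unrecognised event byte: 0x%02x' % value)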
| 32.232323 | 79 | 0.618145 |
a2a577a0753f7554ca86ee32be2a9a735fe0d983 | 25,151 | py | Python | pandas/tests/io/formats/test_to_html.py | sofiane87/pandas | 0de99558b497c5611cbe5d35d504763bd7692275 | ["BSD-3-Clause"] | 2 | 2019-11-13T18:20:29.000Z | 2020-04-18T02:58:39.000Z | pandas/tests/io/formats/test_to_html.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | ["BSD-3-Clause"] | null | null | null | pandas/tests/io/formats/test_to_html.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | ["BSD-3-Clause"] | 2 | 2019-12-21T21:17:43.000Z | 2019-12-26T10:34:36.000Z |
from datetime import datetime
from io import StringIO
import re
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, option_context
import pandas.util.testing as tm
import pandas.io.formats.format as fmt
lorem_ipsum = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex"
" ea commodo consequat. Duis aute irure dolor in reprehenderit in"
" voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur"
" sint occaecat cupidatat non proident, sunt in culpa qui officia"
" deserunt mollit anim id est laborum."
)
def expected_html(datapath, name):
"""
Read HTML file from formats data directory.
Parameters
----------
datapath : pytest fixture
The datapath fixture injected into a test by pytest.
name : str
The name of the HTML file without the suffix.
Returns
-------
str : contents of HTML file.
"""
filename = ".".join([name, "html"])
filepath = datapath("io", "formats", "data", "html", filename)
with open(filepath, encoding="utf-8") as f:
html = f.read()
return html.rstrip()
@pytest.fixture(params=["mixed", "empty"])
def biggie_df_fixture(request):
"""Fixture for a big mixed Dataframe and an empty Dataframe"""
if request.param == "mixed":
df = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
df.loc[:20, "A"] = np.nan
df.loc[:20, "B"] = np.nan
return df
elif request.param == "empty":
df = DataFrame(index=np.arange(200))
return df
@pytest.fixture(params=fmt._VALID_JUSTIFY_PARAMETERS)
def justify(request):
return request.param
@pytest.mark.parametrize("col_space", [30, 50])
def test_to_html_with_col_space(col_space):
df = DataFrame(np.random.random(size=(1, 3)))
# check that col_space affects HTML generation
# and be very brittle about it.
result = df.to_html(col_space=col_space)
    result = result.split("tbody")[0]  # only the header cells (thead) carry the min-width style
    hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
assert len(hdrs) > 0
for h in hdrs:
assert "min-width" in h
assert str(col_space) in h
def test_to_html_with_empty_string_label():
# GH 3547, to_html regards empty string labels as repeated labels
data = {"c1": ["a", "b"], "c2": ["a", ""], "data": [1, 2]}
df = DataFrame(data).set_index(["c1", "c2"])
result = df.to_html()
assert "rowspan" not in result
@pytest.mark.parametrize(
"df,expected",
[
(DataFrame({"\u03c3": np.arange(10.0)}), "unicode_1"),
(DataFrame({"A": ["\u03c3"]}), "unicode_2"),
],
)
def test_to_html_unicode(df, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html()
assert result == expected
def test_to_html_encoding(float_frame, tmp_path):
# GH 28663
path = tmp_path / "test.html"
float_frame.to_html(path, encoding="gbk")
with open(str(path), "r", encoding="gbk") as f:
assert float_frame.to_html() == f.read()
def test_to_html_decimal(datapath):
# GH 12031
df = DataFrame({"A": [6.0, 3.1, 2.2]})
result = df.to_html(decimal=",")
expected = expected_html(datapath, "gh12031_expected_output")
assert result == expected
@pytest.mark.parametrize(
"kwargs,string,expected",
[
(dict(), "<type 'str'>", "escaped"),
(dict(escape=False), "<b>bold</b>", "escape_disabled"),
],
)
def test_to_html_escaped(kwargs, string, expected, datapath):
a = "str<ing1 &"
b = "stri>ng2 &"
test_dict = {"co<l1": {a: string, b: string}, "co>l2": {a: string, b: string}}
result = DataFrame(test_dict).to_html(**kwargs)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize("index_is_named", [True, False])
def test_to_html_multiindex_index_false(index_is_named, datapath):
# GH 8452
df = DataFrame(
{"a": range(2), "b": range(3, 5), "c": range(5, 7), "d": range(3, 5)}
)
df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
if index_is_named:
df.index = Index(df.index.values, name="idx")
result = df.to_html(index=False)
expected = expected_html(datapath, "gh8452_expected_output")
assert result == expected
@pytest.mark.parametrize(
"multi_sparse,expected",
[
(False, "multiindex_sparsify_false_multi_sparse_1"),
(False, "multiindex_sparsify_false_multi_sparse_2"),
(True, "multiindex_sparsify_1"),
(True, "multiindex_sparsify_2"),
],
)
def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], names=["foo", None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
if expected.endswith("2"):
df.columns = index[::2]
with option_context("display.multi_sparse", multi_sparse):
result = df.to_html()
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize(
"max_rows,expected",
[
(60, "gh14882_expected_output_1"),
# Test that ... appears in a middle level
(56, "gh14882_expected_output_2"),
],
)
def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
# GH 14882 - Issue on truncation with odd length DataFrame
index = MultiIndex.from_product(
[[100, 200, 300], [10, 20, 30], [1, 2, 3, 4, 5, 6, 7]], names=["a", "b", "c"]
)
df = DataFrame({"n": range(len(index))}, index=index)
result = df.to_html(max_rows=max_rows)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize(
"df,formatters,expected",
[
(
DataFrame(
[[0, 1], [2, 3], [4, 5], [6, 7]],
columns=["foo", None],
index=np.arange(4),
),
{"__index__": lambda x: "abcd"[x]},
"index_formatter",
),
(
DataFrame({"months": [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
{"months": lambda x: x.strftime("%Y-%m")},
"datetime64_monthformatter",
),
(
DataFrame(
{
"hod": pd.to_datetime(
["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f"
)
}
),
{"hod": lambda x: x.strftime("%H:%M")},
"datetime64_hourformatter",
),
],
)
def test_to_html_formatters(df, formatters, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html(formatters=formatters)
assert result == expected
def test_to_html_regression_GH6098():
df = DataFrame(
{
"clé1": ["a", "a", "b", "b", "a"],
"clé2": ["1er", "2ème", "1er", "2ème", "1er"],
"données1": np.random.randn(5),
"données2": np.random.randn(5),
}
)
# it works
df.pivot_table(index=["clé1"], columns=["clé2"])._repr_html_()
def test_to_html_truncate(datapath):
index = pd.date_range(start="20010101", freq="D", periods=20)
df = DataFrame(index=index, columns=range(20))
result = df.to_html(max_rows=8, max_cols=4)
expected = expected_html(datapath, "truncate")
assert result == expected
@pytest.mark.parametrize("size", [1, 5])
def test_html_invalid_formatters_arg_raises(size):
# issue-28469
df = DataFrame(columns=["a", "b", "c"])
msg = "Formatters length({}) should match DataFrame number of columns(3)"
with pytest.raises(ValueError, match=re.escape(msg.format(size))):
df.to_html(formatters=["{}".format] * size)
def test_to_html_truncate_formatter(datapath):
# issue-25955
data = [
{"A": 1, "B": 2, "C": 3, "D": 4},
{"A": 5, "B": 6, "C": 7, "D": 8},
{"A": 9, "B": 10, "C": 11, "D": 12},
{"A": 13, "B": 14, "C": 15, "D": 16},
]
df = DataFrame(data)
fmt = lambda x: str(x) + "_mod"
formatters = [fmt, fmt, None, None]
result = df.to_html(formatters=formatters, max_cols=3)
expected = expected_html(datapath, "truncate_formatter")
assert result == expected
@pytest.mark.parametrize(
"sparsify,expected",
[(True, "truncate_multi_index"), (False, "truncate_multi_index_sparse_off")],
)
def test_to_html_truncate_multi_index(sparsify, expected, datapath):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize(
"option,result,expected",
[
(None, lambda df: df.to_html(), "1"),
(None, lambda df: df.to_html(border=0), "0"),
(0, lambda df: df.to_html(), "0"),
(0, lambda df: df._repr_html_(), "0"),
],
)
def test_to_html_border(option, result, expected):
df = DataFrame({"A": [1, 2]})
if option is None:
result = result(df)
else:
with option_context("display.html.border", option):
result = result(df)
expected = 'border="{}"'.format(expected)
assert expected in result
@pytest.mark.parametrize("biggie_df_fixture", ["mixed"], indirect=True)
def test_to_html(biggie_df_fixture):
# TODO: split this test
df = biggie_df_fixture
s = df.to_html()
buf = StringIO()
retval = df.to_html(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
df.to_html(columns=["B", "A"], col_space=17)
df.to_html(columns=["B", "A"], formatters={"A": lambda x: "{x:.1f}".format(x=x)})
df.to_html(columns=["B", "A"], float_format=str)
df.to_html(columns=["B", "A"], col_space=12, float_format=str)
@pytest.mark.parametrize("biggie_df_fixture", ["empty"], indirect=True)
def test_to_html_empty_dataframe(biggie_df_fixture):
df = biggie_df_fixture
df.to_html()
def test_to_html_filename(biggie_df_fixture, tmpdir):
df = biggie_df_fixture
expected = df.to_html()
path = tmpdir.join("test.html")
df.to_html(path)
result = path.read()
assert result == expected
def test_to_html_with_no_bold():
df = DataFrame({"x": np.random.randn(5)})
html = df.to_html(bold_rows=False)
result = html[html.find("</thead>")]
assert "<strong" not in result
def test_to_html_columns_arg(float_frame):
result = float_frame.to_html(columns=["A"])
assert "<th>B</th>" not in result
@pytest.mark.parametrize(
"columns,justify,expected",
[
(
MultiIndex.from_tuples(
list(zip(np.arange(2).repeat(2), np.mod(range(4), 2))),
names=["CL0", "CL1"],
),
"left",
"multiindex_1",
),
(
MultiIndex.from_tuples(list(zip(range(4), np.mod(range(4), 2)))),
"right",
"multiindex_2",
),
],
)
def test_to_html_multiindex(columns, justify, expected, datapath):
df = DataFrame([list("abcd"), list("efgh")], columns=columns)
result = df.to_html(justify=justify)
expected = expected_html(datapath, expected)
assert result == expected
def test_to_html_justify(justify, datapath):
df = DataFrame(
{"A": [6, 30000, 2], "B": [1, 2, 70000], "C": [223442, 0, 1]},
columns=["A", "B", "C"],
)
result = df.to_html(justify=justify)
expected = expected_html(datapath, "justify").format(justify=justify)
assert result == expected
@pytest.mark.parametrize(
"justify", ["super-right", "small-left", "noinherit", "tiny", "pandas"]
)
def test_to_html_invalid_justify(justify):
# GH 17527
df = DataFrame()
msg = "Invalid value for justify parameter"
with pytest.raises(ValueError, match=msg):
df.to_html(justify=justify)
def test_to_html_index(datapath):
# TODO: split this test
index = ["foo", "bar", "baz"]
df = DataFrame(
{"A": [1, 2, 3], "B": [1.2, 3.4, 5.6], "C": ["one", "two", np.nan]},
columns=["A", "B", "C"],
index=index,
)
expected_with_index = expected_html(datapath, "index_1")
assert df.to_html() == expected_with_index
expected_without_index = expected_html(datapath, "index_2")
result = df.to_html(index=False)
for i in index:
assert i not in result
assert result == expected_without_index
df.index = Index(["foo", "bar", "baz"], name="idx")
expected_with_index = expected_html(datapath, "index_3")
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
df.index = MultiIndex.from_tuples(tuples)
expected_with_index = expected_html(datapath, "index_4")
assert df.to_html() == expected_with_index
result = df.to_html(index=False)
for i in ["foo", "bar", "car", "bike"]:
assert i not in result
# must be the same result as normal index
assert result == expected_without_index
df.index = MultiIndex.from_tuples(tuples, names=["idx1", "idx2"])
expected_with_index = expected_html(datapath, "index_5")
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
@pytest.mark.parametrize("classes", ["sortable draggable", ["sortable", "draggable"]])
def test_to_html_with_classes(classes, datapath):
df = DataFrame()
expected = expected_html(datapath, "with_classes")
result = df.to_html(classes=classes)
assert result == expected
def test_to_html_no_index_max_rows(datapath):
# GH 14998
df = DataFrame({"A": [1, 2, 3, 4]})
result = df.to_html(index=False, max_rows=1)
expected = expected_html(datapath, "gh14998_expected_output")
assert result == expected
def test_to_html_multiindex_max_cols(datapath):
# GH 6131
index = MultiIndex(
levels=[["ba", "bb", "bc"], ["ca", "cb", "cc"]],
codes=[[0, 1, 2], [0, 1, 2]],
names=["b", "c"],
)
columns = MultiIndex(
levels=[["d"], ["aa", "ab", "ac"]],
codes=[[0, 0, 0], [0, 1, 2]],
names=[None, "a"],
)
data = np.array(
[[1.0, np.nan, np.nan], [np.nan, 2.0, np.nan], [np.nan, np.nan, 3.0]]
)
df = DataFrame(data, index, columns)
result = df.to_html(max_cols=2)
expected = expected_html(datapath, "gh6131_expected_output")
assert result == expected
def test_to_html_multi_indexes_index_false(datapath):
# GH 22579
df = DataFrame(
{"a": range(10), "b": range(10, 20), "c": range(10, 20), "d": range(10, 20)}
)
df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
df.index = MultiIndex.from_product([["a", "b"], ["c", "d", "e", "f", "g"]])
result = df.to_html(index=False)
expected = expected_html(datapath, "gh22579_expected_output")
assert result == expected
@pytest.mark.parametrize("index_names", [True, False])
@pytest.mark.parametrize("header", [True, False])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize(
"column_index, column_type",
[
(Index([0, 1]), "unnamed_standard"),
(Index([0, 1], name="columns.name"), "named_standard"),
(MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
(
MultiIndex.from_product(
[["a"], ["b", "c"]], names=["columns.name.0", "columns.name.1"]
),
"named_multi",
),
],
)
@pytest.mark.parametrize(
"row_index, row_type",
[
(Index([0, 1]), "unnamed_standard"),
(Index([0, 1], name="index.name"), "named_standard"),
(MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
(
MultiIndex.from_product(
[["a"], ["b", "c"]], names=["index.name.0", "index.name.1"]
),
"named_multi",
),
],
)
def test_to_html_basic_alignment(
datapath, row_index, row_type, column_index, column_type, index, header, index_names
):
# GH 22747, GH 22579
df = DataFrame(np.zeros((2, 2), dtype=int), index=row_index, columns=column_index)
result = df.to_html(index=index, header=header, index_names=index_names)
if not index:
row_type = "none"
elif not index_names and row_type.startswith("named"):
row_type = "un" + row_type
if not header:
column_type = "none"
elif not index_names and column_type.startswith("named"):
column_type = "un" + column_type
filename = "index_" + row_type + "_columns_" + column_type
expected = expected_html(datapath, filename)
assert result == expected
@pytest.mark.parametrize("index_names", [True, False])
@pytest.mark.parametrize("header", [True, False])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize(
"column_index, column_type",
[
(Index(np.arange(8)), "unnamed_standard"),
(Index(np.arange(8), name="columns.name"), "named_standard"),
(
MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
"unnamed_multi",
),
(
MultiIndex.from_product(
[["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
),
"named_multi",
),
],
)
@pytest.mark.parametrize(
"row_index, row_type",
[
(Index(np.arange(8)), "unnamed_standard"),
(Index(np.arange(8), name="index.name"), "named_standard"),
(
MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
"unnamed_multi",
),
(
MultiIndex.from_product(
[["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
),
"named_multi",
),
],
)
def test_to_html_alignment_with_truncation(
datapath, row_index, row_type, column_index, column_type, index, header, index_names
):
# GH 22747, GH 22579
df = DataFrame(np.arange(64).reshape(8, 8), index=row_index, columns=column_index)
result = df.to_html(
max_rows=4, max_cols=4, index=index, header=header, index_names=index_names
)
if not index:
row_type = "none"
elif not index_names and row_type.startswith("named"):
row_type = "un" + row_type
if not header:
column_type = "none"
elif not index_names and column_type.startswith("named"):
column_type = "un" + column_type
filename = "trunc_df_index_" + row_type + "_columns_" + column_type
expected = expected_html(datapath, filename)
assert result == expected
@pytest.mark.parametrize("index", [False, 0])
def test_to_html_truncation_index_false_max_rows(datapath, index):
# GH 15019
data = [
[1.764052, 0.400157],
[0.978738, 2.240893],
[1.867558, -0.977278],
[0.950088, -0.151357],
[-0.103219, 0.410599],
]
df = DataFrame(data)
result = df.to_html(max_rows=4, index=index)
expected = expected_html(datapath, "gh15019_expected_output")
assert result == expected
@pytest.mark.parametrize("index", [False, 0])
@pytest.mark.parametrize(
"col_index_named, expected_output",
[(False, "gh22783_expected_output"), (True, "gh22783_named_columns_index")],
)
def test_to_html_truncation_index_false_max_cols(
datapath, index, col_index_named, expected_output
):
# GH 22783
data = [
[1.764052, 0.400157, 0.978738, 2.240893, 1.867558],
[-0.977278, 0.950088, -0.151357, -0.103219, 0.410599],
]
df = DataFrame(data)
if col_index_named:
df.columns.rename("columns.name", inplace=True)
result = df.to_html(max_cols=4, index=index)
expected = expected_html(datapath, expected_output)
assert result == expected
@pytest.mark.parametrize("notebook", [True, False])
def test_to_html_notebook_has_style(notebook):
df = DataFrame({"A": [1, 2, 3]})
result = df.to_html(notebook=notebook)
if notebook:
assert "tbody tr th:only-of-type" in result
assert "vertical-align: middle;" in result
assert "thead th" in result
else:
assert "tbody tr th:only-of-type" not in result
assert "vertical-align: middle;" not in result
assert "thead th" not in result
def test_to_html_with_index_names_false():
# GH 16493
df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
result = df.to_html(index_names=False)
assert "myindexname" not in result
def test_to_html_with_id():
# GH 8496
df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
result = df.to_html(index_names=False, table_id="TEST_ID")
assert ' id="TEST_ID"' in result
@pytest.mark.parametrize(
"value,float_format,expected",
[
(0.19999, "%.3f", "gh21625_expected_output"),
(100.0, "%.0f", "gh22270_expected_output"),
],
)
def test_to_html_float_format_no_fixed_width(value, float_format, expected, datapath):
# GH 21625, GH 22270
df = DataFrame({"x": [value]})
expected = expected_html(datapath, expected)
result = df.to_html(float_format=float_format)
assert result == expected
@pytest.mark.parametrize(
"render_links,expected",
[(True, "render_links_true"), (False, "render_links_false")],
)
def test_to_html_render_links(render_links, expected, datapath):
# GH 2679
data = [
[0, "http://pandas.pydata.org/?q1=a&q2=b", "pydata.org"],
[0, "www.pydata.org", "pydata.org"],
]
df = DataFrame(data, columns=["foo", "bar", None])
result = df.to_html(render_links=render_links)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize(
"method,expected",
[
("to_html", lambda x: lorem_ipsum),
("_repr_html_", lambda x: lorem_ipsum[: x - 4] + "..."), # regression case
],
)
@pytest.mark.parametrize("max_colwidth", [10, 20, 50, 100])
def test_ignore_display_max_colwidth(method, expected, max_colwidth):
# see gh-17004
df = DataFrame([lorem_ipsum])
with pd.option_context("display.max_colwidth", max_colwidth):
result = getattr(df, method)()
expected = expected(max_colwidth)
assert expected in result
@pytest.mark.parametrize("classes", [True, 0])
def test_to_html_invalid_classes_type(classes):
# GH 25608
df = DataFrame()
msg = "classes must be a string, list, or tuple"
with pytest.raises(TypeError, match=msg):
df.to_html(classes=classes)
def test_to_html_round_column_headers():
# GH 17280
df = DataFrame([1], columns=[0.55555])
with pd.option_context("display.precision", 3):
html = df.to_html(notebook=False)
notebook = df.to_html(notebook=True)
assert "0.55555" in html
assert "0.556" in notebook
@pytest.mark.parametrize("unit", ["100px", "10%", "5em", 150])
def test_to_html_with_col_space_units(unit):
# GH 25941
df = DataFrame(np.random.random(size=(1, 3)))
result = df.to_html(col_space=unit)
result = result.split("tbody")[0]
hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
if isinstance(unit, int):
unit = str(unit) + "px"
for h in hdrs:
expected = '<th style="min-width: {unit};">'.format(unit=unit)
assert expected in h
def test_html_repr_min_rows_default(datapath):
# gh-27991
# default setting no truncation even if above min_rows
df = pd.DataFrame({"a": range(20)})
result = df._repr_html_()
expected = expected_html(datapath, "html_repr_min_rows_default_no_truncation")
assert result == expected
# default of max_rows 60 triggers truncation if above
df = pd.DataFrame({"a": range(61)})
result = df._repr_html_()
expected = expected_html(datapath, "html_repr_min_rows_default_truncated")
assert result == expected
@pytest.mark.parametrize(
"max_rows,min_rows,expected",
[
# truncated after first two rows
(10, 4, "html_repr_max_rows_10_min_rows_4"),
# when set to None, follow value of max_rows
(12, None, "html_repr_max_rows_12_min_rows_None"),
# when set value higher as max_rows, use the minimum
(10, 12, "html_repr_max_rows_10_min_rows_12"),
# max_rows of None -> never truncate
(None, 12, "html_repr_max_rows_None_min_rows_12"),
],
)
def test_html_repr_min_rows(datapath, max_rows, min_rows, expected):
# gh-27991
df = pd.DataFrame({"a": range(61)})
expected = expected_html(datapath, expected)
with option_context("display.max_rows", max_rows, "display.min_rows", min_rows):
result = df._repr_html_()
assert result == expected
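# --- Illustrative sketch added by the editor; not part of the original pandas test module ---
# The tests above exercise DataFrame.to_html keyword arguments one at a time. The helper
# below simply combines several of those arguments in a single call to show the intended
# usage outside of a test harness; the DataFrame contents are arbitrary example values.
def _example_to_html_usage():
    df = DataFrame(
        {"price": [0.19999, 100.0], "link": ["http://pandas.pydata.org", "www.pydata.org"]}
    )
    return df.to_html(
        float_format="%.2f",  # formatting, as in test_to_html_float_format_no_fixed_width
        render_links=True,  # clickable URLs, as in test_to_html_render_links
        classes=["sortable", "draggable"],  # CSS classes, as in test_to_html_with_classes
        border=0,  # border attribute, as in test_to_html_border
        index=False,  # omit the index column, as in test_to_html_index
    )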
| 31.87706 | 88 | 0.615522 |
d527f4372310ee4c76af52e21eca48e6eabf354d | 6,417 | py | Python | src/python/pants/backend/python/tasks/checkstyle/import_order.py | qma/pants | 604f58a366b66bc5cfa83e7250cb8af8130832cf | ["Apache-2.0"] | null | null | null | src/python/pants/backend/python/tasks/checkstyle/import_order.py | qma/pants | 604f58a366b66bc5cfa83e7250cb8af8130832cf | ["Apache-2.0"] | null | null | null | src/python/pants/backend/python/tasks/checkstyle/import_order.py | qma/pants | 604f58a366b66bc5cfa83e7250cb8af8130832cf | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
import os
from distutils import sysconfig
from pants.backend.python.tasks.checkstyle.common import CheckstylePlugin
class ImportType(object):
"""Enforce a consistent import order.
Imports are currently grouped into five separate groups:
stdlib
twitter
gen
package-local
third-party
Imports should be in this order and separated by a single space.
"""
STDLIB = 1
TWITTER = 2
GEN = 3
PACKAGE = 4
THIRD_PARTY = 5
UNKNOWN = 0
NAMES = {
UNKNOWN: 'unknown',
STDLIB: 'stdlib',
TWITTER: 'twitter',
GEN: 'gen',
PACKAGE: 'package',
THIRD_PARTY: '3rdparty'
}
@classmethod
def order_names(cls, import_order):
return ' '.join(cls.NAMES.get(import_id, 'unknown') for import_id in import_order)
class ImportOrder(CheckstylePlugin):
# TODO(wickman)
# - Warn if a package is marked as a 3rdparty but it's actually a package
# in the current working directory that should be a package-absolute
# import (i.e. from __future__ import absolute_imports)
STANDARD_LIB_PATH = os.path.realpath(sysconfig.get_python_lib(standard_lib=1))
@classmethod
def extract_import_modules(cls, node):
if isinstance(node, ast.Import):
return [alias.name for alias in node.names]
elif isinstance(node, ast.ImportFrom):
return [node.module]
return []
@classmethod
def classify_import(cls, node, name):
if name == '' or (isinstance(node, ast.ImportFrom) and node.level > 0):
return ImportType.PACKAGE
if name.startswith('twitter.'):
return ImportType.TWITTER
if name.startswith('gen.'):
return ImportType.GEN
try:
module = __import__(name)
except ImportError:
return ImportType.THIRD_PARTY
if (not hasattr(module, '__file__') or
os.path.realpath(module.__file__).startswith(cls.STANDARD_LIB_PATH)):
return ImportType.STDLIB
# Assume anything we can't classify is third-party
return ImportType.THIRD_PARTY
@classmethod
def classify_import_node(cls, node):
return set(cls.classify_import(node, module_name)
for module_name in cls.extract_import_modules(node))
def import_errors(self, node):
errors = []
if isinstance(node, ast.ImportFrom):
if len(node.names) == 1 and node.names[0].name == '*':
errors.append(self.error('T400', 'Wildcard imports are not allowed.', node))
names = [alias.name.lower() for alias in node.names]
if names != sorted(names):
errors.append(self.error('T401', 'From import must import names in lexical order.', node))
if isinstance(node, ast.Import):
if len(node.names) > 1:
errors.append(self.error('T402',
'Absolute import statements should only import one module at a time.', node))
return errors
def classify_imports(self, chunk):
"""
Possible import statements:
import name
from name import subname
from name import subname1 as subname2
from name import *
from name import tuple
AST representations:
ImportFrom:
module=name
names=[alias(name, asname), ...]
name can be '*'
Import:
names=[alias(name, asname), ...]
Imports are classified into 5 classes:
stdlib => Python standard library
twitter.* => Twitter internal / standard library
gen.* => Thrift gen namespaces
.* => Package-local imports
3rdparty => site-packages or third party
classify_imports classifies the import into one of these forms.
"""
errors = []
all_module_types = set()
for node in chunk:
errors.extend(self.import_errors(node))
module_types = self.classify_import_node(node)
if len(module_types) > 1:
errors.append(self.error(
'T403',
'Import statement imports from multiple module types: {types}.'.format(
types=ImportType.order_names(module_types)),
node))
if ImportType.UNKNOWN in module_types:
errors.append(self.warning('T404', 'Unclassifiable import.', node))
all_module_types.update(module_types)
if len(chunk) > 0 and len(all_module_types) > 1:
errors.append(
self.error(
'T405',
'Import block starting here contains imports '
'from multiple module types: {types}.'.format(
types=ImportType.order_names(all_module_types)),
chunk[0].lineno))
return all_module_types, errors
# TODO(wickman) Classify imports within top-level try/except ImportError blocks.
def iter_import_chunks(self):
"""Iterate over space-separated import chunks in a file."""
chunk = []
last_line = None
for leaf in self.python_file.tree.body:
if isinstance(leaf, (ast.Import, ast.ImportFrom)):
# we've seen previous imports but this import is not in the same chunk
if last_line and leaf.lineno != last_line[1]:
yield chunk
chunk = [leaf]
# we've either not seen previous imports or this is part of the same chunk
elif not last_line or last_line and leaf.lineno == last_line[1]:
chunk.append(leaf)
last_line = self.python_file.logical_lines[leaf.lineno]
if chunk:
yield chunk
def nits(self):
errors = []
module_order = []
for chunk in self.iter_import_chunks():
module_types, chunk_errors = self.classify_imports(chunk)
errors.extend(chunk_errors)
module_order.append(list(module_types))
numbered_module_order = []
for modules in module_order:
if len(modules) > 0:
if modules[0] is not ImportType.UNKNOWN:
numbered_module_order.append(modules[0])
if numbered_module_order != sorted(numbered_module_order):
errors.append(self.error('T406',
'Out of order import chunks: Got {} and expect {}.'.format(
ImportType.order_names(numbered_module_order),
ImportType.order_names(sorted(numbered_module_order))),
self.python_file.tree))
return errors
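# --- Illustrative sketch added by the editor; not part of the original plugin ---
# ImportType.order_names maps the numeric classifications documented above back to their
# human readable names; this is how the T403/T405/T406 messages are rendered. A small,
# self-contained check (the expected string follows directly from the NAMES table):
def _order_names_example():
  names = ImportType.order_names([ImportType.STDLIB, ImportType.THIRD_PARTY])
  assert names == 'stdlib 3rdparty'
  return names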
| 32.739796 | 98 | 0.661212 |
bad30b629f3d91fa5a6817139503733829f1d0f5 | 41,037 | py | Python | src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py | changwangss/transformers | 321eb56222b1655a06a993a473becf467d6e2034 | ["Apache-2.0"] | 1 | 2021-12-12T12:55:50.000Z | 2021-12-12T12:55:50.000Z | src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py | changwangss/transformers | 321eb56222b1655a06a993a473becf467d6e2034 | ["Apache-2.0"] | null | null | null | src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py | changwangss/transformers | 321eb56222b1655a06a993a473becf467d6e2034 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Vision-Encoder-Text-Decoder architectures """
import os
from typing import Optional, Tuple, Union
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, unfreeze
from jax import lax
from jax.random import PRNGKey
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput
from ...modeling_flax_utils import FlaxPreTrainedModel
from ...utils import logging
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "VisionEncoderDecoderConfig"
VISION_ENCODER_DECODER_START_DOCSTRING = r"""
This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model
as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via
:meth:`~transformers.AutoModel.from_pretrained` function and the decoder is loaded via
:meth:`~transformers.AutoModelForCausalLM.from_pretrained` function. Cross-attention layers are automatically added
to the decoder and should be fine-tuned on a downstream generative task, like image captioning.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
tasks was shown in `Leveraging Pre-trained Checkpoints for Sequence Generation Tasks
<https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
Zhou, Wei Li, Peter J. Liu.
Additionally, in `TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models
<https://arxiv.org/abs/2109.10282>`__ it is shown how leveraging large pretrained vision models for optical
character recognition (OCR) yields a significant performance improvement.
After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any
other models (see the examples for more information).
This model inherits from :class:`~transformers.FlaxPreTrainedModel`. Check the superclass documentation for the
    generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a Flax Linen `flax.nn.Module
<https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html>`__ subclass. Use it as a regular Flax
Module and refer to the Flax documentation for all matter related to general usage and behavior.
Parameters:
config (:class:`~transformers.VisionEncoderDecoderConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.FlaxPreTrainedModel.from_pretrained` method to load the
model weights.
"""
VISION_ENCODER_DECODER_INPUTS_DOCSTRING = r"""
Args:
pixel_values (:obj:`jnp.ndarray` of shape :obj:`(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using the vision model's feature extractor. For example, using
:class:`~transformers.ViTFeatureExtractor`. See :meth:`transformers.ViTFeatureExtractor.__call__` for
details.
decoder_input_ids (:obj:`jnp.ndarray` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
decoder_attention_mask (:obj:`jnp.ndarray` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
decoder_position_ids (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range ``[0, config.decoder.max_position_embeddings - 1]``.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.FlaxSeq2SeqLMOutput` instead
of a plain tuple.
"""
VISION_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = r"""
Args:
pixel_values (:obj:`jnp.ndarray` of shape :obj:`(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using the vision model's feature extractor. For example, using
:class:`~transformers.ViTFeatureExtractor`. See :meth:`transformers.ViTFeatureExtractor.__call__` for
details.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.FlaxBaseModelOutput` instead
of a plain tuple.
"""
VISION_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = r"""
Args:
decoder_input_ids (:obj:`jnp.ndarray` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
For sequence to sequence training, :obj:`decoder_input_ids` should be provided. If no
:obj:`decoder_input_ids` is provided, the model will create this tensor by shifting the :obj:`input_ids` to
the right for denoising pre-training.
encoder_outputs (:obj:`tuple(tuple(jnp.ndarray)`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
`optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
decoder_attention_mask (:obj:`jnp.ndarray` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
decoder_position_ids (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range ``[0, config.decoder.max_position_embeddings - 1]``.
past_key_values (:obj:`Dict[str, jnp.ndarray]`, `optional`, returned by ``init_cache`` or when passing previous ``past_key_values``):
Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
auto-regressive decoding. Pre-computed key and value hidden-states are of shape `[batch_size, max_length]`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
If set to ``True``, the model will return a
:class:`~transformers.file_utils.FlaxCausalLMOutputWithCrossAttentions` instead of a plain tuple.
"""
class FlaxVisionEncoderDecoderModule(nn.Module):
config: VisionEncoderDecoderConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
encoder_config = self.config.encoder
decoder_config = self.config.decoder
# Copied from `modeling_hybrid_clip.py` with modifications.
from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING
encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class
decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class
self.encoder = encoder_module(encoder_config, dtype=self.dtype)
self.decoder = decoder_module(decoder_config, dtype=self.dtype)
# encoder outputs might need to be projected to different dimension for decoder
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
self.enc_to_dec_proj = nn.Dense(
self.decoder.config.hidden_size,
kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range, self.dtype),
dtype=self.dtype,
)
else:
self.enc_to_dec_proj = None
def _get_encoder_module(self):
return self.encoder
def _get_projection_module(self):
return self.enc_to_dec_proj
def _get_decoder_module(self):
return self.decoder
def __call__(
self,
pixel_values,
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
encoder_outputs = self.encoder(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
encoder_hidden_states = encoder_outputs[0]
# optionally project encoder_hidden_states
if self.enc_to_dec_proj is not None:
encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
# The advantage of explicitly setting this is TPU XLA compiler knows as soon as possible what shape this
# variable has and can better optimize. Also passing `None` can lead to some problems when jitting the model.
# In Flax/JAX, we only want to pass `None` for non-tensor function inputs. For all tensor function inputs, we
# should always pass a tensor and not `None`.
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
position_ids=decoder_position_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return FlaxSeq2SeqLMOutput(
logits=decoder_outputs.logits,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings(VISION_ENCODER_DECODER_START_DOCSTRING)
class FlaxVisionEncoderDecoderModel(FlaxPreTrainedModel):
r"""
:class:`~transformers.FlaxVisionEncoderDecoderModel` is a generic model class that will be instantiated as a
transformer architecture with the module (flax.nn.Module) of one of the base vision model classes of the library as
encoder module and another one as decoder module when created with the
:meth`~transformers.FlaxAutoModel.from_pretrained` class method for the encoder and
:meth`~transformers.FlaxAutoModelForCausalLM.from_pretrained` class method for the decoder.
"""
config_class = VisionEncoderDecoderConfig
base_model_prefix = "vision_encoder_decoder"
module_class = FlaxVisionEncoderDecoderModule
def __init__(
self,
config: VisionEncoderDecoderConfig,
input_shape: Optional[Tuple] = None,
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
**kwargs
):
if input_shape is None:
num_channels = getattr(config.encoder, "num_channels", 3)
input_shape = (
(1, config.encoder.image_size, config.encoder.image_size, num_channels),
(1, 1),
)
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
raise ValueError(
f"If `cross_attention_hidden_size` is specified in the decoder's configuration, "
f"it has to be equal to the encoder's `hidden_size`."
f"Got {config.decoder.cross_attention_hidden_size} for `config.decoder.cross_attention_hidden_size` "
f"and {config.encoder.hidden_size} for `config.encoder.hidden_size`."
)
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
encoder_input_shape, decoder_input_shape = input_shape
# init input tensors
pixel_values = jnp.zeros(encoder_input_shape, dtype=self.dtype)
decoder_input_ids = jnp.zeros(decoder_input_shape, dtype="i4")
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
batch_size, _, _, _ = pixel_values.shape
decoder_batch_size, decoder_sequence_length = decoder_input_ids.shape
if not decoder_batch_size == batch_size:
raise ValueError(
f"The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder "
f"and {decoder_batch_size} for decoder."
)
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length)
)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
return self.module.init(
rngs,
pixel_values,
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
)["params"]
def init_cache(self, batch_size, max_length, encoder_outputs):
r"""
Args:
batch_size (:obj:`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (:obj:`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
encoder_outputs (:obj:`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
``encoder_outputs`` consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`,
`optional`: :obj:`attentions`). :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length,
hidden_size)`, `optional`) is a sequence of hidden-states at the output of the last layer of the
encoder. Used in the cross-attention of the decoder.
"""
# init input variables to retrieve cache
decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
decoder_position_ids = jnp.broadcast_to(
jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
)
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
position_ids=decoder_position_ids,
**kwargs,
)
init_variables = self.module.init(
jax.random.PRNGKey(0),
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
init_cache=True,
method=_decoder_forward, # we only need to call the decoder to init the cache
)
return unfreeze(init_variables["cache"])
@add_start_docstrings(VISION_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC)
def encode(
self,
pixel_values: jnp.ndarray,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example::
        >>> from transformers import FlaxVisionEncoderDecoderModel, ViTFeatureExtractor
>>> from PIL import Image
>>> import requests
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
>>> # initialize a vit-gpt2 from pretrained ViT and GPT2 models. Note that the cross-attention layers will be randomly initialized
>>> model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained('vit', 'gpt2')
>>> pixel_values = feature_extractor(images=image, return_tensors="np").pixel_values
>>> encoder_outputs = model.encode(pixel_values)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
# `FlaxViTModel` expects channel first format, but `FlaxViTModule` expects channel last format.
# Currently, we assume this holds for all Flax vision models, and perform a transpose here.
pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
def _encoder_forward(module, pixel_values, **kwargs):
encode_module = module._get_encoder_module()
return encode_module(pixel_values, **kwargs)
outputs = self.module.apply(
{"params": params or self.params},
pixel_values=jnp.array(pixel_values, dtype=self.dtype),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
method=_encoder_forward,
)
if return_dict:
outputs = FlaxBaseModelOutput(
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
return outputs
@add_start_docstrings(VISION_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def decode(
self,
decoder_input_ids,
encoder_outputs,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example::
        >>> from transformers import FlaxVisionEncoderDecoderModel, ViTFeatureExtractor
>>> import jax.numpy as jnp
>>> from PIL import Image
>>> import requests
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
>>> # initialize a vit-gpt2 from pretrained ViT and GPT2 models. Note that the cross-attention layers will be randomly initialized
>>> model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained('vit', 'gpt2')
>>> pixel_values = feature_extractor(images=image, return_tensors="np").pixel_values
>>> encoder_outputs = model.encode(pixel_values)
>>> decoder_start_token_id = model.config.decoder.bos_token_id
>>> decoder_input_ids = jnp.ones((pixel_values.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
# if past_key_values are passed then cache is already initialized a private flag init_cache has to be
# passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
# it can be changed by FlaxBartAttention module
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(
module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs
):
projection_module = module._get_projection_module()
decoder_module = module._get_decoder_module()
# optionally project encoder_hidden_states
if projection_module is not None:
encoder_hidden_states = projection_module(encoder_hidden_states)
return decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
encoder_hidden_states,
**kwargs,
)
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past = outputs
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past = outputs
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
@add_start_docstrings_to_model_forward(VISION_ENCODER_DECODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def __call__(
self,
pixel_values: jnp.ndarray,
decoder_input_ids: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Examples::
>>> from transformers import FlaxVisionEncoderDecoderModel, ViTFeatureExtractor, GPT2Tokenizer
>>> from PIL import Image
>>> import requests
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
>>> # load output tokenizer
>>> tokenizer_output = GPT2Tokenizer.from_pretrained('gpt2')
>>> # initialize a vit-gpt2 from pretrained ViT and GPT2 models. Note that the cross-attention layers will be randomly initialized
        >>> model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained('google/vit-base-patch16-224-in21k', 'gpt2')
>>> pixel_values = feature_extractor(images=image, return_tensors="np").pixel_values
>>> # use GPT2's eos_token as the pad as well as eos token
>>> model.config.eos_token_id = model.config.decoder.eos_token_id
>>> model.config.pad_token_id = model.config.eos_token_id
>>> # generation
>>> sequences = model.generate(pixel_values, num_beams=4, max_length=12).sequences
>>> captions = tokenizer_output.batch_decode(sequences, skip_special_tokens=True)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
# prepare encoder inputs
# `FlaxViTModel` expects channel first format, but `FlaxViTModule` expects channel last format.
# Currently, we assume this holds for all Flax vision models, and perform a transpose here.
pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
# prepare decoder inputs
if decoder_input_ids is None:
raise ValueError("`decoder_input_ids` can't be `None`.")
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
if decoder_position_ids is None:
batch_size, sequence_length = decoder_input_ids.shape
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
return self.module.apply(
{"params": params or self.params},
pixel_values=jnp.array(pixel_values, dtype=self.dtype),
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
max_length,
decoder_attention_mask: Optional[jnp.DeviceArray] = None,
encoder_outputs=None,
**kwargs
):
# initializing the cache
batch_size, seq_length = decoder_input_ids.shape
past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
# But since the decoder uses a causal mask, those positions are masked anyways.
# Thus we can create a single static attention_mask here, which is more efficient for compilation
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if decoder_attention_mask is not None:
decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
else:
decoder_position_ids = jnp.broadcast_to(
jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)
)
return {
"past_key_values": past_key_values,
"encoder_outputs": encoder_outputs,
"decoder_attention_mask": extended_attention_mask,
"decoder_position_ids": decoder_position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
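        # During incremental decoding only the cache and the position ids need to be
        # refreshed; the position id advances by one for each generated token.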
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
return model_kwargs
@classmethod
def from_encoder_decoder_pretrained(
cls,
encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
*model_args,
**kwargs
) -> FlaxPreTrainedModel:
r"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
Params:
encoder_pretrained_model_name_or_path (:obj: `Union[str, os.PathLike]`, `optional`):
Information necessary to initiate the encoder. Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. An
example is ``google/vit-base-patch16-224-in21k``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
decoder_pretrained_model_name_or_path (:obj: `Union[str, os.PathLike]`, `optional`, defaults to `None`):
Information necessary to initiate the decoder. Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
model_args (remaining positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`).
- To update the encoder configuration, use the prefix `encoder_` for each configuration parameter.
- To update the decoder configuration, use the prefix `decoder_` for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a :obj:`config` is provided or automatically loaded.
Example::
>>> from transformers import FlaxVisionEncoderDecoderModel
>>> # initialize a vit-gpt2 from a pretrained ViT and a pretrained GPT2 model. Note that the cross-attention layers will be randomly initialized
>>> model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained('google/vit-base-patch16-224-in21k', 'gpt2')
>>> # saving model after fine-tuning
>>> model.save_pretrained("./vit-gpt2")
>>> # load fine-tuned model
>>> model = FlaxVisionEncoderDecoderModel.from_pretrained("./vit-gpt2")
"""
kwargs_encoder = {
argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove encoder, decoder kwargs from kwargs
for key in kwargs_encoder.keys():
del kwargs["encoder_" + key]
for key in kwargs_decoder.keys():
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
if encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
"to be defined"
)
from ..auto.modeling_flax_auto import FlaxAutoModel
if "config" not in kwargs_encoder:
from ..auto.configuration_auto import AutoConfig
encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model from a decoder "
"model. Cross-attention and casual mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder["config"] = encoder_config
encoder = FlaxAutoModel.from_pretrained(
encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
"to be defined."
)
from ..auto.modeling_flax_auto import FlaxAutoModelForCausalLM
if "config" not in kwargs_decoder:
from ..auto.configuration_auto import AutoConfig
decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention "
f"layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if "
f"{decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order "
f"to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the "
"attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to "
"`.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to "
"`.from_encoder_decoder_pretrained(...)`"
)
decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# instantiate config with corresponding kwargs
dtype = kwargs.pop("dtype", jnp.float32)
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
# init model
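        # The composite model starts with freshly initialized weights; the pretrained
        # encoder and decoder parameters are copied into its parameter dict below.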
model = cls(config, dtype=dtype)
model.params["encoder"] = encoder.params
model.params["decoder"] = decoder.params
return model
| 48.853571 | 156 | 0.669932 |
3cb94c973371cf72eec089d03c0077caa8625f24
| 127,950 |
py
|
Python
|
nova/tests/unit/virt/vmwareapi/test_vmops.py
|
hyphon81/nova-for-gpu-passthrough
|
7c164980d7355d8fc40a6b155e31e325191b6a5e
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/vmwareapi/test_vmops.py
|
hyphon81/nova-for-gpu-passthrough
|
7c164980d7355d8fc40a6b155e31e325191b6a5e
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/vmwareapi/test_vmops.py
|
hyphon81/nova-for-gpu-passthrough
|
7c164980d7355d8fc40a6b155e31e325191b6a5e
|
[
"Apache-2.0"
] | 1 |
2020-07-24T00:41:18.000Z
|
2020-07-24T00:41:18.000Z
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import vim_util as vutil
import six
from nova.compute import power_state
from nova import context
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
import nova.tests.unit.image.fake
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.tests import uuidsentinel
from nova import utils
from nova import version
from nova.virt import hardware
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
class DsPathMatcher(object):
def __init__(self, expected_ds_path_str):
self.expected_ds_path_str = expected_ds_path_str
def __eq__(self, ds_path_param):
return str(ds_path_param) == self.expected_ds_path_str
class VMwareVMOpsTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMOpsTestCase, self).setUp()
ds_util.dc_cache_reset()
vmwareapi_fake.reset()
stubs.set_stubs(self)
self.flags(enabled=True, group='vnc')
self.flags(image_cache_subdirectory_name='vmware_base',
my_ip='',
flat_injected=True)
self._context = context.RequestContext('fake_user', 'fake_project')
self._session = driver.VMwareAPISession()
self._virtapi = mock.Mock()
self._image_id = nova.tests.unit.image.fake.get_valid_image_id()
fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds')
self._ds = ds_obj.Datastore(
ref=fake_ds_ref, name='fake_ds',
capacity=10 * units.Gi,
freespace=10 * units.Gi)
self._dc_info = ds_util.DcInfo(
ref='fake_dc_ref', name='fake_dc',
vmFolder='fake_vm_folder')
cluster = vmwareapi_fake.create_cluster('fake_cluster', fake_ds_ref)
self._uuid = uuidsentinel.foo
self._instance_values = {
'name': 'fake_name',
'display_name': 'fake_display_name',
'uuid': self._uuid,
'vcpus': 1,
'memory_mb': 512,
'image_ref': self._image_id,
'root_gb': 10,
'node': '%s(%s)' % (cluster.mo_id, cluster.name),
'expected_attrs': ['system_metadata'],
}
self._instance = fake_instance.fake_instance_obj(
self._context, **self._instance_values)
self._flavor = objects.Flavor(name='m1.small', memory_mb=512, vcpus=1,
root_gb=10, ephemeral_gb=0, swap=0,
extra_specs={})
self._instance.flavor = self._flavor
self._vmops = vmops.VMwareVMOps(self._session, self._virtapi, None,
cluster=cluster.obj)
self._cluster = cluster
self._image_meta = objects.ImageMeta.from_dict({'id': self._image_id})
subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
dns=[network_model.IP('192.168.0.1')],
gateway=
network_model.IP('192.168.0.1'),
ips=[
network_model.IP('192.168.0.100')],
routes=None)
subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
dns=None,
gateway=
network_model.IP('dead:beef::1'),
ips=[network_model.IP(
'dead:beef::dcad:beff:feef:0')],
routes=None)
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
subnets=[subnet_4, subnet_6],
vlan=None,
bridge_interface=None,
injected=True)
self._network_values = {
'id': None,
'address': 'DE:AD:BE:EF:00:00',
'network': network,
'type': None,
'devname': None,
'ovs_interfaceid': None,
'rxtx_cap': 3
}
self.network_info = network_model.NetworkInfo([
network_model.VIF(**self._network_values)
])
pure_IPv6_network = network_model.Network(id=0,
bridge='fa0',
label='fake',
subnets=[subnet_6],
vlan=None,
bridge_interface=None,
injected=True)
self.pure_IPv6_network_info = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=pure_IPv6_network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])
self._metadata = (
"name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.micro\n"
"flavor:memory_mb:6\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
"flavor:swap:33550336\n"
"imageid:70a599e0-31e7-49b7-b260-868f441e862b\n"
"package:%s\n" % version.version_string_with_package())
def test_get_machine_id_str(self):
result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
self.assertEqual('DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
'192.168.0.1;192.168.0.255;192.168.0.1#', result)
result = vmops.VMwareVMOps._get_machine_id_str(
self.pure_IPv6_network_info)
self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result)
def _setup_create_folder_mocks(self):
ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
base_name = 'folder'
ds_name = "datastore"
ds_ref = mock.Mock()
ds_ref.value = 1
dc_ref = mock.Mock()
ds_util._DS_DC_MAPPING[ds_ref.value] = ds_util.DcInfo(
ref=dc_ref,
name='fake-name',
vmFolder='fake-folder')
path = ds_obj.DatastorePath(ds_name, base_name)
return ds_name, ds_ref, ops, path, dc_ref
@mock.patch.object(ds_util, 'mkdir')
def test_create_folder_if_missing(self, mock_mkdir):
ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
mock_mkdir.assert_called_with(ops._session, path, dc)
@mock.patch.object(ds_util, 'mkdir')
def test_create_folder_if_missing_exception(self, mock_mkdir):
ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
ds_util.mkdir.side_effect = vexc.FileAlreadyExistsException()
ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
mock_mkdir.assert_called_with(ops._session, path, dc)
@mock.patch.object(vutil, 'continue_retrieval', return_value=None)
def test_get_valid_vms_from_retrieve_result(self, _mock_cont):
ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
fake_objects = vmwareapi_fake.FakeRetrieveResult()
for x in range(0, 3):
vm = vmwareapi_fake.VirtualMachine()
vm.set('config.extraConfig["nvp.vm-uuid"]',
vmwareapi_fake.OptionValue(
value=uuidutils.generate_uuid()))
fake_objects.add_object(vm)
vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
self.assertEqual(3, len(vms))
@mock.patch.object(vutil, 'continue_retrieval', return_value=None)
def test_get_valid_vms_from_retrieve_result_with_invalid(self,
_mock_cont):
ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
fake_objects = vmwareapi_fake.FakeRetrieveResult()
valid_vm = vmwareapi_fake.VirtualMachine()
valid_vm.set('config.extraConfig["nvp.vm-uuid"]',
vmwareapi_fake.OptionValue(
value=uuidutils.generate_uuid()))
fake_objects.add_object(valid_vm)
invalid_vm1 = vmwareapi_fake.VirtualMachine()
invalid_vm1.set('runtime.connectionState', 'orphaned')
invalid_vm1.set('config.extraConfig["nvp.vm-uuid"]',
vmwareapi_fake.OptionValue(
value=uuidutils.generate_uuid()))
invalid_vm2 = vmwareapi_fake.VirtualMachine()
invalid_vm2.set('runtime.connectionState', 'inaccessible')
invalid_vm2.set('config.extraConfig["nvp.vm-uuid"]',
vmwareapi_fake.OptionValue(
value=uuidutils.generate_uuid()))
fake_objects.add_object(invalid_vm1)
fake_objects.add_object(invalid_vm2)
vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
self.assertEqual(1, len(vms))
def test_delete_vm_snapshot(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('RemoveSnapshot_Task', method)
self.assertEqual('fake_vm_snapshot', args[0])
self.assertFalse(kwargs['removeChildren'])
self.assertTrue(kwargs['consolidate'])
return 'fake_remove_snapshot_task'
with test.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method', fake_call_method)
) as (_wait_for_task, _call_method):
self._vmops._delete_vm_snapshot(self._instance,
"fake_vm_ref", "fake_vm_snapshot")
_wait_for_task.assert_has_calls([
mock.call('fake_remove_snapshot_task')])
def test_create_vm_snapshot(self):
method_list = ['CreateSnapshot_Task', 'get_object_property']
def fake_call_method(module, method, *args, **kwargs):
expected_method = method_list.pop(0)
self.assertEqual(expected_method, method)
if (expected_method == 'CreateSnapshot_Task'):
self.assertEqual('fake_vm_ref', args[0])
self.assertFalse(kwargs['memory'])
self.assertTrue(kwargs['quiesce'])
return 'fake_snapshot_task'
elif (expected_method == 'get_object_property'):
task_info = mock.Mock()
task_info.result = "fake_snapshot_ref"
self.assertEqual(('fake_snapshot_task', 'info'), args)
return task_info
with test.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method', fake_call_method)
) as (_wait_for_task, _call_method):
snap = self._vmops._create_vm_snapshot(self._instance,
"fake_vm_ref")
self.assertEqual("fake_snapshot_ref", snap)
_wait_for_task.assert_has_calls([
mock.call('fake_snapshot_task')])
def test_update_instance_progress(self):
with mock.patch.object(self._instance, 'save') as mock_save:
self._vmops._update_instance_progress(self._instance._context,
self._instance, 5, 10)
mock_save.assert_called_once_with()
self.assertEqual(50, self._instance.progress)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
def test_get_info(self, mock_get_vm_ref):
result = {
'summary.config.numCpu': 4,
'summary.config.memorySizeMB': 128,
'runtime.powerState': 'poweredOn'
}
def mock_call_method(module, method, *args, **kwargs):
if method == 'continue_retrieval':
return
return result
with mock.patch.object(self._session, '_call_method',
mock_call_method):
info = self._vmops.get_info(self._instance)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
expected = hardware.InstanceInfo(state=power_state.RUNNING,
max_mem_kb=128 * 1024,
mem_kb=128 * 1024,
num_cpu=4)
self.assertEqual(expected, info)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
def test_get_info_when_ds_unavailable(self, mock_get_vm_ref):
result = {
'runtime.powerState': 'poweredOff'
}
def mock_call_method(module, method, *args, **kwargs):
if method == 'continue_retrieval':
return
return result
with mock.patch.object(self._session, '_call_method',
mock_call_method):
info = self._vmops.get_info(self._instance)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
self.assertEqual(hardware.InstanceInfo(state=power_state.SHUTDOWN),
info)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
def test_get_info_instance_deleted(self, mock_get_vm_ref):
props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
'runtime.powerState']
prop_cpu = vmwareapi_fake.Prop(props[0], 4)
prop_mem = vmwareapi_fake.Prop(props[1], 128)
prop_state = vmwareapi_fake.Prop(props[2], 'poweredOn')
prop_list = [prop_state, prop_mem, prop_cpu]
obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
result = vmwareapi_fake.FakeRetrieveResult()
result.add_object(obj_content)
def mock_call_method(module, method, *args, **kwargs):
raise vexc.ManagedObjectNotFoundException()
with mock.patch.object(self._session, '_call_method',
mock_call_method):
self.assertRaises(exception.InstanceNotFound,
self._vmops.get_info,
self._instance)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
instance_ds_ref = mock.Mock()
instance_ds_ref.value = "ds-1"
_vcvmops = vmops.VMwareVMOps(self._session, None, None)
if ds_ref_exists:
ds_ref = mock.Mock()
ds_ref.value = "ds-1"
else:
ds_ref = None
self._continue_retrieval = True
self._fake_object1 = vmwareapi_fake.FakeRetrieveResult()
self._fake_object2 = vmwareapi_fake.FakeRetrieveResult()
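        # The two FakeRetrieveResult objects emulate a paginated property retrieval:
        # when the datastore ref is not found on the first page, a token is set so
        # that a second page has to be fetched via continue_retrieval.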
def fake_call_method(module, method, *args, **kwargs):
self._fake_object1.add_object(vmwareapi_fake.Datacenter(
ds_ref=ds_ref))
if not ds_ref:
# Token is set for the fake_object1, so it will continue to
# fetch the next object.
setattr(self._fake_object1, 'token', 'token-0')
                if self._continue_retrieval:
                    self._continue_retrieval = False
                    self._fake_object2.add_object(
                        vmwareapi_fake.Datacenter())
                    return self._fake_object2
if method == "continue_retrieval":
return
return self._fake_object1
with mock.patch.object(self._session, '_call_method',
side_effect=fake_call_method) as fake_call:
dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
if ds_ref:
self.assertEqual(1, len(ds_util._DS_DC_MAPPING))
calls = [mock.call(vim_util, "get_objects", "Datacenter",
["name", "datastore", "vmFolder"]),
mock.call(vutil, 'continue_retrieval',
self._fake_object1)]
fake_call.assert_has_calls(calls)
self.assertEqual("ha-datacenter", dc_info.name)
else:
calls = [mock.call(vim_util, "get_objects", "Datacenter",
["name", "datastore", "vmFolder"]),
mock.call(vutil, 'continue_retrieval',
self._fake_object2)]
fake_call.assert_has_calls(calls)
self.assertIsNone(dc_info)
def test_get_datacenter_ref_and_name(self):
self._test_get_datacenter_ref_and_name(ds_ref_exists=True)
def test_get_datacenter_ref_and_name_with_no_datastore(self):
self._test_get_datacenter_ref_and_name()
@mock.patch('nova.image.api.API.get')
@mock.patch.object(vm_util, 'power_off_instance')
@mock.patch.object(ds_util, 'disk_copy')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
@mock.patch.object(vm_util, 'find_rescue_device')
@mock.patch.object(vm_util, 'get_vm_boot_spec')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'power_on_instance')
@mock.patch.object(ds_obj, 'get_datastore_by_ref')
def test_rescue(self, mock_get_ds_by_ref, mock_power_on, mock_reconfigure,
mock_get_boot_spec, mock_find_rescue,
mock_get_vm_ref, mock_disk_copy,
mock_power_off, mock_glance):
_volumeops = mock.Mock()
self._vmops._volumeops = _volumeops
ds_ref = vmwareapi_fake.ManagedObjectReference(value='fake-ref')
ds = ds_obj.Datastore(ds_ref, 'ds1')
mock_get_ds_by_ref.return_value = ds
mock_find_rescue.return_value = 'fake-rescue-device'
mock_get_boot_spec.return_value = 'fake-boot-spec'
vm_ref = vmwareapi_fake.ManagedObjectReference()
mock_get_vm_ref.return_value = vm_ref
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = ds.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
with test.nested(
mock.patch.object(self._vmops, 'get_datacenter_ref_and_name'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk)
) as (_get_dc_ref_and_name, fake_vmdk_info):
dc_info = mock.Mock()
_get_dc_ref_and_name.return_value = dc_info
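            # rescue() is expected to power off the VM, copy the cached image into the
            # instance folder as '<image-id>-rescue.vmdk', attach it, mark it as the
            # boot device and power the VM back on.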
self._vmops.rescue(
self._context, self._instance, None, self._image_meta)
mock_power_off.assert_called_once_with(self._session,
self._instance,
vm_ref)
uuid = self._instance.image_ref
cache_path = ds.build_path('vmware_base', uuid, uuid + '.vmdk')
rescue_path = ds.build_path(self._uuid, uuid + '-rescue.vmdk')
mock_disk_copy.assert_called_once_with(self._session, dc_info.ref,
cache_path, rescue_path)
_volumeops.attach_disk_to_vm.assert_called_once_with(vm_ref,
self._instance, mock.ANY, mock.ANY, rescue_path)
mock_get_boot_spec.assert_called_once_with(mock.ANY,
'fake-rescue-device')
mock_reconfigure.assert_called_once_with(self._session,
vm_ref,
'fake-boot-spec')
mock_power_on.assert_called_once_with(self._session,
self._instance,
vm_ref=vm_ref)
def test_unrescue_power_on(self):
self._test_unrescue(True)
def test_unrescue_power_off(self):
self._test_unrescue(False)
def _test_unrescue(self, power_on):
_volumeops = mock.Mock()
self._vmops._volumeops = _volumeops
vm_ref = mock.Mock()
def fake_call_method(module, method, *args, **kwargs):
expected_args = (vm_ref, 'config.hardware.device')
self.assertEqual('get_object_property', method)
self.assertEqual(expected_args, args)
with test.nested(
mock.patch.object(vm_util, 'power_on_instance'),
mock.patch.object(vm_util, 'find_rescue_device'),
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._session, '_call_method',
fake_call_method),
mock.patch.object(vm_util, 'power_off_instance')
) as (_power_on_instance, _find_rescue, _get_vm_ref,
_call_method, _power_off):
self._vmops.unrescue(self._instance, power_on=power_on)
if power_on:
_power_on_instance.assert_called_once_with(self._session,
self._instance, vm_ref=vm_ref)
else:
self.assertFalse(_power_on_instance.called)
_get_vm_ref.assert_called_once_with(self._session,
self._instance)
_power_off.assert_called_once_with(self._session, self._instance,
vm_ref)
_volumeops.detach_disk_from_vm.assert_called_once_with(
vm_ref, self._instance, mock.ANY, destroy_disk=True)
def _test_finish_migration(self, power_on=True, resize_instance=False):
with test.nested(
mock.patch.object(self._vmops,
'_resize_create_ephemerals_and_swap'),
mock.patch.object(self._vmops, "_update_instance_progress"),
mock.patch.object(vm_util, "power_on_instance"),
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-ref')
) as (fake_resize_create_ephemerals_and_swap,
fake_update_instance_progress, fake_power_on, fake_get_vm_ref):
self._vmops.finish_migration(context=self._context,
migration=None,
instance=self._instance,
disk_info=None,
network_info=None,
block_device_info=None,
resize_instance=resize_instance,
image_meta=None,
power_on=power_on)
fake_resize_create_ephemerals_and_swap.assert_called_once_with(
'fake-ref', self._instance, None)
if power_on:
fake_power_on.assert_called_once_with(self._session,
self._instance,
vm_ref='fake-ref')
else:
self.assertFalse(fake_power_on.called)
calls = [
mock.call(self._context, self._instance, step=5,
total_steps=vmops.RESIZE_TOTAL_STEPS),
mock.call(self._context, self._instance, step=6,
total_steps=vmops.RESIZE_TOTAL_STEPS)]
fake_update_instance_progress.assert_has_calls(calls)
def test_finish_migration_power_on(self):
self._test_finish_migration(power_on=True, resize_instance=False)
def test_finish_migration_power_off(self):
self._test_finish_migration(power_on=False, resize_instance=False)
def test_finish_migration_power_on_resize(self):
self._test_finish_migration(power_on=True, resize_instance=True)
@mock.patch.object(vmops.VMwareVMOps, '_create_swap')
@mock.patch.object(vmops.VMwareVMOps, '_create_ephemeral')
@mock.patch.object(ds_obj, 'get_datastore_by_ref',
return_value='fake-ds-ref')
@mock.patch.object(vm_util, 'get_vmdk_info')
def _test_resize_create_ephemerals(self, vmdk, datastore,
mock_get_vmdk_info,
mock_get_datastore_by_ref,
mock_create_ephemeral,
mock_create_swap):
mock_get_vmdk_info.return_value = vmdk
dc_info = ds_util.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
with mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info) as mock_get_dc_ref_and_name:
self._vmops._resize_create_ephemerals_and_swap(
'vm-ref', self._instance, 'block-devices')
mock_get_vmdk_info.assert_called_once_with(
self._session, 'vm-ref', uuid=self._instance.uuid)
if vmdk.device:
mock_get_datastore_by_ref.assert_called_once_with(
self._session, datastore.ref)
mock_get_dc_ref_and_name.assert_called_once_with(datastore.ref)
mock_create_ephemeral.assert_called_once_with(
'block-devices', self._instance, 'vm-ref',
dc_info, 'fake-ds-ref', 'uuid', 'fake-adapter')
mock_create_swap.assert_called_once_with(
'block-devices', self._instance, 'vm-ref',
dc_info, 'fake-ds-ref', 'uuid', 'fake-adapter')
else:
self.assertFalse(mock_create_ephemeral.called)
self.assertFalse(mock_get_dc_ref_and_name.called)
self.assertFalse(mock_get_datastore_by_ref.called)
def test_resize_create_ephemerals(self):
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
self._test_resize_create_ephemerals(vmdk, datastore)
def test_resize_create_ephemerals_no_root(self):
vmdk = vm_util.VmdkInfo(None, None, None, 0, None)
self._test_resize_create_ephemerals(vmdk, None)
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vmops.VMwareVMOps, '_resize_create_ephemerals_and_swap')
@mock.patch.object(vmops.VMwareVMOps, '_remove_ephemerals_and_swap')
@mock.patch.object(ds_util, 'disk_delete')
@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'file_exists',
return_value=True)
@mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser',
return_value='fake-browser')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_vm_resize_spec',
return_value='fake-spec')
@mock.patch.object(vm_util, 'power_off_instance')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
@mock.patch.object(vm_util, 'power_on_instance')
def _test_finish_revert_migration(self, fake_power_on,
fake_get_vm_ref, fake_power_off,
fake_resize_spec, fake_reconfigure_vm,
fake_get_browser,
fake_original_exists, fake_disk_move,
fake_disk_delete,
fake_remove_ephemerals_and_swap,
fake_resize_create_ephemerals_and_swap,
fake_get_extra_specs,
power_on):
"""Tests the finish_revert_migration method on vmops."""
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
dc_info = ds_util.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
extra_specs = vm_util.ExtraSpecs()
fake_get_extra_specs.return_value = extra_specs
with test.nested(
mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk)
) as (fake_get_dc_ref_and_name, fake_get_vmdk_info):
self._vmops._volumeops = mock.Mock()
mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm
mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm
self._vmops.finish_revert_migration(self._context,
instance=self._instance,
network_info=None,
block_device_info=None,
power_on=power_on)
fake_get_vm_ref.assert_called_once_with(self._session,
self._instance)
fake_power_off.assert_called_once_with(self._session,
self._instance,
'fake-ref')
# Validate VM reconfiguration
metadata = ('name:fake_display_name\n'
'userid:fake_user\n'
'username:None\n'
'projectid:fake_project\n'
'projectname:None\n'
'flavor:name:m1.small\n'
'flavor:memory_mb:512\n'
'flavor:vcpus:1\n'
'flavor:ephemeral_gb:0\n'
'flavor:root_gb:10\n'
'flavor:swap:0\n'
'imageid:70a599e0-31e7-49b7-b260-868f441e862b\n'
'package:%s\n' % version.version_string_with_package())
fake_resize_spec.assert_called_once_with(
self._session.vim.client.factory,
int(self._instance.vcpus),
int(self._instance.memory_mb),
extra_specs,
metadata=metadata)
fake_reconfigure_vm.assert_called_once_with(self._session,
'fake-ref',
'fake-spec')
# Validate disk configuration
fake_get_vmdk_info.assert_called_once_with(
self._session, 'fake-ref', uuid=self._instance.uuid)
fake_get_browser.assert_called_once_with('fake-ref')
fake_original_exists.assert_called_once_with(
self._session, 'fake-browser',
ds_obj.DatastorePath(datastore.name, 'uuid'),
'original.vmdk')
mock_detach_disk.assert_called_once_with('fake-ref',
self._instance,
device)
fake_disk_delete.assert_called_once_with(
self._session, dc_info.ref, '[fake] uuid/root.vmdk')
fake_disk_move.assert_called_once_with(
self._session, dc_info.ref,
'[fake] uuid/original.vmdk',
'[fake] uuid/root.vmdk')
mock_attach_disk.assert_called_once_with(
'fake-ref', self._instance, 'fake-adapter', 'fake-disk',
'[fake] uuid/root.vmdk')
fake_remove_ephemerals_and_swap.assert_called_once_with('fake-ref')
fake_resize_create_ephemerals_and_swap.assert_called_once_with(
'fake-ref', self._instance, None)
if power_on:
fake_power_on.assert_called_once_with(self._session,
self._instance)
else:
self.assertFalse(fake_power_on.called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(power_on=True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(power_on=False)
@mock.patch.object(vmops.VMwareVMOps, '_get_instance_metadata')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_vm_resize_spec',
return_value='fake-spec')
def test_resize_vm(self, fake_resize_spec, fake_reconfigure,
fake_get_extra_specs, fake_get_metadata):
extra_specs = vm_util.ExtraSpecs()
fake_get_extra_specs.return_value = extra_specs
fake_get_metadata.return_value = self._metadata
flavor = objects.Flavor(name='m1.small',
memory_mb=1024,
vcpus=2,
extra_specs={})
self._vmops._resize_vm(self._context, self._instance, 'vm-ref', flavor,
None)
fake_resize_spec.assert_called_once_with(
self._session.vim.client.factory, 2, 1024, extra_specs,
metadata=self._metadata)
fake_reconfigure.assert_called_once_with(self._session,
'vm-ref', 'fake-spec')
@mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'disk_copy')
def test_resize_disk(self, fake_disk_copy, fake_disk_move,
fake_extend):
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
self._instance.flavor.root_gb * units.Gi,
device)
dc_info = ds_util.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
with mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info) as fake_get_dc_ref_and_name:
self._vmops._volumeops = mock.Mock()
mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm
mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm
flavor = fake_flavor.fake_flavor_obj(self._context,
root_gb=self._instance.flavor.root_gb + 1)
self._vmops._resize_disk(self._instance, 'fake-ref', vmdk, flavor)
fake_get_dc_ref_and_name.assert_called_once_with(datastore.ref)
fake_disk_copy.assert_called_once_with(
self._session, dc_info.ref, '[fake] uuid/root.vmdk',
'[fake] uuid/resized.vmdk')
mock_detach_disk.assert_called_once_with('fake-ref',
self._instance,
device)
fake_extend.assert_called_once_with(
self._instance, flavor['root_gb'] * units.Mi,
'[fake] uuid/resized.vmdk', dc_info.ref)
calls = [
mock.call(self._session, dc_info.ref,
'[fake] uuid/root.vmdk',
'[fake] uuid/original.vmdk'),
mock.call(self._session, dc_info.ref,
'[fake] uuid/resized.vmdk',
'[fake] uuid/root.vmdk')]
fake_disk_move.assert_has_calls(calls)
mock_attach_disk.assert_called_once_with(
'fake-ref', self._instance, 'fake-adapter', 'fake-disk',
'[fake] uuid/root.vmdk')
@mock.patch.object(vm_util, 'detach_devices_from_vm')
@mock.patch.object(vm_util, 'get_swap')
@mock.patch.object(vm_util, 'get_ephemerals')
def test_remove_ephemerals_and_swap(self, get_ephemerals, get_swap,
detach_devices):
get_ephemerals.return_value = [mock.sentinel.ephemeral0,
mock.sentinel.ephemeral1]
get_swap.return_value = mock.sentinel.swap
devices = [mock.sentinel.ephemeral0, mock.sentinel.ephemeral1,
mock.sentinel.swap]
self._vmops._remove_ephemerals_and_swap(mock.sentinel.vm_ref)
detach_devices.assert_called_once_with(self._vmops._session,
mock.sentinel.vm_ref, devices)
@mock.patch.object(ds_util, 'disk_delete')
@mock.patch.object(ds_util, 'file_exists',
return_value=True)
@mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser',
return_value='fake-browser')
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_confirm_migration(self, fake_get_vm_ref, fake_get_browser,
fake_original_exists,
fake_disk_delete):
"""Tests the confirm_migration method on vmops."""
datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
device = vmwareapi_fake.DataObject()
backing = vmwareapi_fake.DataObject()
backing.datastore = datastore.ref
device.backing = backing
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
'fake-capacity',
device)
dc_info = ds_util.DcInfo(ref='fake_ref', name='fake',
vmFolder='fake_folder')
with test.nested(
mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
return_value=dc_info),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk)
) as (fake_get_dc_ref_and_name, fake_get_vmdk_info):
self._vmops.confirm_migration(None,
self._instance,
None)
fake_get_vm_ref.assert_called_once_with(self._session,
self._instance)
fake_get_vmdk_info.assert_called_once_with(
self._session, 'fake-ref', uuid=self._instance.uuid)
fake_get_browser.assert_called_once_with('fake-ref')
fake_original_exists.assert_called_once_with(
self._session, 'fake-browser',
ds_obj.DatastorePath(datastore.name, 'uuid'),
'original.vmdk')
fake_disk_delete.assert_called_once_with(
self._session, dc_info.ref, '[fake] uuid/original.vmdk')
def test_migrate_disk_and_power_off(self):
self._test_migrate_disk_and_power_off(
flavor_root_gb=self._instance.flavor.root_gb + 1)
def test_migrate_disk_and_power_off_zero_disk_flavor(self):
self._instance.flavor.root_gb = 0
self._test_migrate_disk_and_power_off(flavor_root_gb=0)
def test_migrate_disk_and_power_off_disk_shrink(self):
self.assertRaises(exception.InstanceFaultRollback,
self._test_migrate_disk_and_power_off,
flavor_root_gb=self._instance.flavor.root_gb - 1)
@mock.patch.object(vmops.VMwareVMOps, "_remove_ephemerals_and_swap")
@mock.patch.object(vm_util, 'get_vmdk_info')
@mock.patch.object(vmops.VMwareVMOps, "_resize_disk")
@mock.patch.object(vmops.VMwareVMOps, "_resize_vm")
@mock.patch.object(vm_util, 'power_off_instance')
@mock.patch.object(vmops.VMwareVMOps, "_update_instance_progress")
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def _test_migrate_disk_and_power_off(self, fake_get_vm_ref, fake_progress,
fake_power_off, fake_resize_vm,
fake_resize_disk, fake_get_vmdk_info,
fake_remove_ephemerals_and_swap,
flavor_root_gb):
vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
'fake-adapter',
'fake-disk',
self._instance.flavor.root_gb * units.Gi,
'fake-device')
fake_get_vmdk_info.return_value = vmdk
flavor = fake_flavor.fake_flavor_obj(self._context,
root_gb=flavor_root_gb)
self._vmops.migrate_disk_and_power_off(self._context,
self._instance,
None,
flavor)
fake_get_vm_ref.assert_called_once_with(self._session,
self._instance)
fake_power_off.assert_called_once_with(self._session,
self._instance,
'fake-ref')
fake_resize_vm.assert_called_once_with(self._context, self._instance,
'fake-ref', flavor, mock.ANY)
fake_resize_disk.assert_called_once_with(self._instance, 'fake-ref',
vmdk, flavor)
calls = [mock.call(self._context, self._instance, step=i,
total_steps=vmops.RESIZE_TOTAL_STEPS)
for i in range(4)]
fake_progress.assert_has_calls(calls)
@mock.patch.object(vutil, 'get_inventory_path', return_value='fake_path')
@mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
@mock.patch.object(vmops.VMwareVMOps, '_create_config_drive')
def test_configure_config_drive(self,
mock_create_config_drive,
mock_attach_cdrom_to_vm,
mock_get_inventory_path):
injected_files = mock.Mock()
admin_password = mock.Mock()
network_info = mock.Mock()
vm_ref = mock.Mock()
mock_create_config_drive.return_value = "fake_iso_path"
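        # The config drive is built as an ISO on the datastore and then attached to
        # the instance as a CD-ROM device.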
self._vmops._configure_config_drive(
self._context, self._instance, vm_ref, self._dc_info, self._ds,
injected_files, admin_password, network_info)
upload_iso_path = self._ds.build_path("fake_iso_path")
mock_get_inventory_path.assert_called_once_with(self._session.vim,
self._dc_info.ref)
mock_create_config_drive.assert_called_once_with(
self._context, self._instance, injected_files, admin_password,
network_info, self._ds.name, 'fake_path', self._instance.uuid,
"Fake-CookieJar")
mock_attach_cdrom_to_vm.assert_called_once_with(
vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
@mock.patch('nova.image.api.API.get')
@mock.patch.object(vmops.LOG, 'debug')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.lockutils, 'lock')
def test_spawn_mask_block_device_info_password(self, mock_lock,
mock_build_virtual_machine, mock_get_vm_config_info,
mock_fetch_image_if_missing, mock_debug, mock_glance):
# Very simple test that just ensures block_device_info auth_password
# is masked when logged; the rest of the test just fails out early.
data = {'auth_password': 'scrubme'}
bdm = [{'boot_index': 0, 'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
'connection_info': {'data': data}}]
bdi = {'block_device_mapping': bdm}
self.password_logged = False
# Tests that the parameters to the to_xml method are sanitized for
# passwords when logged.
def fake_debug(*args, **kwargs):
if 'auth_password' in args[0]:
self.password_logged = True
self.assertNotIn('scrubme', args[0])
mock_debug.side_effect = fake_debug
self.flags(flat_injected=False)
self.flags(enabled=False, group='vnc')
# Call spawn(). We don't care what it does as long as it generates
# the log message, which we check below.
with mock.patch.object(self._vmops, '_volumeops') as mock_vo:
mock_vo.attach_root_volume.side_effect = test.TestingException
try:
self._vmops.spawn(
self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi
)
except test.TestingException:
pass
# Check that the relevant log message was generated, and therefore
# that we checked it was scrubbed
self.assertTrue(self.password_logged)
def _get_metadata(self, is_image_used=True):
if is_image_used:
image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
else:
image_id = None
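        # Expected instance metadata string as built by vmops and passed into the VM
        # create/resize spec.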
return ("name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.small\n"
"flavor:memory_mb:512\n"
"flavor:vcpus:1\n"
"flavor:ephemeral_gb:0\n"
"flavor:root_gb:10\n"
"flavor:swap:0\n"
"imageid:%(image_id)s\n"
"package:%(version)s\n" % {
'image_id': image_id,
'version': version.version_string_with_package()})
@mock.patch.object(vm_util, 'rename_vm')
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@mock.patch(
'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_non_root_block_device(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
enlist_image, fetch_image,
use_disk_image,
power_on_instance,
create_folders,
rename_vm):
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'}
connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'}
bdm = [{'connection_info': connection_info1,
'disk_bus': constants.ADAPTER_TYPE_IDE,
'mount_device': '/dev/sdb'},
{'connection_info': connection_info2,
'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
'mount_device': '/dev/sdc'}]
bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
self.flags(flat_injected=False)
self.flags(enabled=False, group='vnc')
image_size = (self._instance.flavor.root_gb) * units.Gi / 2
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size)
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
with mock.patch.object(self._vmops, '_volumeops') as volumeops:
self._vmops.spawn(self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi)
from_image.assert_called_once_with(self._context,
self._instance.image_ref,
self._image_meta)
get_vm_config_info.assert_called_once_with(self._instance,
image_info, extra_specs)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [],
extra_specs, self._get_metadata())
enlist_image.assert_called_once_with(image_info.image_id,
vi.datastore, vi.dc_info.ref)
fetch_image.assert_called_once_with(self._context, vi)
use_disk_image.assert_called_once_with('fake-vm-ref', vi)
volumeops.attach_volume.assert_any_call(
connection_info1, self._instance, constants.ADAPTER_TYPE_IDE)
volumeops.attach_volume.assert_any_call(
connection_info2, self._instance,
constants.DEFAULT_ADAPTER_TYPE)
@mock.patch.object(vm_util, 'rename_vm')
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_with_no_image_and_block_devices(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
power_on_instance,
create_folders,
rename_vm):
self._instance.image_ref = None
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'}
connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'}
connection_info3 = {'data': 'fake-data3', 'serial': 'volume-fake-id3'}
bdm = [{'boot_index': 0,
'connection_info': connection_info1,
'disk_bus': constants.ADAPTER_TYPE_IDE},
{'boot_index': 1,
'connection_info': connection_info2,
'disk_bus': constants.DEFAULT_ADAPTER_TYPE},
{'boot_index': 2,
'connection_info': connection_info3,
'disk_bus': constants.ADAPTER_TYPE_LSILOGICSAS}]
bdi = {'block_device_mapping': bdm}
self.flags(flat_injected=False)
self.flags(enabled=False, group='vnc')
image_info = mock.sentinel.image_info
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
with mock.patch.object(self._vmops, '_volumeops') as volumeops:
self._vmops.spawn(self._context, self._instance, self._image_meta,
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi)
from_image.assert_called_once_with(self._context,
self._instance.image_ref,
self._image_meta)
get_vm_config_info.assert_called_once_with(self._instance,
image_info, extra_specs)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [],
extra_specs, self._get_metadata(is_image_used=False))
volumeops.attach_root_volume.assert_called_once_with(
connection_info1, self._instance, vi.datastore.ref,
constants.ADAPTER_TYPE_IDE)
volumeops.attach_volume.assert_any_call(
connection_info2, self._instance,
constants.DEFAULT_ADAPTER_TYPE)
volumeops.attach_volume.assert_any_call(
connection_info3, self._instance,
constants.ADAPTER_TYPE_LSILOGICSAS)
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_unsupported_hardware(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
power_on_instance,
create_folders):
self._instance.image_ref = None
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
bdm = [{'boot_index': 0,
'connection_info': connection_info,
'disk_bus': 'invalid_adapter_type'}]
bdi = {'block_device_mapping': bdm}
self.flags(flat_injected=False)
self.flags(enabled=False, group='vnc')
image_info = mock.sentinel.image_info
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
self.assertRaises(exception.UnsupportedHardware, self._vmops.spawn,
self._context, self._instance, self._image_meta,
injected_files=None,
admin_password=None, network_info=[],
block_device_info=bdi)
from_image.assert_called_once_with(self._context,
self._instance.image_ref,
self._image_meta)
get_vm_config_info.assert_called_once_with(
self._instance, image_info, extra_specs)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [],
extra_specs, self._get_metadata(is_image_used=False))
def test_get_ds_browser(self):
cache = self._vmops._datastore_browser_mapping
ds_browser = mock.Mock()
moref = vmwareapi_fake.ManagedObjectReference('datastore-100')
self.assertIsNone(cache.get(moref.value))
mock_call_method = mock.Mock(return_value=ds_browser)
with mock.patch.object(self._session, '_call_method',
mock_call_method):
ret = self._vmops._get_ds_browser(moref)
mock_call_method.assert_called_once_with(vutil,
'get_object_property', moref, 'browser')
self.assertIs(ds_browser, ret)
self.assertIs(ds_browser, cache.get(moref.value))
@mock.patch.object(
vmops.VMwareVMOps, '_sized_image_exists', return_value=False)
@mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
@mock.patch.object(vm_util, 'copy_virtual_disk')
def _test_use_disk_image_as_linked_clone(self,
mock_copy_virtual_disk,
mock_extend_virtual_disk,
mock_sized_image_exists,
flavor_fits_image=False):
extra_specs = vm_util.ExtraSpecs()
file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=file_size,
linked_clone=False)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache, extra_specs)
sized_cached_image_ds_loc = cache_root_folder.join(
"%s.%s.vmdk" % (self._image_id, vi.root_gb))
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
self._vmops._use_disk_image_as_linked_clone("fake_vm_ref", vi)
mock_copy_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
str(vi.cache_image_path),
str(sized_cached_image_ds_loc))
if not flavor_fits_image:
mock_extend_virtual_disk.assert_called_once_with(
self._instance, vi.root_gb * units.Mi,
str(sized_cached_image_ds_loc),
self._dc_info.ref)
mock_attach_disk_to_vm.assert_called_once_with(
"fake_vm_ref", self._instance, vi.ii.adapter_type,
vi.ii.disk_type,
str(sized_cached_image_ds_loc),
vi.root_gb * units.Mi, False,
disk_io_limits=vi._extra_specs.disk_io_limits)
def test_use_disk_image_as_linked_clone(self):
self._test_use_disk_image_as_linked_clone()
def test_use_disk_image_as_linked_clone_flavor_fits_image(self):
self._test_use_disk_image_as_linked_clone(flavor_fits_image=True)
@mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
@mock.patch.object(vm_util, 'copy_virtual_disk')
def _test_use_disk_image_as_full_clone(self,
mock_copy_virtual_disk,
mock_extend_virtual_disk,
flavor_fits_image=False):
extra_specs = vm_util.ExtraSpecs()
file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=file_size,
linked_clone=False)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache,
extra_specs)
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
self._vmops._use_disk_image_as_full_clone("fake_vm_ref", vi)
fake_path = '[fake_ds] %(uuid)s/%(uuid)s.vmdk' % {'uuid': self._uuid}
mock_copy_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
str(vi.cache_image_path),
fake_path)
if not flavor_fits_image:
mock_extend_virtual_disk.assert_called_once_with(
self._instance, vi.root_gb * units.Mi,
fake_path, self._dc_info.ref)
mock_attach_disk_to_vm.assert_called_once_with(
"fake_vm_ref", self._instance, vi.ii.adapter_type,
vi.ii.disk_type, fake_path,
vi.root_gb * units.Mi, False,
disk_io_limits=vi._extra_specs.disk_io_limits)
def test_use_disk_image_as_full_clone(self):
self._test_use_disk_image_as_full_clone()
def test_use_disk_image_as_full_clone_image_too_big(self):
self._test_use_disk_image_as_full_clone(flavor_fits_image=True)
@mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
@mock.patch.object(vm_util, 'create_virtual_disk')
def _test_use_iso_image(self,
mock_create_virtual_disk,
mock_attach_cdrom,
with_root_disk):
extra_specs = vm_util.ExtraSpecs()
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=10 * units.Mi,
linked_clone=True)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache, extra_specs)
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
self._vmops._use_iso_image("fake_vm_ref", vi)
mock_attach_cdrom.assert_called_once_with(
"fake_vm_ref", self._instance, self._ds.ref,
str(vi.cache_image_path))
fake_path = '[fake_ds] %(uuid)s/%(uuid)s.vmdk' % {'uuid': self._uuid}
if with_root_disk:
mock_create_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
vi.ii.adapter_type, vi.ii.disk_type,
fake_path,
vi.root_gb * units.Mi)
linked_clone = False
mock_attach_disk_to_vm.assert_called_once_with(
"fake_vm_ref", self._instance,
vi.ii.adapter_type, vi.ii.disk_type,
fake_path,
vi.root_gb * units.Mi, linked_clone,
disk_io_limits=vi._extra_specs.disk_io_limits)
def test_use_iso_image_with_root_disk(self):
self._test_use_iso_image(with_root_disk=True)
def test_use_iso_image_without_root_disk(self):
self._test_use_iso_image(with_root_disk=False)
def _verify_spawn_method_calls(self, mock_call_method, extras=None):
# TODO(vui): More explicit assertions of spawn() behavior
# are waiting on additional refactoring pertaining to image
# handling/manipulation. Till then, we continue to assert on the
# sequence of VIM operations invoked.
expected_methods = ['get_object_property',
'SearchDatastore_Task',
'CreateVirtualDisk_Task',
'DeleteDatastoreFile_Task',
'MoveDatastoreFile_Task',
'DeleteDatastoreFile_Task',
'SearchDatastore_Task',
'ExtendVirtualDisk_Task',
]
if extras:
expected_methods.extend(extras)
# Last call should be renaming the instance
expected_methods.append('Rename_Task')
recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
self.assertEqual(expected_methods, recorded_methods)
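    # _test_spawn drives VMwareVMOps.spawn() end to end with the session,
    # image fetch and VIM calls mocked out, then checks the resulting
    # create/fetch/attach/power-on sequence.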
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch(
'nova.virt.vmwareapi.vmops.VMwareVMOps._update_vnic_index')
@mock.patch(
'nova.virt.vmwareapi.vmops.VMwareVMOps._configure_config_drive')
@mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
@mock.patch(
'nova.virt.vmwareapi.vmops.VMwareVMOps.get_datacenter_ref_and_name')
@mock.patch('nova.virt.vmwareapi.vif.get_vif_info',
return_value=[])
@mock.patch('nova.utils.is_neutron',
return_value=False)
@mock.patch('nova.virt.vmwareapi.vm_util.get_vm_create_spec',
return_value='fake_create_spec')
@mock.patch('nova.virt.vmwareapi.vm_util.create_vm',
return_value='fake_vm_ref')
@mock.patch('nova.virt.vmwareapi.ds_util.mkdir')
@mock.patch('nova.virt.vmwareapi.vmops.VMwareVMOps._set_machine_id')
@mock.patch(
'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
@mock.patch.object(vmops.VMwareVMOps, '_get_and_set_vnc_config')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch('nova.virt.vmwareapi.vm_util.copy_virtual_disk')
# TODO(dims): Need to add tests for create_virtual_disk after the
# disk/image code in spawn gets refactored
def _test_spawn(self,
mock_copy_virtual_disk,
mock_power_on_instance,
mock_get_and_set_vnc_config,
mock_enlist_image,
mock_set_machine_id,
mock_mkdir,
mock_create_vm,
mock_get_create_spec,
mock_is_neutron,
mock_get_vif_info,
mock_get_datacenter_ref_and_name,
mock_get_datastore,
mock_configure_config_drive,
mock_update_vnic_index,
mock_create_folders,
block_device_info=None,
extra_specs=None,
config_drive=False):
if extra_specs is None:
extra_specs = vm_util.ExtraSpecs()
image_size = (self._instance.flavor.root_gb) * units.Gi / 2
image = {
'id': self._image_id,
'disk_format': 'vmdk',
'size': image_size,
}
image = objects.ImageMeta.from_dict(image)
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size)
vi = self._vmops._get_vm_config_info(
self._instance, image_info, extra_specs)
self._vmops._volumeops = mock.Mock()
network_info = mock.Mock()
mock_get_datastore.return_value = self._ds
mock_get_datacenter_ref_and_name.return_value = self._dc_info
mock_call_method = mock.Mock(return_value='fake_task')
with test.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method',
mock_call_method),
mock.patch.object(uuidutils, 'generate_uuid',
return_value='tmp-uuid'),
mock.patch.object(images, 'fetch_image'),
mock.patch('nova.image.api.API.get'),
mock.patch.object(vutil, 'get_inventory_path',
return_value=self._dc_info.name),
mock.patch.object(self._vmops, '_get_extra_specs',
return_value=extra_specs),
mock.patch.object(self._vmops, '_get_instance_metadata',
return_value='fake-metadata')
) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image,
_get_img_svc, _get_inventory_path, _get_extra_specs,
_get_instance_metadata):
self._vmops.spawn(self._context, self._instance, image,
injected_files='fake_files',
admin_password='password',
network_info=network_info,
block_device_info=block_device_info)
mock_is_neutron.assert_called_once_with()
self.assertEqual(2, mock_mkdir.call_count)
mock_get_vif_info.assert_called_once_with(
self._session, self._cluster.obj, False,
constants.DEFAULT_VIF_MODEL, network_info)
mock_get_create_spec.assert_called_once_with(
self._session.vim.client.factory,
self._instance,
'fake_ds',
[],
extra_specs,
constants.DEFAULT_OS_TYPE,
profile_spec=None,
metadata='fake-metadata')
mock_create_vm.assert_called_once_with(
self._session,
self._instance,
'fake_vm_folder',
'fake_create_spec',
self._cluster.resourcePool)
mock_get_and_set_vnc_config.assert_called_once_with(
self._session.vim.client.factory,
self._instance,
'fake_vm_ref')
mock_set_machine_id.assert_called_once_with(
self._session.vim.client.factory,
self._instance,
network_info,
vm_ref='fake_vm_ref')
mock_power_on_instance.assert_called_once_with(
self._session, self._instance, vm_ref='fake_vm_ref')
if (block_device_info and
'block_device_mapping' in block_device_info):
bdms = block_device_info['block_device_mapping']
for bdm in bdms:
mock_attach_root = (
self._vmops._volumeops.attach_root_volume)
mock_attach = self._vmops._volumeops.attach_volume
adapter_type = bdm.get('disk_bus') or vi.ii.adapter_type
if bdm.get('boot_index') == 0:
mock_attach_root.assert_any_call(
bdm['connection_info'], self._instance,
self._ds.ref, adapter_type)
else:
mock_attach.assert_any_call(
bdm['connection_info'], self._instance,
self._ds.ref, adapter_type)
mock_enlist_image.assert_called_once_with(
self._image_id, self._ds, self._dc_info.ref)
upload_file_name = 'vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
self._image_id, self._image_id)
_fetch_image.assert_called_once_with(
self._context,
self._instance,
self._session._host,
self._session._port,
self._dc_info.name,
self._ds.name,
upload_file_name,
cookies='Fake-CookieJar')
self.assertGreater(len(_wait_for_task.mock_calls), 0)
        self.assertEqual(1, _get_inventory_path.call_count)
extras = None
if block_device_info and ('ephemerals' in block_device_info or
'swap' in block_device_info):
extras = ['CreateVirtualDisk_Task']
self._verify_spawn_method_calls(_call_method, extras)
dc_ref = 'fake_dc_ref'
source_file = six.text_type('[fake_ds] vmware_base/%s/%s.vmdk' %
(self._image_id, self._image_id))
dest_file = six.text_type('[fake_ds] vmware_base/%s/%s.%d.vmdk' %
(self._image_id, self._image_id,
self._instance['root_gb']))
# TODO(dims): add more tests for copy_virtual_disk after
# the disk/image code in spawn gets refactored
mock_copy_virtual_disk.assert_called_with(self._session,
dc_ref,
source_file,
dest_file)
if config_drive:
mock_configure_config_drive.assert_called_once_with(
self._context, self._instance, 'fake_vm_ref',
self._dc_info, self._ds, 'fake_files', 'password',
network_info)
mock_update_vnic_index.assert_called_once_with(
self._context, self._instance, network_info)
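    # _get_vm_config_info packs the image, datastore and datacenter details
    # into a VirtualMachineInstanceConfigInfo; an image larger than the
    # flavor root disk is rejected with InstanceUnacceptable.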
@mock.patch.object(ds_util, 'get_datastore')
@mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name')
def _test_get_spawn_vm_config_info(self,
mock_get_datacenter_ref_and_name,
mock_get_datastore,
image_size_bytes=0):
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size_bytes,
linked_clone=True)
mock_get_datastore.return_value = self._ds
mock_get_datacenter_ref_and_name.return_value = self._dc_info
extra_specs = vm_util.ExtraSpecs()
vi = self._vmops._get_vm_config_info(self._instance, image_info,
extra_specs)
self.assertEqual(image_info, vi.ii)
self.assertEqual(self._ds, vi.datastore)
self.assertEqual(self._instance.flavor.root_gb, vi.root_gb)
self.assertEqual(self._instance, vi.instance)
self.assertEqual(self._instance.uuid, vi.instance.uuid)
self.assertEqual(extra_specs, vi._extra_specs)
cache_image_path = '[%s] vmware_base/%s/%s.vmdk' % (
self._ds.name, self._image_id, self._image_id)
self.assertEqual(cache_image_path, str(vi.cache_image_path))
cache_image_folder = '[%s] vmware_base/%s' % (
self._ds.name, self._image_id)
self.assertEqual(cache_image_folder, str(vi.cache_image_folder))
def test_get_spawn_vm_config_info(self):
image_size = (self._instance.flavor.root_gb) * units.Gi / 2
self._test_get_spawn_vm_config_info(image_size_bytes=image_size)
def test_get_spawn_vm_config_info_image_too_big(self):
image_size = (self._instance.flavor.root_gb + 1) * units.Gi
self.assertRaises(exception.InstanceUnacceptable,
self._test_get_spawn_vm_config_info,
image_size_bytes=image_size)
def test_spawn(self):
self._test_spawn()
def test_spawn_config_drive_enabled(self):
self.flags(force_config_drive=True)
self._test_spawn(config_drive=True)
def test_spawn_with_block_device_info(self):
block_device_info = {
'block_device_mapping': [{'boot_index': 0,
'connection_info': 'fake',
'mount_device': '/dev/vda'}]
}
self._test_spawn(block_device_info=block_device_info)
def test_spawn_with_block_device_info_with_config_drive(self):
self.flags(force_config_drive=True)
block_device_info = {
'block_device_mapping': [{'boot_index': 0,
'connection_info': 'fake',
'mount_device': '/dev/vda'}]
}
self._test_spawn(block_device_info=block_device_info,
config_drive=True)
def _spawn_with_block_device_info_ephemerals(self, ephemerals):
block_device_info = {'ephemerals': ephemerals}
self._test_spawn(block_device_info=block_device_info)
def test_spawn_with_block_device_info_ephemerals(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
self._spawn_with_block_device_info_ephemerals(ephemerals)
def test_spawn_with_block_device_info_ephemerals_no_disk_bus(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': None,
'device_name': '/dev/vdb',
'size': 1}]
self._spawn_with_block_device_info_ephemerals(ephemerals)
def test_spawn_with_block_device_info_swap(self):
block_device_info = {'swap': {'disk_bus': None,
'swap_size': 512,
'device_name': '/dev/sdb'}}
self._test_spawn(block_device_info=block_device_info)
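    # Ephemeral and swap disks are created as separate thin-provisioned VMDKs
    # in the instance directory, in addition to the root disk.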
@mock.patch.object(vm_util, 'rename_vm')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, '_create_and_attach_thin_disk')
@mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@mock.patch(
'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
@mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
@mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_with_ephemerals_and_swap(self, from_image,
get_extra_specs,
get_vm_config_info,
build_virtual_machine,
enlist_image,
fetch_image,
use_disk_image,
create_and_attach_thin_disk,
power_on_instance,
rename_vm):
self._instance.flavor = objects.Flavor(vcpus=1, memory_mb=512,
name="m1.tiny", root_gb=1,
ephemeral_gb=1, swap=512,
extra_specs={})
extra_specs = self._vmops._get_extra_specs(self._instance.flavor)
ephemerals = [{'device_type': 'disk',
'disk_bus': None,
'device_name': '/dev/vdb',
'size': 1},
{'device_type': 'disk',
'disk_bus': None,
'device_name': '/dev/vdc',
'size': 1}]
swap = {'disk_bus': None, 'swap_size': 512, 'device_name': '/dev/vdd'}
bdi = {'block_device_mapping': [], 'root_device_name': '/dev/sda',
'ephemerals': ephemerals, 'swap': swap}
metadata = self._vmops._get_instance_metadata(self._context,
self._instance)
self.flags(enabled=False, group='vnc')
self.flags(flat_injected=False)
image_size = (self._instance.flavor.root_gb) * units.Gi / 2
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=image_size)
vi = get_vm_config_info.return_value
from_image.return_value = image_info
build_virtual_machine.return_value = 'fake-vm-ref'
self._vmops.spawn(self._context, self._instance, {},
injected_files=None, admin_password=None,
network_info=[], block_device_info=bdi)
from_image.assert_called_once_with(
self._context, self._instance.image_ref, {})
get_vm_config_info.assert_called_once_with(self._instance,
image_info, extra_specs)
build_virtual_machine.assert_called_once_with(self._instance,
image_info, vi.dc_info, vi.datastore, [], extra_specs, metadata)
enlist_image.assert_called_once_with(image_info.image_id,
vi.datastore, vi.dc_info.ref)
fetch_image.assert_called_once_with(self._context, vi)
use_disk_image.assert_called_once_with('fake-vm-ref', vi)
# _create_and_attach_thin_disk should be called for each ephemeral
# and swap disk
eph0_path = str(ds_obj.DatastorePath(vi.datastore.name,
self._uuid,
'ephemeral_0.vmdk'))
eph1_path = str(ds_obj.DatastorePath(vi.datastore.name,
self._uuid,
'ephemeral_1.vmdk'))
swap_path = str(ds_obj.DatastorePath(vi.datastore.name,
self._uuid,
'swap.vmdk'))
create_and_attach_thin_disk.assert_has_calls([
mock.call(self._instance, 'fake-vm-ref', vi.dc_info,
ephemerals[0]['size'] * units.Mi, vi.ii.adapter_type,
eph0_path),
mock.call(self._instance, 'fake-vm-ref', vi.dc_info,
ephemerals[1]['size'] * units.Mi, vi.ii.adapter_type,
eph1_path),
mock.call(self._instance, 'fake-vm-ref', vi.dc_info,
swap['swap_size'] * units.Ki, vi.ii.adapter_type,
swap_path)
])
power_on_instance.assert_called_once_with(self._session,
self._instance,
vm_ref='fake-vm-ref')
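    # Helper that builds a minimal VirtualMachineInstanceConfigInfo for the
    # disk-creation tests below.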
def _get_fake_vi(self):
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=7,
linked_clone=False)
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock.Mock())
return vi
@mock.patch.object(vm_util, 'create_virtual_disk')
def test_create_and_attach_thin_disk(self, mock_create):
vi = self._get_fake_vi()
self._vmops._volumeops = mock.Mock()
mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
path = str(ds_obj.DatastorePath(vi.datastore.name, self._uuid,
'fake-filename'))
self._vmops._create_and_attach_thin_disk(self._instance,
'fake-vm-ref',
vi.dc_info, 1,
'fake-adapter-type',
path)
mock_create.assert_called_once_with(
self._session, self._dc_info.ref, 'fake-adapter-type',
'thin', path, 1)
mock_attach_disk_to_vm.assert_called_once_with(
'fake-vm-ref', self._instance, 'fake-adapter-type',
'thin', path, 1, False)
def test_create_ephemeral_with_bdi(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
block_device_info = {'ephemerals': ephemerals}
vi = self._get_fake_vi()
with mock.patch.object(
self._vmops, '_create_and_attach_thin_disk') as mock_caa:
self._vmops._create_ephemeral(block_device_info,
self._instance,
'fake-vm-ref',
vi.dc_info, vi.datastore,
self._uuid,
vi.ii.adapter_type)
mock_caa.assert_called_once_with(
self._instance, 'fake-vm-ref',
vi.dc_info, 1 * units.Mi, 'virtio',
'[fake_ds] %s/ephemeral_0.vmdk' % self._uuid)
def _test_create_ephemeral_from_instance(self, bdi):
vi = self._get_fake_vi()
with mock.patch.object(
self._vmops, '_create_and_attach_thin_disk') as mock_caa:
self._vmops._create_ephemeral(bdi,
self._instance,
'fake-vm-ref',
vi.dc_info, vi.datastore,
self._uuid,
vi.ii.adapter_type)
mock_caa.assert_called_once_with(
self._instance, 'fake-vm-ref',
vi.dc_info, 1 * units.Mi, constants.DEFAULT_ADAPTER_TYPE,
'[fake_ds] %s/ephemeral_0.vmdk' % self._uuid)
def test_create_ephemeral_with_bdi_but_no_ephemerals(self):
block_device_info = {'ephemerals': []}
self._instance.flavor.ephemeral_gb = 1
self._test_create_ephemeral_from_instance(block_device_info)
def test_create_ephemeral_with_no_bdi(self):
self._instance.flavor.ephemeral_gb = 1
self._test_create_ephemeral_from_instance(None)
def _test_create_swap_from_instance(self, bdi):
vi = self._get_fake_vi()
flavor = objects.Flavor(vcpus=1, memory_mb=1024, ephemeral_gb=1,
swap=1024, extra_specs={})
self._instance.flavor = flavor
with mock.patch.object(
self._vmops, '_create_and_attach_thin_disk'
) as create_and_attach:
self._vmops._create_swap(bdi, self._instance, 'fake-vm-ref',
vi.dc_info, vi.datastore, self._uuid,
'lsiLogic')
size = flavor.swap * units.Ki
if bdi is not None:
swap = bdi.get('swap', {})
size = swap.get('swap_size', 0) * units.Ki
path = str(ds_obj.DatastorePath(vi.datastore.name, self._uuid,
'swap.vmdk'))
create_and_attach.assert_called_once_with(self._instance,
'fake-vm-ref', vi.dc_info, size, 'lsiLogic', path)
def test_create_swap_with_bdi(self):
block_device_info = {'swap': {'disk_bus': None,
'swap_size': 512,
'device_name': '/dev/sdb'}}
self._test_create_swap_from_instance(block_device_info)
def test_create_swap_with_no_bdi(self):
self._test_create_swap_from_instance(None)
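    # build_virtual_machine should create a VM whose name, CPU count, memory,
    # nvp.vm-uuid extraConfig entry, datastore and NIC all match the request.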
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
def test_build_virtual_machine(self, mock_create_folder):
image_id = nova.tests.unit.image.fake.get_valid_image_id()
image = images.VMwareImage(image_id=image_id)
extra_specs = vm_util.ExtraSpecs()
vm_ref = self._vmops.build_virtual_machine(self._instance,
image, self._dc_info,
self._ds,
self.network_info,
extra_specs,
self._metadata)
vm = vmwareapi_fake._get_object(vm_ref)
# Test basic VM parameters
self.assertEqual(self._instance.uuid, vm.name)
self.assertEqual(self._instance.uuid,
vm.get('summary.config.instanceUuid'))
self.assertEqual(self._instance_values['vcpus'],
vm.get('summary.config.numCpu'))
self.assertEqual(self._instance_values['memory_mb'],
vm.get('summary.config.memorySizeMB'))
# Test NSX config
for optval in vm.get('config.extraConfig').OptionValue:
if optval.key == 'nvp.vm-uuid':
self.assertEqual(self._instance_values['uuid'], optval.value)
break
else:
self.fail('nvp.vm-uuid not found in extraConfig')
# Test that the VM is associated with the specified datastore
datastores = vm.datastore.ManagedObjectReference
self.assertEqual(1, len(datastores))
datastore = vmwareapi_fake._get_object(datastores[0])
self.assertEqual(self._ds.name, datastore.get('summary.name'))
# Test that the VM's network is configured as specified
devices = vm.get('config.hardware.device').VirtualDevice
for device in devices:
if device.obj_name != 'ns0:VirtualE1000':
continue
self.assertEqual(self._network_values['address'],
device.macAddress)
break
else:
self.fail('NIC not configured')
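    # The tests below spawn with CPU, memory and VIF limits, reservations and
    # shares supplied through ExtraSpecs.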
def test_spawn_cpu_limit(self):
cpu_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_reservation(self):
cpu_limits = vm_util.Limits(reservation=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_allocations(self):
cpu_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_shares_level(self):
cpu_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_cpu_shares_custom(self):
cpu_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_limit(self):
memory_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_reservation(self):
memory_limits = vm_util.Limits(reservation=7)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_allocations(self):
memory_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_shares_level(self):
memory_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_memory_shares_custom(self):
memory_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_vif_limit(self):
vif_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_vif_reservation(self):
vif_limits = vm_util.Limits(reservation=7)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_vif_shares_level(self):
vif_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._test_spawn(extra_specs=extra_specs)
def test_spawn_vif_shares_custom(self):
vif_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._test_spawn(extra_specs=extra_specs)
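    # Extra specs parsing: flavor keys of the form quota:<resource>_<setting>
    # must be translated into the corresponding vm_util.Limits values.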
def _validate_extra_specs(self, expected, actual):
self.assertEqual(expected.cpu_limits.limit,
actual.cpu_limits.limit)
self.assertEqual(expected.cpu_limits.reservation,
actual.cpu_limits.reservation)
self.assertEqual(expected.cpu_limits.shares_level,
actual.cpu_limits.shares_level)
self.assertEqual(expected.cpu_limits.shares_share,
actual.cpu_limits.shares_share)
def _validate_flavor_extra_specs(self, flavor_extra_specs, expected):
# Validate that the extra specs are parsed correctly
flavor = objects.Flavor(name='my-flavor',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=flavor_extra_specs)
flavor_extra_specs = self._vmops._get_extra_specs(flavor, None)
self._validate_extra_specs(expected, flavor_extra_specs)
def test_extra_specs_cpu_limit(self):
flavor_extra_specs = {'quota:cpu_limit': 7}
cpu_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_reservations(self):
flavor_extra_specs = {'quota:cpu_reservation': 7}
cpu_limits = vm_util.Limits(reservation=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_allocations(self):
flavor_extra_specs = {'quota:cpu_limit': 7,
'quota:cpu_reservation': 6}
cpu_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_shares_level(self):
flavor_extra_specs = {'quota:cpu_shares_level': 'high'}
cpu_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_cpu_shares_custom(self):
flavor_extra_specs = {'quota:cpu_shares_level': 'custom',
'quota:cpu_shares_share': 1948}
cpu_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_vif_shares_custom_pos01(self):
flavor_extra_specs = {'quota:vif_shares_level': 'custom',
'quota:vif_shares_share': 40}
vif_limits = vm_util.Limits(shares_level='custom',
shares_share=40)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self._validate_flavor_extra_specs(flavor_extra_specs, extra_specs)
def test_extra_specs_vif_shares_with_invalid_level(self):
flavor_extra_specs = {'quota:vif_shares_level': 'high',
'quota:vif_shares_share': 40}
vif_limits = vm_util.Limits(shares_level='custom',
shares_share=40)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
self.assertRaises(exception.InvalidInput,
self._validate_flavor_extra_specs, flavor_extra_specs, extra_specs)
def _make_vm_config_info(self, is_iso=False, is_sparse_disk=False,
vsphere_location=None):
disk_type = (constants.DISK_TYPE_SPARSE if is_sparse_disk
else constants.DEFAULT_DISK_TYPE)
file_type = (constants.DISK_FORMAT_ISO if is_iso
else constants.DEFAULT_DISK_FORMAT)
image_info = images.VMwareImage(
image_id=self._image_id,
file_size=10 * units.Mi,
file_type=file_type,
disk_type=disk_type,
linked_clone=True,
vsphere_location=vsphere_location)
cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
mock_imagecache = mock.Mock()
mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
vi = vmops.VirtualMachineInstanceConfigInfo(
self._instance, image_info,
self._ds, self._dc_info, mock_imagecache)
return vi
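    # _fetch_image_if_missing picks the prepare/cache handlers that match the
    # image type (ISO, sparse or flat) and removes the temporary directory
    # once the image has been cached.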
@mock.patch.object(vmops.VMwareVMOps, 'check_cache_folder')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_as_file')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_iso_image')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_sparse_image')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_flat_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_iso_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_sparse_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_flat_image')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
@mock.patch.object(vmops.VMwareVMOps, '_update_image_size')
def _test_fetch_image_if_missing(self,
mock_update_image_size,
mock_delete_datastore_file,
mock_cache_flat_image,
mock_cache_sparse_image,
mock_cache_iso_image,
mock_prepare_flat_image,
mock_prepare_sparse_image,
mock_prepare_iso_image,
mock_fetch_image_as_file,
mock_check_cache_folder,
is_iso=False,
is_sparse_disk=False):
tmp_dir_path = mock.Mock()
tmp_image_path = mock.Mock()
if is_iso:
mock_prepare = mock_prepare_iso_image
mock_cache = mock_cache_iso_image
elif is_sparse_disk:
mock_prepare = mock_prepare_sparse_image
mock_cache = mock_cache_sparse_image
else:
mock_prepare = mock_prepare_flat_image
mock_cache = mock_cache_flat_image
mock_prepare.return_value = tmp_dir_path, tmp_image_path
vi = self._make_vm_config_info(is_iso, is_sparse_disk)
self._vmops._fetch_image_if_missing(self._context, vi)
mock_check_cache_folder.assert_called_once_with(
self._ds.name, self._ds.ref)
mock_prepare.assert_called_once_with(vi)
mock_fetch_image_as_file.assert_called_once_with(
self._context, vi, tmp_image_path)
mock_cache.assert_called_once_with(vi, tmp_image_path)
mock_delete_datastore_file.assert_called_once_with(
str(tmp_dir_path), self._dc_info.ref)
if is_sparse_disk:
mock_update_image_size.assert_called_once_with(vi)
def test_fetch_image_if_missing(self):
self._test_fetch_image_if_missing()
def test_fetch_image_if_missing_with_sparse(self):
self._test_fetch_image_if_missing(
is_sparse_disk=True)
def test_fetch_image_if_missing_with_iso(self):
self._test_fetch_image_if_missing(
is_iso=True)
def test_get_esx_host_and_cookies(self):
datastore = mock.Mock()
datastore.get_connected_hosts.return_value = ['fira-host']
file_path = mock.Mock()
def fake_invoke(module, method, *args, **kwargs):
if method == 'AcquireGenericServiceTicket':
ticket = mock.Mock()
ticket.id = 'fira-ticket'
return ticket
elif method == 'get_object_property':
return 'fira-host'
with mock.patch.object(self._session, 'invoke_api', fake_invoke):
result = self._vmops._get_esx_host_and_cookies(datastore,
'ha-datacenter',
file_path)
self.assertEqual('fira-host', result[0])
cookies = result[1]
self.assertEqual(1, len(cookies))
self.assertEqual('vmware_cgi_ticket', cookies[0].name)
self.assertEqual('"fira-ticket"', cookies[0].value)
def test_fetch_vsphere_image(self):
vsphere_location = 'vsphere://my?dcPath=mycenter&dsName=mystore'
vi = self._make_vm_config_info(vsphere_location=vsphere_location)
image_ds_loc = mock.Mock()
datacenter_moref = mock.Mock()
fake_copy_task = mock.Mock()
with test.nested(
mock.patch.object(
self._session, 'invoke_api',
side_effect=[datacenter_moref, fake_copy_task]),
mock.patch.object(self._session, '_wait_for_task')) as (
invoke_api, wait_for_task):
self._vmops._fetch_vsphere_image(self._context, vi, image_ds_loc)
expected_calls = [
mock.call(
self._session.vim, 'FindByInventoryPath',
self._session.vim.service_content.searchIndex,
inventoryPath='mycenter'),
mock.call(self._session.vim, 'CopyDatastoreFile_Task',
self._session.vim.service_content.fileManager,
destinationDatacenter=self._dc_info.ref,
destinationName=str(image_ds_loc),
sourceDatacenter=datacenter_moref,
sourceName='[mystore]')]
invoke_api.assert_has_calls(expected_calls)
wait_for_task.assert_called_once_with(fake_copy_task)
@mock.patch.object(images, 'fetch_image')
@mock.patch.object(vmops.VMwareVMOps, '_get_esx_host_and_cookies')
def test_fetch_image_as_file(self,
mock_get_esx_host_and_cookies,
mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
host = mock.Mock()
dc_name = 'ha-datacenter'
cookies = mock.Mock()
mock_get_esx_host_and_cookies.return_value = host, cookies
self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
mock_get_esx_host_and_cookies.assert_called_once_with(
vi.datastore,
dc_name,
image_ds_loc.rel_path)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
host,
self._session._port,
dc_name,
self._ds.name,
image_ds_loc.rel_path,
cookies=cookies)
@mock.patch.object(vutil, 'get_inventory_path')
@mock.patch.object(images, 'fetch_image')
@mock.patch.object(vmops.VMwareVMOps, '_get_esx_host_and_cookies')
def test_fetch_image_as_file_exception(self,
mock_get_esx_host_and_cookies,
mock_fetch_image,
mock_get_inventory_path):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
dc_name = 'ha-datacenter'
mock_get_esx_host_and_cookies.side_effect = \
exception.HostNotFound(host='')
mock_get_inventory_path.return_value = self._dc_info.name
self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
mock_get_esx_host_and_cookies.assert_called_once_with(
vi.datastore,
dc_name,
image_ds_loc.rel_path)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
self._session._host,
self._session._port,
self._dc_info.name,
self._ds.name,
image_ds_loc.rel_path,
cookies='Fake-CookieJar')
@mock.patch.object(images, 'fetch_image_stream_optimized',
return_value=123)
def test_fetch_image_as_vapp(self, mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
image_ds_loc.parent.basename = 'fake-name'
self._vmops._fetch_image_as_vapp(self._context, vi, image_ds_loc)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
self._session,
'fake-name',
self._ds.name,
vi.dc_info.vmFolder,
self._vmops._root_resource_pool)
self.assertEqual(vi.ii.file_size, 123)
@mock.patch.object(images, 'fetch_image_ova', return_value=123)
def test_fetch_image_as_ova(self, mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
image_ds_loc.parent.basename = 'fake-name'
self._vmops._fetch_image_as_ova(self._context, vi, image_ds_loc)
mock_fetch_image.assert_called_once_with(
self._context,
vi.instance,
self._session,
'fake-name',
self._ds.name,
vi.dc_info.vmFolder,
self._vmops._root_resource_pool)
self.assertEqual(vi.ii.file_size, 123)
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_iso_image(self, mock_generate_uuid):
vi = self._make_vm_config_info(is_iso=True)
tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_iso_image(vi)
expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s.iso' % (
self._ds.name, self._image_id, self._image_id)
self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
@mock.patch.object(ds_util, 'mkdir')
def test_prepare_sparse_image(self, mock_mkdir, mock_generate_uuid):
vi = self._make_vm_config_info(is_sparse_disk=True)
tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_sparse_image(vi)
expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s' % (
self._ds.name, self._image_id, "tmp-sparse.vmdk")
self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
mock_mkdir.assert_called_once_with(self._session,
tmp_image_ds_loc.parent,
vi.dc_info.ref)
@mock.patch.object(ds_util, 'mkdir')
@mock.patch.object(vm_util, 'create_virtual_disk')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_flat_image(self,
mock_generate_uuid,
mock_delete_datastore_file,
mock_create_virtual_disk,
mock_mkdir):
vi = self._make_vm_config_info()
tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_flat_image(vi)
expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
self._ds.name, self._image_id, self._image_id)
expected_image_path_parent = '[%s] vmware_temp/tmp-uuid/%s' % (
self._ds.name, self._image_id)
expected_path_to_create = '[%s] vmware_temp/tmp-uuid/%s/%s.vmdk' % (
self._ds.name, self._image_id, self._image_id)
mock_mkdir.assert_called_once_with(
self._session, DsPathMatcher(expected_image_path_parent),
self._dc_info.ref)
self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
image_info = vi.ii
mock_create_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
image_info.adapter_type,
image_info.disk_type,
DsPathMatcher(expected_path_to_create),
image_info.file_size_in_kb)
mock_delete_datastore_file.assert_called_once_with(
DsPathMatcher(expected_image_path),
self._dc_info.ref)
@mock.patch.object(ds_util, 'file_move')
def test_cache_iso_image(self, mock_file_move):
vi = self._make_vm_config_info(is_iso=True)
tmp_image_ds_loc = mock.Mock()
self._vmops._cache_iso_image(vi, tmp_image_ds_loc)
mock_file_move.assert_called_once_with(
self._session, self._dc_info.ref,
tmp_image_ds_loc.parent,
DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
@mock.patch.object(ds_util, 'file_move')
def test_cache_flat_image(self, mock_file_move):
vi = self._make_vm_config_info()
tmp_image_ds_loc = mock.Mock()
self._vmops._cache_flat_image(vi, tmp_image_ds_loc)
mock_file_move.assert_called_once_with(
self._session, self._dc_info.ref,
tmp_image_ds_loc.parent,
DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'mkdir')
def test_cache_stream_optimized_image(self, mock_mkdir, mock_disk_move):
vi = self._make_vm_config_info()
self._vmops._cache_stream_optimized_image(vi, mock.sentinel.tmp_image)
mock_mkdir.assert_called_once_with(
self._session,
DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id),
self._dc_info.ref)
mock_disk_move.assert_called_once_with(
self._session, self._dc_info.ref,
mock.sentinel.tmp_image,
DsPathMatcher('[fake_ds] vmware_base/%s/%s.vmdk' %
(self._image_id, self._image_id)))
@mock.patch.object(ds_util, 'file_move')
@mock.patch.object(vm_util, 'copy_virtual_disk')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
def test_cache_sparse_image(self,
mock_delete_datastore_file,
mock_copy_virtual_disk,
mock_file_move):
vi = self._make_vm_config_info(is_sparse_disk=True)
sparse_disk_path = "[%s] vmware_temp/tmp-uuid/%s/tmp-sparse.vmdk" % (
self._ds.name, self._image_id)
tmp_image_ds_loc = ds_obj.DatastorePath.parse(sparse_disk_path)
self._vmops._cache_sparse_image(vi, tmp_image_ds_loc)
target_disk_path = "[%s] vmware_temp/tmp-uuid/%s/%s.vmdk" % (
self._ds.name,
self._image_id, self._image_id)
mock_copy_virtual_disk.assert_called_once_with(
self._session, self._dc_info.ref,
sparse_disk_path,
DsPathMatcher(target_disk_path))
def test_get_storage_policy_none(self):
flavor = objects.Flavor(name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs={})
self.flags(pbm_enabled=True,
pbm_default_policy='fake-policy', group='vmware')
extra_specs = self._vmops._get_extra_specs(flavor, None)
self.assertEqual('fake-policy', extra_specs.storage_policy)
def test_get_storage_policy_extra_specs(self):
extra_specs = {'vmware:storage_policy': 'flavor-policy'}
flavor = objects.Flavor(name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=extra_specs)
self.flags(pbm_enabled=True,
pbm_default_policy='default-policy', group='vmware')
extra_specs = self._vmops._get_extra_specs(flavor, None)
self.assertEqual('flavor-policy', extra_specs.storage_policy)
def test_get_base_folder_not_set(self):
self.flags(image_cache_subdirectory_name='vmware_base')
base_folder = self._vmops._get_base_folder()
self.assertEqual('vmware_base', base_folder)
def test_get_base_folder_host_ip(self):
self.flags(my_ip='7.7.7.7',
image_cache_subdirectory_name='_base')
base_folder = self._vmops._get_base_folder()
self.assertEqual('7.7.7.7_base', base_folder)
def test_get_base_folder_cache_prefix(self):
self.flags(cache_prefix='my_prefix', group='vmware')
self.flags(image_cache_subdirectory_name='_base')
base_folder = self._vmops._get_base_folder()
self.assertEqual('my_prefix_base', base_folder)
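    # Reboot: a soft reboot issues RebootGuest through the guest tools, while
    # a hard reboot issues ResetVM_Task and waits for the task to complete.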
def _test_reboot_vm(self, reboot_type="SOFT", tool_status=True):
expected_methods = ['get_object_properties_dict']
if reboot_type == "SOFT":
expected_methods.append('RebootGuest')
else:
expected_methods.append('ResetVM_Task')
def fake_call_method(module, method, *args, **kwargs):
expected_method = expected_methods.pop(0)
self.assertEqual(expected_method, method)
if expected_method == 'get_object_properties_dict' and tool_status:
return {
"runtime.powerState": "poweredOn",
"summary.guest.toolsStatus": "toolsOk",
"summary.guest.toolsRunningStatus": "guestToolsRunning"}
elif expected_method == 'get_object_properties_dict':
return {"runtime.powerState": "poweredOn"}
elif expected_method == 'ResetVM_Task':
return 'fake-task'
with test.nested(
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-vm-ref'),
mock.patch.object(self._session, "_call_method",
fake_call_method),
mock.patch.object(self._session, "_wait_for_task")
) as (_get_vm_ref, fake_call_method, _wait_for_task):
self._vmops.reboot(self._instance, self.network_info, reboot_type)
_get_vm_ref.assert_called_once_with(self._session,
self._instance)
if reboot_type == "HARD":
_wait_for_task.assert_has_calls([
mock.call('fake-task')])
def test_reboot_vm_soft(self):
self._test_reboot_vm()
def test_reboot_vm_hard_toolstatus(self):
self._test_reboot_vm(reboot_type="HARD", tool_status=False)
def test_reboot_vm_hard(self):
self._test_reboot_vm(reboot_type="HARD")
def test_get_instance_metadata(self):
flavor = objects.Flavor(id=7,
name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs={})
self._instance.flavor = flavor
metadata = self._vmops._get_instance_metadata(
self._context, self._instance)
expected = ("name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.small\n"
"flavor:memory_mb:6\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
"flavor:swap:33550336\n"
"imageid:70a599e0-31e7-49b7-b260-868f441e862b\n"
"package:%s\n" % version.version_string_with_package())
self.assertEqual(expected, metadata)
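    # attach_interface/detach_interface reconfigure the VM with the generated
    # attach/detach spec and update the instance's vnic index via the
    # network API.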
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_network_attach_config_spec',
return_value='fake-attach-spec')
@mock.patch.object(vm_util, 'get_attach_port_index', return_value=1)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_attach_interface(self, mock_get_vm_ref,
mock_get_attach_port_index,
mock_get_network_attach_config_spec,
mock_reconfigure_vm,
mock_extra_specs):
_network_api = mock.Mock()
self._vmops._network_api = _network_api
vif_info = vif.get_vif_dict(self._session, self._cluster,
'VirtualE1000',
utils.is_neutron(),
self._network_values)
extra_specs = vm_util.ExtraSpecs()
mock_extra_specs.return_value = extra_specs
self._vmops.attach_interface(self._instance, self._image_meta,
self._network_values)
mock_get_vm_ref.assert_called_once_with(self._session, self._instance)
        mock_get_attach_port_index.assert_called_once_with(
            self._session, 'fake-ref')
mock_get_network_attach_config_spec.assert_called_once_with(
self._session.vim.client.factory, vif_info, 1,
extra_specs.vif_limits)
mock_reconfigure_vm.assert_called_once_with(self._session,
'fake-ref',
'fake-attach-spec')
        _network_api.update_instance_vnic_index.assert_called_once_with(
            mock.ANY, self._instance, self._network_values, 1)
@mock.patch.object(vif, 'get_network_device', return_value='device')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_network_detach_config_spec',
return_value='fake-detach-spec')
@mock.patch.object(vm_util, 'get_vm_detach_port_index', return_value=1)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_detach_interface(self, mock_get_vm_ref,
mock_get_detach_port_index,
mock_get_network_detach_config_spec,
mock_reconfigure_vm,
mock_get_network_device):
_network_api = mock.Mock()
self._vmops._network_api = _network_api
with mock.patch.object(self._session, '_call_method',
return_value='hardware-devices'):
self._vmops.detach_interface(self._instance, self._network_values)
mock_get_vm_ref.assert_called_once_with(self._session, self._instance)
        self.assertTrue(mock_get_detach_port_index.called)
mock_get_network_detach_config_spec.assert_called_once_with(
self._session.vim.client.factory, 'device', 1)
mock_reconfigure_vm.assert_called_once_with(self._session,
'fake-ref',
'fake-detach-spec')
        _network_api.update_instance_vnic_index.assert_called_once_with(
            mock.ANY, self._instance, self._network_values, None)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_get_mks_console(self, mock_get_vm_ref):
ticket = mock.MagicMock()
ticket.host = 'esx1'
ticket.port = 902
ticket.ticket = 'fira'
ticket.sslThumbprint = 'aa:bb:cc:dd:ee:ff'
ticket.cfgFile = '[ds1] fira/foo.vmx'
with mock.patch.object(self._session, '_call_method',
return_value=ticket):
console = self._vmops.get_mks_console(self._instance)
self.assertEqual('esx1', console.host)
self.assertEqual(902, console.port)
path = jsonutils.loads(console.internal_access_path)
self.assertEqual('fira', path['ticket'])
self.assertEqual('aabbccddeeff', path['thumbprint'])
self.assertEqual('[ds1] fira/foo.vmx', path['cfgFile'])
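    # hw:cpu_sockets divides the flavor vcpus into sockets: 28 vCPUs across
    # 7 sockets gives 4 cores per socket.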
def test_get_cores_per_socket(self):
extra_specs = {'hw:cpu_sockets': 7}
flavor = objects.Flavor(name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=extra_specs)
extra_specs = self._vmops._get_extra_specs(flavor, None)
self.assertEqual(4, int(extra_specs.cores_per_socket))
def test_get_folder_name(self):
uuid = uuidutils.generate_uuid()
name = 'fira'
expected = 'fira (%s)' % uuid
folder_name = self._vmops._get_folder_name(name, uuid)
self.assertEqual(expected, folder_name)
name = 'X' * 255
expected = '%s (%s)' % ('X' * 40, uuid)
folder_name = self._vmops._get_folder_name(name, uuid)
self.assertEqual(expected, folder_name)
self.assertEqual(79, len(folder_name))
@mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_network_attach_config_spec',
return_value='fake-attach-spec')
@mock.patch.object(vm_util, 'get_attach_port_index', return_value=1)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_attach_interface_with_limits(self, mock_get_vm_ref,
mock_get_attach_port_index,
mock_get_network_attach_config_spec,
mock_reconfigure_vm,
mock_extra_specs):
_network_api = mock.Mock()
self._vmops._network_api = _network_api
vif_info = vif.get_vif_dict(self._session, self._cluster,
'VirtualE1000',
utils.is_neutron(),
self._network_values)
vif_limits = vm_util.Limits(shares_level='custom',
shares_share=40)
extra_specs = vm_util.ExtraSpecs(vif_limits=vif_limits)
mock_extra_specs.return_value = extra_specs
self._vmops.attach_interface(self._instance, self._image_meta,
self._network_values)
mock_get_vm_ref.assert_called_once_with(self._session, self._instance)
        mock_get_attach_port_index.assert_called_once_with(
            self._session, 'fake-ref')
mock_get_network_attach_config_spec.assert_called_once_with(
self._session.vim.client.factory, vif_info, 1,
extra_specs.vif_limits)
mock_reconfigure_vm.assert_called_once_with(self._session,
'fake-ref',
'fake-attach-spec')
        _network_api.update_instance_vnic_index.assert_called_once_with(
            mock.ANY, self._instance, self._network_values, 1)