hexsha
stringlengths 40
40
| size
int64 3
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
972
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
972
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
972
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 3
1.03M
| avg_line_length
float64 1.13
941k
| max_line_length
int64 2
941k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
026d2e3c5400ba7779e80350198ab4ac1f1d1a70
| 1,712 |
py
|
Python
|
lark/grammar.py
|
dsd/lark
|
01cfe322a14f36228ff3dab0421d1f1108b321d7
|
[
"MIT"
] | 1 |
2018-09-16T22:26:42.000Z
|
2018-09-16T22:26:42.000Z
|
lark/grammar.py
|
dsd/lark
|
01cfe322a14f36228ff3dab0421d1f1108b321d7
|
[
"MIT"
] | null | null | null |
lark/grammar.py
|
dsd/lark
|
01cfe322a14f36228ff3dab0421d1f1108b321d7
|
[
"MIT"
] | null | null | null |
class Symbol(object):
    """Base class for grammar symbols; equality combines kind and name."""

    # Subclasses declare whether they represent a terminal.
    is_term = NotImplemented

    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        assert isinstance(other, Symbol), other
        return (self.is_term, self.name) == (other.is_term, other.name)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash ignores is_term; Terminal/NonTerminal of the same name may
        # collide, which is legal since they compare unequal.
        return hash(self.name)

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.name)

    fullrepr = property(__repr__)
class Terminal(Symbol):
    """A terminal grammar symbol (token type).

    Attributes:
        filter_out: when True, tokens matching this terminal are dropped
            from the resulting parse tree.
    """
    is_term = True

    def __init__(self, name, filter_out=False):
        # Delegate name handling to Symbol instead of duplicating the
        # assignment (keeps any future Symbol.__init__ logic in one place).
        super(Terminal, self).__init__(name)
        self.filter_out = filter_out

    @property
    def fullrepr(self):
        # Unlike __repr__, also shows the filter_out flag.
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out)
class NonTerminal(Symbol):
    """A non-terminal grammar symbol (rule name)."""
    is_term = False
class Rule(object):
    """A grammar production.

    origin: the symbol on the left-hand side.
    expansion: the list of symbols it expands to.
    alias: optional alternative name for the rule.
    options: optional RuleOptions instance.
    """

    def __init__(self, origin, expansion, alias=None, options=None):
        self.origin = origin
        self.expansion = expansion
        self.alias = alias
        self.options = options

    def __str__(self):
        expanded = ' '.join(str(sym) for sym in self.expansion)
        return '<%s : %s>' % (self.origin, expanded)

    def __repr__(self):
        return 'Rule(%r, %r, %r, %r)' % (
            self.origin, self.expansion, self.alias, self.options)
class RuleOptions:
    """Per-rule flags controlling tree construction and rule priority."""

    def __init__(self, keep_all_tokens=False, expand1=False, priority=None):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1
        self.priority = priority

    def __repr__(self):
        flags = (self.keep_all_tokens, self.expand1, self.priority)
        return 'RuleOptions(%r, %r, %r)' % flags
| 25.552239 | 95 | 0.603972 |
5e040dd4aa9c94494a44573e13ea9b777fc1afa2
| 9,160 |
py
|
Python
|
icekit/publishing/middleware.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 52 |
2016-09-13T03:50:58.000Z
|
2022-02-23T16:25:08.000Z
|
icekit/publishing/middleware.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 304 |
2016-08-11T14:17:30.000Z
|
2020-07-22T13:35:18.000Z
|
icekit/publishing/middleware.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 12 |
2016-09-21T18:46:35.000Z
|
2021-02-15T19:37:50.000Z
|
import inspect
from contextlib import contextmanager
from threading import current_thread
from django.core.urlresolvers import Resolver404, resolve
from django.http import HttpResponseRedirect
from .utils import get_draft_url, verify_draft_url
class PublishingMiddleware(object):
    """
    Publishing middleware to set status flags and apply features:
    - permit members of the "Content Reviewers" group to view drafts
    - track whether this middleware has been activated for the current
      thread, so we can tell when it is safe to trust the status it reports
    - store the current user for use within the publishing manager where
      we do not have access to the ``request`` object.
    - set draft status flag if request context permits viewing drafts.
    """
    # Per-thread state, keyed by the thread object. Entries are set in
    # process_request and removed in process_response.
    _draft_request_context = {}
    _middleware_active_status = {}
    _current_user = {}
    # Dotted-path view names that are always treated as draft-only.
    _draft_only_views = [
    ]

    @staticmethod
    def is_admin_request(request):
        """Return True if the request resolves to the Django admin app."""
        try:
            return resolve(request.path).app_name == 'admin'
        except Resolver404:
            return False

    @staticmethod
    def is_api_request(request):
        """Return True if the request targets the API, by path or subdomain."""
        # Match API requests via a URL path like /api/
        try:
            if resolve(request.path).app_name == 'icekit-api':
                return True
        except Resolver404:
            pass
        # Match API requests via a django-hosts subdomain like api.HOSTNAME
        # (request.host only exists when django-hosts is installed).
        try:
            if request.host.urlconf == 'icekit.api.urls':
                return True
        except AttributeError:
            pass
        return False

    @staticmethod
    def is_draft_only_view(request):
        """Return True if the resolved view is registered as draft-only."""
        resolved = resolve(request.path)
        if inspect.isfunction(resolved.func):
            view_name = resolved.func.__name__
        else: # Possible class view
            view_name = type(resolved.func).__name__
        name = '%s.%s' % (resolved.func.__module__, view_name)
        return name in PublishingMiddleware._draft_only_views

    @staticmethod
    def is_content_reviewer_user(request):
        """Return True for authenticated members of "Content Reviewers"."""
        return request.user.is_authenticated() \
            and request.user.groups.filter(name='Content Reviewers').exists()

    @staticmethod
    def is_staff_user(request):
        """Return True for authenticated staff users."""
        return request.user.is_authenticated() and request.user.is_staff

    @staticmethod
    def is_draft_request(request):
        """ Is this request explicly flagged as for draft content? """
        return 'preview' in request.GET \
            or 'edit' in request.GET # TODO Support legacy 'edit' name for now

    @staticmethod
    def is_draft(request):
        """
        A request is considered to be in draft mode if:
        - it is for *any* admin resource, since the admin site deals only with
          draft objects and hides the published version from admin users
        - it is for *any* view in *any* app that deals only with draft objects
        - user is a member of the "Content Reviewer" group, since content
          reviewers' sole purpose is to review draft content and they need not
          see the published content
        - the user is a staff member and therefore can see draft versions of
          pages if they wish, and the 'preview' GET parameter flag is included
          to show the draft page is definitely wanted instead of a normal
          published page.
        - the 'preview' GET parameter flag is included with a valid HMAC for
          the requested URL, regardless of authenticated permissions.
        """
        # Admin resource requested.
        if PublishingMiddleware.is_admin_request(request):
            return True
        # API resource requested.
        if PublishingMiddleware.is_api_request(request):
            return True
        # Draft-only view requested.
        if PublishingMiddleware.is_draft_only_view(request):
            return True
        # Content reviewer made request.
        if PublishingMiddleware.is_content_reviewer_user(request):
            return True
        # Draft mode requested.
        if PublishingMiddleware.is_draft_request(request):
            # User is staff.
            if PublishingMiddleware.is_staff_user(request):
                return True
            # Request contains a valid draft mode HMAC in the querystring.
            if verify_draft_url(request.get_full_path()):
                return True
        # Not draft mode.
        return False

    def process_request(self, request):
        """Record per-thread draft/user state and flag the request."""
        is_draft = self.is_draft(request)
        # Redirect non-admin, GET method, draft mode requests, from staff users
        # (not content reviewers), that don't have a valid draft mode HMAC in
        # the querystring, to make URL sharing easy.
        if all([
            not PublishingMiddleware.is_admin_request(request),
            not PublishingMiddleware.is_api_request(request),
            request.method == 'GET',
            is_draft,
            PublishingMiddleware.is_staff_user(request),
            not PublishingMiddleware.is_content_reviewer_user(request),
            not verify_draft_url(request.get_full_path()),
        ]):
            return HttpResponseRedirect(get_draft_url(request.get_full_path()))
        # Set middleware active status.
        PublishingMiddleware \
            ._middleware_active_status[current_thread()] = True
        # Set current user
        PublishingMiddleware._current_user[current_thread()] = \
            request.user
        # Set draft status
        PublishingMiddleware._draft_request_context[current_thread()] = \
            is_draft
        # Add draft status to request, for use in templates.
        request.IS_DRAFT = is_draft

    @staticmethod
    def process_response(request, response):
        """Clear the per-thread state set in process_request."""
        try:
            del PublishingMiddleware._middleware_active_status[
                current_thread()]
        except KeyError:
            pass
        try:
            del PublishingMiddleware._current_user[current_thread()]
        except KeyError:
            pass
        try:
            del PublishingMiddleware._draft_request_context[current_thread()]
        except KeyError:
            pass
        return PublishingMiddleware.redirect_staff_to_draft_view_on_404(
            request, response)

    @staticmethod
    def is_publishing_middleware_active():
        """Return True once process_request has run on this thread."""
        try:
            return PublishingMiddleware._middleware_active_status[
                current_thread()]
        except KeyError:
            return False

    @staticmethod
    def get_current_user():
        """Return the user stored for this thread, or None outside a request."""
        try:
            return PublishingMiddleware._current_user[current_thread()]
        except KeyError:
            return None

    @staticmethod
    def is_draft_request_context():
        """Return the draft flag stored for this thread (False by default)."""
        try:
            return PublishingMiddleware._draft_request_context[
                current_thread()]
        except KeyError:
            return False

    @staticmethod
    def redirect_staff_to_draft_view_on_404(request, response):
        """
        When a request fails with a 404, redirect to a (potential) draft
        version of the resource if the user is a staff member permitted to view
        drafts.
        """
        if (response.status_code == 404
                # No point redirecting if we already have a draft request
                and not PublishingMiddleware.is_draft_request(request)
                # Don't mess with admin requests at all
                and not PublishingMiddleware.is_admin_request(request)
                # Don't mess with API requests at all
                and not PublishingMiddleware.is_api_request(request)
                # Can user view draft content if we add the 'preview' param
                and PublishingMiddleware.is_staff_user(request)):
            # TODO Is there a sane way to check for draft version of resource
            # at this URL path, without just redirecting the user to it?
            return HttpResponseRedirect(get_draft_url(request.get_full_path()))
        return response
def is_publishing_middleware_active():
    """Module-level convenience wrapper for the middleware-active flag."""
    return PublishingMiddleware.is_publishing_middleware_active()
def set_publishing_middleware_active(status):
    """Force the middleware-active flag for the current thread (e.g. tests)."""
    thread = current_thread()
    PublishingMiddleware._middleware_active_status[thread] = status
def is_draft_request_context():
    """Module-level convenience wrapper for the per-thread draft flag."""
    return PublishingMiddleware.is_draft_request_context()
def set_draft_request_context(status):
    """Force the draft flag for the current thread (e.g. tests)."""
    thread = current_thread()
    PublishingMiddleware._draft_request_context[thread] = status
def get_current_user():
    """Module-level convenience wrapper for the per-thread current user."""
    return PublishingMiddleware.get_current_user()
def set_current_user(user):
    """Force the current user for this thread (e.g. tests / shell)."""
    thread = current_thread()
    PublishingMiddleware._current_user[thread] = user
@contextmanager
def override_draft_request_context(status):
    """Temporarily set the per-thread draft flag, restoring it afterwards.

    BUG FIX: the original did not restore the previous value if the wrapped
    block raised; try/finally guarantees restoration on all exit paths.
    """
    original = is_draft_request_context()
    set_draft_request_context(status)
    try:
        yield
    finally:
        set_draft_request_context(original)
@contextmanager
def override_publishing_middleware_active(status):
    """Temporarily set the middleware-active flag, restoring it afterwards.

    BUG FIX: the original did not restore the previous value if the wrapped
    block raised; try/finally guarantees restoration on all exit paths.
    """
    original = is_publishing_middleware_active()
    set_publishing_middleware_active(status)
    try:
        yield
    finally:
        set_publishing_middleware_active(original)
@contextmanager
def override_current_user(user):
    """Temporarily set the per-thread current user, restoring it afterwards.

    BUG FIX: the original did not restore the previous user if the wrapped
    block raised; try/finally guarantees restoration on all exit paths.
    """
    original = get_current_user()
    set_current_user(user)
    try:
        yield
    finally:
        set_current_user(original)
| 36.494024 | 79 | 0.66714 |
03ed3d8fdbfa774bd5971ae8c67e876a00cd74ba
| 1,431 |
bzl
|
Python
|
ppx/_config/settings.bzl
|
layus/rules_ocaml
|
bbcc7bfe3787038b2207e07a1e795fcab32dc676
|
[
"Apache-2.0"
] | 14 |
2020-12-05T15:00:37.000Z
|
2022-01-03T23:41:51.000Z
|
ppx/_config/settings.bzl
|
layus/rules_ocaml
|
bbcc7bfe3787038b2207e07a1e795fcab32dc676
|
[
"Apache-2.0"
] | 43 |
2021-01-04T13:17:14.000Z
|
2021-11-22T14:26:50.000Z
|
ppx/_config/settings.bzl
|
layus/rules_ocaml
|
bbcc7bfe3787038b2207e07a1e795fcab32dc676
|
[
"Apache-2.0"
] | 4 |
2021-05-31T14:28:13.000Z
|
2021-11-19T09:13:55.000Z
|
load("//ocaml:providers.bzl",
"PpxCompilationModeSettingProvider",
"PpxPrintSettingProvider"
)
################################################################
def _ppx_compilation_mode_impl(ctx):
    """Validates and exposes the PPX compilation-mode build setting."""
    if ctx.build_setting_value not in ["native", "bytecode"]:
        # BUG FIX: the message previously said "@ppx//print" (copy-pasted
        # from _ppx_print_impl), pointing users at the wrong flag.
        fail("Bad value for @ppx//compilation_mode. Allowed values: native | bytecode")
    return PpxCompilationModeSettingProvider(value = ctx.build_setting_value)
# User-settable command-line flag variant (config.string(flag = True)).
ppx_compilation_mode_flag = rule(
    implementation = _ppx_compilation_mode_impl,
    build_setting = config.string(flag = True),
    doc = "Compilation mode command-line option: native or bytecode",
)
# Non-flag variant: a constant build setting, not settable on the command line.
ppx_compilation_mode_setting = rule(
    implementation = _ppx_compilation_mode_impl,
    build_setting = config.string(),
    doc = "Compilation mode constant setting.",
)
################################################################
def _ppx_print_impl(ctx):
    """Validates and exposes the PPX output-format build setting."""
    value = ctx.build_setting_value
    if value == "binary" or value == "text":
        return PpxPrintSettingProvider(value = value)
    fail("Bad value for @ppx//print. Allowed values: binary | text")
# User-settable command-line flag variant (config.string(flag = True)).
ppx_print_flag = rule(
    implementation = _ppx_print_impl,
    build_setting = config.string(flag = True),
    doc = "PPX output format command-line option: binary or text.",
)
# Non-flag variant: a constant build setting, not settable on the command line.
ppx_print_setting = rule(
    implementation = _ppx_print_impl,
    build_setting = config.string(),
    doc = "PPX output format constant setting."
)
| 34.902439 | 77 | 0.662474 |
c5644414ad2ba78c87021c7cd6e1171be2e34066
| 1,671 |
py
|
Python
|
restbot/services/models.py
|
cwerner/restbot
|
ad5546e3b60dba395adbca4e2c51d29b12565a24
|
[
"Apache-2.0"
] | null | null | null |
restbot/services/models.py
|
cwerner/restbot
|
ad5546e3b60dba395adbca4e2c51d29b12565a24
|
[
"Apache-2.0"
] | null | null | null |
restbot/services/models.py
|
cwerner/restbot
|
ad5546e3b60dba395adbca4e2c51d29b12565a24
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
import joblib
import numpy as np
from loguru import logger
from restbot.core.messages import NO_VALID_PAYLOAD
from restbot.models.payload import (HousePredictionPayload,
payload_to_list)
from restbot.models.prediction import HousePredictionResult
class HousePriceModel(object):
    """Wraps a joblib-persisted regression model for house-price prediction."""

    # Model output is in units of $100k; scale up for a human-readable value.
    RESULT_UNIT_FACTOR = 100000

    def __init__(self, path):
        self.path = path
        self._load_local_model()

    def _load_local_model(self):
        # Deserialize the estimator persisted at self.path.
        self.model = joblib.load(self.path)

    def _pre_process(self, payload: HousePredictionPayload) -> List:
        """Convert an incoming payload into a single-row feature array."""
        logger.debug("Pre-processing payload.")
        return np.asarray(payload_to_list(payload)).reshape(1, -1)

    def _post_process(self, prediction: np.ndarray) -> HousePredictionResult:
        """Scale the raw model output into a result object."""
        logger.debug("Post-processing prediction.")
        as_list = prediction.tolist()
        human_readable = as_list[0] * self.RESULT_UNIT_FACTOR
        return HousePredictionResult(median_house_value=human_readable)

    def _predict(self, features: List) -> np.ndarray:
        logger.debug("Predicting.")
        return self.model.predict(features)

    def predict(self, payload: HousePredictionPayload):
        """Run the pre-process -> predict -> post-process pipeline.

        Raises:
            ValueError: if payload is None.
        """
        if payload is None:
            raise ValueError(NO_VALID_PAYLOAD.format(payload))
        features = self._pre_process(payload)
        raw_prediction = self._predict(features)
        logger.info(raw_prediction)
        return self._post_process(raw_prediction)
| 31.528302 | 77 | 0.699581 |
1b778666357ce0b8612c9290fe833b04a053104e
| 2,225 |
py
|
Python
|
my_objects.py
|
jeffakolb/Gnip-Filter-Optimization
|
23d15aadcd26cd316acc3c5d456000cb855ef050
|
[
"MIT"
] | 1 |
2021-01-13T22:31:00.000Z
|
2021-01-13T22:31:00.000Z
|
my_objects.py
|
jeffakolb/Gnip-Filter-Optimization
|
23d15aadcd26cd316acc3c5d456000cb855ef050
|
[
"MIT"
] | null | null | null |
my_objects.py
|
jeffakolb/Gnip-Filter-Optimization
|
23d15aadcd26cd316acc3c5d456000cb855ef050
|
[
"MIT"
] | 1 |
2019-12-17T18:26:01.000Z
|
2019-12-17T18:26:01.000Z
|
import json
import re
# Search jobs to run: each entry defines one Gnip search rule and time window.
search_configs = [
    {
        'name': 'test_run',
        'rule': 'apple lang:en',
        'start': '2016-07-25T00:00',
        'end': '2016-07-26T00:00',
        'max_tweets': 1000,
    },
]
# Controls how many candidate tweets are surfaced for manual labeling.
labeling_config = {
    # Score this fraction of the tweets...
    'label_fraction': 0.1,
    # ...but never score more than this many.
    'max_num_to_label': 10,
    # Only display tweet bodies, with newlines stripped out.
    'payload_element_to_score': lambda x: {'body': x['body'].replace('\n', ' ')},
}
class AppleVarietyRejector(object):
    """Filter that rejects tweets mentioning specific apple varieties.

    filter() returns True (keep the tweet) when the body mentions none of
    the listed varieties.
    """
    apple_varieties = ['gala', 'fuji', 'granny smith']

    def filter(self, tweet):
        # BUG FIX: the old implementation compared individual whitespace
        # tokens against the variety list, so the multi-word entry
        # 'granny smith' could never match. Match each variety as a whole
        # word/phrase instead, case-insensitively.
        body = tweet['body'].lower()
        return not any(
            re.search(r'\b' + re.escape(variety) + r'\b', body)
            for variety in self.apple_varieties
        )
class LongNameFilter(object):
    """Keep only tweets whose author's handle is longer than 9 characters."""

    def filter(self, tweet):
        handle = tweet['actor']['preferredUsername']
        return len(handle) > 9
class NameLengthClassifier(object):
    """Classify tweets by the length of the author's handle; -1 on bad data."""

    def classify(self, tweet):
        try:
            handle = tweet['actor']['preferredUsername']
        except (KeyError, TypeError):
            # BUG FIX: dict access raises KeyError (missing key) or TypeError
            # (non-dict value), never json.JSONDecodeError as the original
            # caught -- malformed tweets used to propagate the exception.
            return -1
        return len(handle)
class AppleDeviceClassifier(object):
    """Classify a tweet as mentioning an iphone, ipad, macbook, or 'other'."""

    # Known token spellings for each device family.
    classes = {
        'iphone': ['iphone', 'iphone5', 'iphone5s', 'iphone5c', 'iphone5se',
                   'iphone6', 'iphone6s', 'iphone6plus'],
        'ipad': ['ipad', 'ipadair', 'ipadair2', 'ipad2', 'ipad3', 'ipadmini'],
        'macbook': ['macbook', 'mbp', 'macbookpro'],
    }

    def classify(self, tweet):
        # .strip() already trims both ends; the original's extra .rstrip()
        # was a no-op and has been removed.
        tokens = [token.lower().strip() for token in tweet['body'].split()]
        # Check families in the original priority order: iphone wins over
        # ipad, which wins over macbook.
        for device in ('iphone', 'ipad', 'macbook'):
            if any(token in self.classes[device] for token in tokens):
                return device
        return 'other'
# Top-level configuration consumed by the optimization pipeline, combining
# the first search job with the labeling settings and the filter/classifier
# implementations defined above.
config = {
    'search_config': search_configs[0],
    'labeling_config': labeling_config,
    'name': 'test_run',
}
config['filter'] = AppleVarietyRejector()
config['classifier'] = AppleDeviceClassifier()
| 35.31746 | 111 | 0.587416 |
932a36014760f07ccf40faa2298706e84a99a112
| 11,159 |
py
|
Python
|
src/webdriver/appengine_communicator.py
|
ka2th1k/qualitybots
|
391e2419c0886463e2a7c3d46a35523f95ce9672
|
[
"Apache-2.0"
] | 4 |
2015-10-20T13:38:12.000Z
|
2021-04-28T02:02:02.000Z
|
src/webdriver/appengine_communicator.py
|
ka2th1k/qualitybots
|
391e2419c0886463e2a7c3d46a35523f95ce9672
|
[
"Apache-2.0"
] | null | null | null |
src/webdriver/appengine_communicator.py
|
ka2th1k/qualitybots
|
391e2419c0886463e2a7c3d46a35523f95ce9672
|
[
"Apache-2.0"
] | 2 |
2016-04-02T16:53:26.000Z
|
2016-09-10T02:46:30.000Z
|
#!/usr/bin/python2.6
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles test distribution and results upload to app engine."""
import base64
import json
import math
import random
import time
import urllib
import urllib2
import zlib
import blobstore_upload
import client_logging
# Define the constants
# Retry budgets for the two upload phases.
_BLOBSTORE_UPLOAD_RETRIES = 3
_PIECES_UPLOAD_RETRIES = 3
# Base wait time (seconds) used by AppEngineCommunicator.ExponentialBackoff.
_MAX_WAIT_TIME = 3
# Placeholder hostnames; deployments must substitute their real servers.
_TEST_DISTRIBUTION_SERVER = 'http://YOUR_APPENGINE_SERVER_HERE'
_FETCH_TEST_URL = _TEST_DISTRIBUTION_SERVER + '/distributor/accept_work_item'
_FINISH_TEST_URL = _TEST_DISTRIBUTION_SERVER + '/distributor/finish_work_item'
_RESULTS_SERVER = 'http://YOUR_APPENGINE_SERVER_HERE'
_RESULTS_UPLOAD_URL = _RESULTS_SERVER + '/putdata'
_LOG_UPLOAD_URL = _RESULTS_SERVER + '/distributor/upload_client_log'
LOGGER_NAME = 'appengine_communicator'
# Initialize the logger for this module
logger = client_logging.GetLogger(LOGGER_NAME)
class CommunicationError(Exception):
  """Raised when communication with the app engine servers fails."""
  pass
class AuthCookie(object):
  """Holds the cookie data needed to authenticate against a domain.

  Attributes:
    domain: A string naming the domain the cookies apply to.
    cookies: A list of cookie dicts to install in the browser before
      loading the page under test.
  """

  def __init__(self, domain, cookies):
    self.domain = domain
    self.cookies = cookies
class TestCase(object):
  """Describes one bots test case pulled from the distributor.

  Attributes:
    url: A string with the URL to load for the test.
    start_time: A string with the test's start time.
    config: A dict of configuration settings for the test.
    test_key: An integer key identifying this test.
    auth_cookie: An AuthCookie with authentication data, or None when no
      auth domain/cookies were supplied.
  """

  def __init__(self, url, start_time, config, test_key, auth_domain=None,
               auth_cookies=None):
    self.url = url
    self.start_time = start_time
    self.config = config
    self.test_key = test_key
    # Only build an AuthCookie when both pieces of auth data are present.
    if auth_domain and auth_cookies:
      self.auth_cookie = AuthCookie(auth_domain, auth_cookies)
    else:
      self.auth_cookie = None
class AppEngineCommunicator(object):
  """Handles communication with the test distributor and results servers.

  Attributes:
    _token: A string representing the token to use to pull tests from the
      distributor.
    _useragent: A string representing the useragent of the browser under test.
    _instance_id: A string representing a unique identifier for the machine
      instance.
    _current_test_case: A TestCase object representing the current test case.
    _log_uploaded: A boolean indicating whether the log file has been uploaded.
  """

  def __init__(self, token, useragent, instance_id):
    # Set up the attributes
    self._token = token
    self._useragent = useragent
    self._instance_id = instance_id
    self._current_test_case = None
    self._log_uploaded = False

  # TODO(user): Move this function into a shared utility module.
  @staticmethod
  def ExponentialBackoff(attempt, max_wait_time=_MAX_WAIT_TIME):
    """Wait a time that increases exponentially with the attempt number.

    Args:
      attempt: The most recent attempt number (starting at 0).
      max_wait_time: An optional int that specifies the max base time to wait
        in seconds.
    """
    # Sleeps 2^attempt * U(0.5, 1.0) * max_wait_time seconds; the random
    # factor jitters retries so clients do not retry in lockstep.
    sleep_time = math.pow(2, attempt) * random.uniform(0.5, 1.0) * max_wait_time
    time.sleep(sleep_time)

  def FetchTest(self):
    """Fetch a new test from the test distributor.

    This function will not prevent you from fetching another test if you have a
    current test case that hasn't been finished. The old test case will be over
    written by the new test case.

    Returns:
      A TestCase object describing the test case that was fetched. If there are
      no more tests to run, None is returned.

    Raises:
      CommunicationError: There is an error in fetching the test.
    """
    # Fetch the test case from the test distributor.
    try:
      data = urllib.urlencode({
          'tokens': self._token, 'useragent': urllib.quote(self._useragent),
          'instance_id': self._instance_id})
      url_page = urllib2.urlopen(_FETCH_TEST_URL, data)
    except urllib2.URLError:
      self._LogAndRaiseException('Failed to fetch a test from app engine.')
    # Process the data from the test distributor.
    self._current_test_case = None
    try:
      test_dictionary = json.loads(url_page.read())
      # Check if there is a test available.
      if test_dictionary:
        test_config = json.loads(test_dictionary['config'])
        auth_domain = None
        auth_cookies = None
        if 'auth_domain' in test_config:
          auth_domain = test_config['auth_domain']
        if 'auth_cookies' in test_config:
          auth_cookies = test_config['auth_cookies']
        # NOTE(review): the [19:-1] slice strips a fixed-length prefix and a
        # trailing character from data_str to recover the bare URL --
        # presumably the distributor wraps the URL in a fixed envelope;
        # confirm against the server's response format before changing.
        self._current_test_case = TestCase(
            test_dictionary['data_str'][19:-1], test_dictionary['start_time'],
            test_config, test_dictionary['key'], auth_domain=auth_domain,
            auth_cookies=auth_cookies)
    except ValueError:
      # Malformed JSON from the server: log and report "no test available".
      logger.exception('Could not process the data from the test distributor.')
    return self._current_test_case

  def FinishTest(self, result):
    """Acknowledge that the current test case has been finished.

    Args:
      result: A string indicating the result of executing the test case.

    Raises:
      CommunicationError: There is an error communicating with
        the test distributor.
    """
    # Make sure there is a current test case to finish.
    if not self._current_test_case:
      return
    try:
      data = urllib.urlencode({'key': self._current_test_case.test_key,
                               'result': result,
                               'instance_id': self._instance_id})
      urllib2.urlopen(_FINISH_TEST_URL, data)
      # Only clear the current test case once the server has acknowledged.
      self._current_test_case = None
    except urllib2.URLError:
      self._LogAndRaiseException('Failed acknowledging that the test finished.')

  def _LogAndRaiseException(self, message):
    """Log the current exception being handled and raise a new exception.

    Args:
      message: A string indicating the message to log and use with the new
        exception.

    Raises:
      CommunicationError: This exception is always raised using the given
        message.
    """
    logger.exception(message)
    raise CommunicationError(message)

  def UploadResults(self, nodes_table, layout_table, dynamic_content_table,
                    png, channel=''):
    """Upload the test case results to the results server.

    Args:
      nodes_table: A list representing the node results from the test case.
      layout_table: A list representing the layout results from the test case.
      dynamic_content_table: A list representing the dynamic content results
        from the test case.
      png: A string representing the binary data for a png image.
      channel: An optional string representing the channel for the browser.

    Raises:
      CommunicationError: The initial upload communication failed.
    """
    # Make sure there is a current test case to upload results for.
    if not self._current_test_case:
      return
    # Format the results data for uploading.
    suite_info = {
        'date': self._current_test_case.start_time,
        'key': self._current_test_case.test_key,
        'refBrowser': self._current_test_case.config['refBrowser'],
        'refBrowserChannel': self._current_test_case.config['refBrowserChannel']
        }
    # The nodes table is compressed and base64-encoded to keep the POST small.
    data_to_send = {
        'userAgent': self._useragent,
        'url': self._current_test_case.url,
        'nodesTable': base64.b64encode(
            zlib.compress(json.dumps(nodes_table), 9)),
        'dynamicContentTable': json.dumps(dynamic_content_table),
        'width': self._current_test_case.config['width'],
        'height': self._current_test_case.config['height'],
        'channel': channel,
        'suiteInfo': json.dumps(suite_info),
        'instance_id': self._instance_id
        }
    # Upload the initial data.
    try:
      initial_send = urllib2.urlopen(
          _RESULTS_UPLOAD_URL, urllib.urlencode(data_to_send))
    except urllib2.URLError:
      self._LogAndRaiseException('Failed on the initial results upload.')
    # The server replies with a continuation: a blobstore key and the number
    # of pieces it wants the layout table split into.
    response = initial_send.read()
    if not response:
      self._LogAndRaiseException(
          'Initial results upload did not provide continuation data.')
    response = json.loads(response)
    upload_key = response['key'].encode('ascii')
    num_pieces = int(response['nPieces'])
    layout_table_length = len(layout_table)
    logger.info('Uploading the image to blobstore with key "%s".', upload_key)
    # Retry the blobstore upload with jittered exponential backoff.
    for attempt in range(_BLOBSTORE_UPLOAD_RETRIES):
      try:
        blobstore_upload.UploadImageToBlobstore(upload_key, png)
        break
      except blobstore_upload.BlobstoreUploadError:
        logger.exception('Blobstore upload failed, attempt %d.', attempt+1)
        AppEngineCommunicator.ExponentialBackoff(attempt)
    # Send the layout table in the requested number of pieces.
    logger.info('Uploading remaining results in %d pieces.', num_pieces)
    n_rows_per_piece = int(math.ceil(layout_table_length / (num_pieces * 1.0)))
    start = 0
    end = n_rows_per_piece
    for i in range(num_pieces):
      data_pieces_to_send = {
          'key': upload_key,
          'layoutTable': json.dumps(layout_table[start:end]),
          'i': i,
          'instance_id': self._instance_id
          }
      # Each piece is retried independently with backoff.
      for attempt in range(_PIECES_UPLOAD_RETRIES):
        try:
          urllib2.urlopen(_RESULTS_UPLOAD_URL,
                          urllib.urlencode(data_pieces_to_send))
          break
        except urllib2.URLError:
          logger.exception('Piece "%d" upload failed, attempt %d.',
                           i, attempt+1)
          AppEngineCommunicator.ExponentialBackoff(attempt)
      # Advance the [start, end) row window for the next piece.
      start = end
      end = min(end+n_rows_per_piece, len(layout_table))

  def UploadLog(self, log):
    """Upload the test case results to the results server.

    Args:
      log: A string representing the client log to upload.
    """
    # Upload the log data if this is our first upload.
    if self._log_uploaded:
      return
    try:
      urllib2.urlopen(_LOG_UPLOAD_URL, urllib.urlencode(
          {'log': base64.b64encode(zlib.compress(json.dumps(log), 9)),
           'instance_id': self._instance_id}))
      self._log_uploaded = True
    # NOTE(review): bare except converts *any* failure (including programming
    # errors) into CommunicationError and discards the traceback.
    except:
      raise CommunicationError('Failed to upload the client log.')
| 34.441358 | 80 | 0.698987 |
b9cb9baec3886d7556f2b57b909e95067841c8cc
| 630 |
py
|
Python
|
visualizer/migrations/0003_video.py
|
FuckBrains/Artilizer
|
dd7a66a7e0a2c9cf260e11fc5e4ec7d50cd51d9e
|
[
"Apache-2.0"
] | 4 |
2019-09-21T16:02:39.000Z
|
2020-09-08T00:56:40.000Z
|
visualizer/migrations/0003_video.py
|
FuckBrains/Artilizer
|
dd7a66a7e0a2c9cf260e11fc5e4ec7d50cd51d9e
|
[
"Apache-2.0"
] | 11 |
2019-09-22T16:50:45.000Z
|
2021-06-09T18:40:31.000Z
|
visualizer/migrations/0003_video.py
|
betafactory/Artilizer
|
dd7a66a7e0a2c9cf260e11fc5e4ec7d50cd51d9e
|
[
"Apache-2.0"
] | 5 |
2019-09-21T15:46:47.000Z
|
2021-05-26T19:30:29.000Z
|
# Generated by Django 2.2.6 on 2019-10-05 16:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Recreates the Video model (replacing the one removed in 0002)."""

    # BUG FIX: removed `initial = True`. This migration depends on 0002 of
    # the same app, so it is not the app's initial migration; marking it
    # initial misleads `migrate --fake-initial`.
    dependencies = [
        ('visualizer', '0002_delete_video'),
    ]

    operations = [
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data', models.CharField(max_length=10000)),
                ('videofile', models.FileField(null=True, upload_to='videos/', verbose_name='')),
            ],
        ),
    ]
| 26.25 | 114 | 0.580952 |
f68da3c9c0a3e8c104c654cdb3d0c0075872dd09
| 5,756 |
py
|
Python
|
userbot/plugins/pmpermit_menu.py
|
saskeuday/masterSaske
|
5f4db35c718f85a4d68b86bd2c1c6221e4b6d319
|
[
"MIT"
] | null | null | null |
userbot/plugins/pmpermit_menu.py
|
saskeuday/masterSaske
|
5f4db35c718f85a4d68b86bd2c1c6221e4b6d319
|
[
"MIT"
] | null | null | null |
userbot/plugins/pmpermit_menu.py
|
saskeuday/masterSaske
|
5f4db35c718f85a4d68b86bd2c1c6221e4b6d319
|
[
"MIT"
] | null | null | null |
# if you change credits, you get anal cancer and get murdered by russians in 3 days.
"""
Support chatbox for pmpermit.
Used by incoming messages with trigger as start
Will not work for already approved people.
"""
import asyncio
import io
import telethon.sync
from telethon.tl.functions.users import GetFullUserRequest
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME
from userbot.utils import admin_cmd
# Display name used in the greeting; falls back to a reminder string.
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Set ALIVE_NAME in heroku vars"
# Cache of previous replies; not used in this handler (presumably used elsewhere).
PREV_REPLY_MESSAGE = {}
# NOTE(review): `command`, `pmpermit_sql`, `borg` and `asyncio` are referenced
# below but not imported/defined in this view -- confirm they are provided by
# the userbot framework globals or add the missing imports.
@command(pattern=r"\/start", incoming=True)
async def _(event):
    """PM-permit gatekeeper for incoming private `/start` messages.

    Unapproved senders get a canned menu (options 1-4). Depending on the
    chosen option the bot sends a canned reply, warns on further messages,
    and finally blocks + reports the sender if they keep messaging. Sending
    the literal text "start" resets/ends the warning escalation.
    """
    chat_id = event.from_id
    userid = event.sender_id
    # Only gate senders that have not been whitelisted in the pmpermit DB.
    if not pmpermit_sql.is_approved(chat_id):
        chat = await event.get_chat()
        if event.fwd_from:
            return
        if event.is_private:
            # Canned responses (Arabic). PM = greeting menu, ONE/FOUR/FIVE =
            # per-option replies, TWO = block notice, LWARN = last warning.
            PM = ("`مرحباً. لقد تم ايصالك إلى القائمة المتاحة للسيد ѕᴀѕᴋᴇ ʟ̤ɾʅ丂ɹɹɹȊɹɹɹ ,`"
                  f"{DEFAULTUSER}.\n"
                  "__دعونا نجعل هذا سلسًا وأخبرني لماذا أنت هنا ಠ_ಠ__\n"
                  "**اختر أحد الأسباب التالية لوجودك هنا (ارسل رقم خيارك):**\n\n"
                  "`1`. للدردشة مع سيدي 😺\n"
                  "`2`. لازعاج ʟ̤ɾʅ丂ɹɹɹȊɹɹɹ ಠ_ಠ.\n"
                  "`3`. للاستفسار عن شيء ما (⌐■_■)\n"
                  "`4`. لطلب شيء 🎭\n")
            ONE = ("__حسناً. تم تسجيل طلبك. لا ترسل المزيد من الرسائل المزعجه إلى أستاذي. يمكنك توقع الرد في غضون 24 سنة ضوئية. إنه رجل مشغول ، على عكسك على الأرجح(¬‿¬) .__\n\n"
                   "**⚠️ سيتم حظرك والإبلاغ عنك إذا قمت بإرسال رسائل غير مرغوب فيها. ⚠️**\n\n")
            TWO = (" `███████▄▄███████████▄ \n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓███░░░░░░░░░░░░█\n██████▀▀▀█░░░░██████▀ \n░░░░░░░░░█░░░░█ \n░░░░░░░░░░█░░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░░▀▀ `\n\n**رائع جداً 🌝🌿، هذا ليس منزلك. اذهب لازعاج شخص آخر. لقد تم حظرك والإبلاغ عنك حتى إشعار آخر 🎭**")
            FOUR = ("__حسناً. لم يطلع سيدي على رسالتك حتى الآن ، وعادةً ما يرد على الأشخاص ، على الرغم من ذلك ساقوم بايصال رسالتك لسيدي🌿 .__\n __سيرد عندما يعود إذا أراد ذلك ، فهناك بالفعل الكثير من الرسائل المعلقة😶__\n **من فضلك لا ترسل شيٌ أخر إلا إذا كنت ترغب في أن يتم حظره والإبلاغ عنك (●'◡'●).**")
            FIVE = ("`حسنا. يرجى الحصول على الأخلاق الأساسية لعدم إزعاج سيدي كثيرا. إذا كان يرغب في مساعدتك ، فسوف يرد عليك قريبًا 👀.`\n**لا تسأل مرارا وتكرارا وإلا سيتم حظرك والإبلاغ عنك.**")
            LWARN = ("**هذا هو التحذير الأخير الخاص بك. لا ترسل رسالة أخرى وإلا سيتم حظرك والإبلاغ عنك. كن صابر. سيدي سوف يرد عليك في اسرع وقت ممكن 🌝🌿.**")
            # Drive a scripted conversation: show menu, then escalate
            # (reply -> last warning -> block) based on the sender's input.
            async with borg.conversation(chat) as conv:
                await borg.send_message(chat, PM)
                chat_id = event.from_id
                response = await conv.get_response(chat)
                y = response.text
                if y == "1":
                    await borg.send_message(chat, ONE)
                    response = await conv.get_response(chat)
                    await event.delete()
                    if not response.text == "start":
                        await response.delete()
                        await borg.send_message(chat, LWARN)
                        response = await conv.get_response(chat)
                        await event.delete()
                        await response.delete()
                        response = await conv.get_response(chat)
                        if not response.text == "start":
                            await borg.send_message(chat, TWO)
                            await asyncio.sleep(3)
                            await event.client(functions.contacts.BlockRequest(chat_id))
                elif y == "2":
                    # "Annoy" option: skip straight to the last warning.
                    await borg.send_message(chat, LWARN)
                    response = await conv.get_response(chat)
                    if not response.text == "start":
                        await borg.send_message(chat, TWO)
                        await asyncio.sleep(3)
                        await event.client(functions.contacts.BlockRequest(chat_id))
                elif y == "3":
                    await borg.send_message(chat, FOUR)
                    response = await conv.get_response(chat)
                    await event.delete()
                    await response.delete()
                    if not response.text == "start":
                        await borg.send_message(chat, LWARN)
                        await event.delete()
                        response = await conv.get_response(chat)
                        if not response.text == "start":
                            await borg.send_message(chat, TWO)
                            await asyncio.sleep(3)
                            await event.client(functions.contacts.BlockRequest(chat_id))
                elif y == "4":
                    await borg.send_message(chat,FIVE)
                    response = await conv.get_response(chat)
                    if not response.text == "start":
                        await borg.send_message(chat, LWARN)
                        response = await conv.get_response(chat)
                        if not response.text == "start":
                            await borg.send_message(chat, TWO)
                            await asyncio.sleep(3)
                            await event.client(functions.contacts.BlockRequest(chat_id))
                else:
                    # Invalid menu choice: ask for "start", then escalate.
                    await borg.send_message(chat, "`لقد قمت بإدخال أمر غير صالح يرجى الإرسال\n🍁 start 🍁\nمرة أخرى أو عدم إرسال رسالة أخرى إذا كنت لا ترغب في أن يتم حظرك والإبلاغ عنك🌝🌿.`")
                    response = await conv.get_response(chat)
                    z = response.text
                    # NOTE(review): this branch checks for "/start" while all
                    # others check for "start" -- confirm which is intended.
                    if not z == "/start":
                        await borg.send_message(chat, LWARN)
                        await conv.get_response(chat)
                        if not response.text == "start":
                            await borg.send_message(chat, TWO)
                            await asyncio.sleep(3)
                            await event.client(functions.contacts.BlockRequest(chat_id))
| 52.327273 | 405 | 0.561501 |
3624202ecec1fd3d6e353a52a5b9ba5a4f0823c3
| 1,536 |
py
|
Python
|
frappe/integrations/oauth2_logins.py
|
farhan2222/frappe
|
7d06f9ce503e108239073a5c71ad1d9f7df24850
|
[
"MIT"
] | null | null | null |
frappe/integrations/oauth2_logins.py
|
farhan2222/frappe
|
7d06f9ce503e108239073a5c71ad1d9f7df24850
|
[
"MIT"
] | null | null | null |
frappe/integrations/oauth2_logins.py
|
farhan2222/frappe
|
7d06f9ce503e108239073a5c71ad1d9f7df24850
|
[
"MIT"
] | 3 |
2019-01-11T21:34:52.000Z
|
2020-03-18T07:53:09.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
from frappe.utils.oauth import login_via_oauth2, login_via_oauth2_id_token
import json
@frappe.whitelist(allow_guest=True)
def login_via_google(code, state):
	"""OAuth2 redirect callback for Google; token payload is JSON-decoded."""
	login_via_oauth2("google", code, state, decoder=json.loads)
@frappe.whitelist(allow_guest=True)
def login_via_github(code, state):
	"""OAuth2 redirect callback for GitHub (default token decoder)."""
	login_via_oauth2("github", code, state)
@frappe.whitelist(allow_guest=True)
def login_via_facebook(code, state):
	"""OAuth2 redirect callback for Facebook; token payload is JSON-decoded."""
	login_via_oauth2("facebook", code, state, decoder=json.loads)
@frappe.whitelist(allow_guest=True)
def login_via_frappe(code, state):
	"""OAuth2 redirect callback for a Frappe provider; JSON-decoded token."""
	login_via_oauth2("frappe", code, state, decoder=json.loads)
@frappe.whitelist(allow_guest=True)
def login_via_office365(code, state):
	"""OAuth2 callback for Office 365; uses the id_token (OpenID) flow."""
	login_via_oauth2_id_token("office_365", code, state, decoder=json.loads)
@frappe.whitelist(allow_guest=True)
def login_via_salesforce(code, state):
	"""OAuth2 redirect callback for Salesforce; token payload is JSON-decoded."""
	login_via_oauth2("salesforce", code, state, decoder=json.loads)
@frappe.whitelist(allow_guest=True)
def custom(code, state):
	"""Handle the OAuth2 callback for user-defined social login providers.

	Invoked at /api/method/frappe.integrations.custom/<provider>; the
	provider name is taken from the fourth path segment of the request.
	"""
	segments = frappe.request.path[1:].split("/")
	if len(segments) != 4 or not segments[3]:
		return
	provider = segments[3]
	# Only proceed when a Social Login Key document exists for the provider.
	if frappe.db.exists("Social Login Key", provider):
		login_via_oauth2(provider, code, state, decoder=json.loads)
| 32.680851 | 76 | 0.782552 |
f13099436d816f5c176d0e9d32022ce40d8dd97e
| 3,622 |
py
|
Python
|
reid/models/end2end.py
|
Proxim123/one-person-re-id
|
829fb585a10cb4948a70ee53aeca348b8ed4aa8c
|
[
"MIT"
] | 127 |
2019-01-11T04:33:42.000Z
|
2021-09-07T09:43:22.000Z
|
reid/models/end2end.py
|
ChronousZhang/One-Example-Person-ReID
|
43938c1bc9527f2dbddf35748efc05e6fcf10d51
|
[
"MIT"
] | 15 |
2019-01-26T05:39:23.000Z
|
2020-12-04T01:17:39.000Z
|
reid/models/end2end.py
|
ChronousZhang/One-Example-Person-ReID
|
43938c1bc9527f2dbddf35748efc05e6fcf10d51
|
[
"MIT"
] | 30 |
2019-01-11T07:46:25.000Z
|
2022-02-05T05:58:17.000Z
|
from __future__ import absolute_import
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn import init
import torch
import torchvision
import math
from .resnet import *
__all__ = ["End2End_AvgPooling"]
class AvgPooling(nn.Module):
    """Pooling + classification head for person re-ID.

    Averages per-sample backbone features, then feeds two branches:
    an "IDE" branch (1024-d embedding -> classifier logits) and an
    "Exclusive" branch (2048-d L2-normalized embedding).

    Returns (by mode):
      * eval + is_output_feature: L2-normalized pooled backbone feature.
      * eval + not is_output_feature: classifier logits.
      * train: (logits, exclusive_feature) tuple.

    NOTE(review): `embeding_fea_size` is stored but the branch sizes are
    hard-coded (1024 / 2048) -- confirm whether the parameter should be used.
    """
    def __init__(self, input_feature_size, num_classes, is_output_feature, embeding_fea_size=1024, dropout=0.5, classifier="CrossEntropyLoss"):
        # BUGFIX: super(self.__class__, ...) recurses infinitely if this class
        # is ever subclassed; name the class explicitly instead.
        super(AvgPooling, self).__init__()
        self.is_output_feature = is_output_feature

        # embeding
        self.embeding_fea_size = embeding_fea_size

        IDE_fea_size = 1024
        Ex_fea_size = 2048

        self.IDE_embeding = nn.Linear(input_feature_size, IDE_fea_size)
        self.IDE_embeding_bn = nn.BatchNorm1d(IDE_fea_size)

        self.Ex_embeding = nn.Linear(input_feature_size, Ex_fea_size)
        self.Ex_embeding_bn = nn.BatchNorm1d(Ex_fea_size)

        # Weight init order is kept exactly as before so RNG consumption
        # (and hence reproducibility under a fixed seed) is unchanged.
        init.kaiming_normal_(self.IDE_embeding.weight, mode='fan_out')
        init.constant_(self.IDE_embeding.bias, 0)
        init.constant_(self.IDE_embeding_bn.weight, 1)
        init.constant_(self.IDE_embeding_bn.bias, 0)

        init.kaiming_normal_(self.Ex_embeding.weight, mode='fan_out')
        init.constant_(self.Ex_embeding.bias, 0)
        init.constant_(self.Ex_embeding_bn.weight, 1)
        init.constant_(self.Ex_embeding_bn.bias, 0)

        self.drop = nn.Dropout(dropout)
        self.classify_fc = nn.Linear(IDE_fea_size, num_classes, bias=True)
        init.normal_(self.classify_fc.weight, std = 0.001)
        init.constant_(self.classify_fc.bias, 0)

        self.cls = classifier

    def forward(self, inputs):
        # inputs: (batch, samples, feature); average over the samples axis.
        pool5 = inputs.mean(dim = 1)

        if (not self.training) and self.is_output_feature:
            # Feature-extraction mode: normalized pooled backbone feature.
            return F.normalize(pool5, p=2, dim=1)

        """ IDE """
        # embeding
        net = self.drop(pool5)
        net = self.IDE_embeding(net)
        net = self.IDE_embeding_bn(net)
        net = F.relu(net)
        net = self.drop(net)

        # classifier
        predict = self.classify_fc(net)

        if (not self.training) and (not self.is_output_feature):
            return predict

        """ Exclusive """
        net = self.Ex_embeding(pool5)
        net = self.Ex_embeding_bn(net)
        net = F.normalize(net, p=2, dim=1)
        Ex_feat = self.drop(net)

        return predict, Ex_feat
class End2End_AvgPooling(nn.Module):
    """ResNet-50 encoder followed by the AvgPooling re-ID head.

    Input is a 5-D tensor (batch, samples, C, H, W); each sample image is
    encoded independently and the per-sample features are regrouped before
    the pooling head.
    """

    def __init__(self, pretrained=True, dropout=0, num_classes=0, is_output_feature=True, embeding_fea_size=1024, classifier="CrossEntropyLoss", fixed_layer=True):
        # BUGFIX: super(self.__class__, ...) recurses infinitely when this
        # class is subclassed; name the class explicitly instead.
        super(End2End_AvgPooling, self).__init__()
        # Backbone encoder (project-local resnet wrapper).
        self.CNN = resnet50(dropout=dropout, fixed_layer=fixed_layer)
        # Pooling / classification head on 2048-d ResNet features.
        self.avg_pooling = AvgPooling(input_feature_size=2048, num_classes=num_classes, dropout=dropout, is_output_feature=is_output_feature, classifier=classifier,
                        embeding_fea_size = embeding_fea_size)

    def forward(self, x):
        assert len(x.data.shape) == 5
        # reshape (batch, samples, ...) ==> (batch * samples, ...)
        oriShape = x.data.shape
        x = x.view(-1, oriShape[2], oriShape[3], oriShape[4])

        # resnet encoding
        resnet_feature = self.CNN(x)

        # reshape back into (batch, samples, ...)
        resnet_feature = resnet_feature.view(oriShape[0], oriShape[1], -1)

        # avg pooling
        # if eval and cut_off_before_logits, return predict; else return avg pooling feature
        predict = self.avg_pooling(resnet_feature)
        return predict
| 32.339286 | 164 | 0.653506 |
46b18c0e935fd77b7c9a98458728f8d41ea07c6a
| 3,016 |
py
|
Python
|
examples/exp_configs/non_rl/highway_single.py
|
lcipolina/flow
|
e2251d57d930b251896845a65fcb109f1c1f4087
|
[
"MIT"
] | 1 |
2020-03-25T00:03:06.000Z
|
2020-03-25T00:03:06.000Z
|
examples/exp_configs/non_rl/highway_single.py
|
DevPyer/flow
|
50be2d074027fb465fc4a9103b3cc09fb1123ede
|
[
"MIT"
] | 1 |
2020-03-16T21:43:07.000Z
|
2020-03-16T21:43:07.000Z
|
examples/exp_configs/non_rl/highway_single.py
|
lcipolina/flow
|
e2251d57d930b251896845a65fcb109f1c1f4087
|
[
"MIT"
] | null | null | null |
"""Multi-agent highway with ramps example.
Trains a non-constant number of agents, all sharing the same policy, on the
highway with ramps network.
"""
from flow.controllers import BandoFTLController
from flow.core.params import EnvParams
from flow.core.params import NetParams
from flow.core.params import InitialConfig
from flow.core.params import InFlows
from flow.core.params import VehicleParams
from flow.core.params import SumoParams
from flow.core.params import SumoLaneChangeParams
from flow.networks import HighwayNetwork
from flow.envs import TestEnv
from flow.networks.highway import ADDITIONAL_NET_PARAMS
# Scenario constants for the single-lane highway experiment.
TRAFFIC_SPEED = 11
END_SPEED = 16
TRAFFIC_FLOW = 2056
HORIZON = 3600
INCLUDE_NOISE = False

additional_net_params = ADDITIONAL_NET_PARAMS.copy()
additional_net_params.update({
    # length of the highway
    "length": 2500,
    # number of lanes
    "lanes": 1,
    # speed limit for all edges
    "speed_limit": 30,
    # number of edges to divide the highway into
    "num_edges": 2,
    # whether to include a ghost edge of length 500m. This edge is provided a
    # different speed limit.
    "use_ghost_edge": True,
    # speed limit for the ghost edge
    "ghost_speed_limit": END_SPEED
})

# Human-driven vehicles follow the Bando (follow-the-leader) model; they are
# all injected through the inflow below (num_vehicles=0 at t=0).
vehicles = VehicleParams()
vehicles.add(
    "human",
    num_vehicles=0,
    lane_change_params=SumoLaneChangeParams(
        lane_change_mode="strategic",
    ),
    acceleration_controller=(BandoFTLController, {
        'alpha': .5,
        'beta': 20.0,
        'h_st': 12.0,
        'h_go': 50.0,
        'v_max': 30.0,
        'noise': 1.0 if INCLUDE_NOISE else 0.0,
    }),
)

# Constant-rate inflow of human drivers at the upstream edge.
inflows = InFlows()
inflows.add(
    veh_type="human",
    edge="highway_0",
    vehs_per_hour=TRAFFIC_FLOW,
    depart_lane="free",
    depart_speed=TRAFFIC_SPEED,
    name="idm_highway_inflow")

# SET UP FLOW PARAMETERS
flow_params = dict(
    # name of the experiment
    exp_tag='highway-single',

    # name of the flow environment the experiment is running on
    env_name=TestEnv,

    # name of the network class the experiment is running on
    network=HighwayNetwork,

    # simulator that is used by the experiment
    simulator='traci',

    # environment related parameters (see flow.core.params.EnvParams)
    env=EnvParams(
        horizon=HORIZON,
        warmup_steps=0,
        sims_per_step=1,
    ),

    # sumo-related parameters (see flow.core.params.SumoParams)
    sim=SumoParams(
        sim_step=0.5,
        render=False,
        restart_instance=False
    ),

    # network-related parameters (see flow.core.params.NetParams and the
    # network's documentation or ADDITIONAL_NET_PARAMS component)
    net=NetParams(
        inflows=inflows,
        additional_params=additional_net_params
    ),

    # vehicles to be placed in the network at the start of a rollout (see
    # flow.core.params.VehicleParams)
    veh=vehicles,

    # parameters specifying the positioning of vehicles upon initialization/
    # reset (see flow.core.params.InitialConfig)
    initial=InitialConfig(),
)
| 27.171171 | 77 | 0.705902 |
c38e4a3250bc740136403e69de428ae69f721166
| 13,538 |
py
|
Python
|
simple_cvrp.py
|
PedroBCSilva/unisinos-ia-tga
|
48cfe7c9a1a1171b231c1bcaa69365a56c2e220f
|
[
"MIT"
] | null | null | null |
simple_cvrp.py
|
PedroBCSilva/unisinos-ia-tga
|
48cfe7c9a1a1171b231c1bcaa69365a56c2e220f
|
[
"MIT"
] | 2 |
2019-04-22T00:05:16.000Z
|
2019-04-22T00:05:22.000Z
|
simple_cvrp.py
|
PedroBCSilva/unisinos-ia-tga
|
48cfe7c9a1a1171b231c1bcaa69365a56c2e220f
|
[
"MIT"
] | null | null | null |
# COMO USAR python simple_cvrp.py --file "CVRP/test.txt"
import random, sys, copy, math
from optparse import OptionParser
from random import randint
class cvrp:
    """Capacitated Vehicle Routing Problem solved by random construction +
    hill climbing.

    Nodes are stored as lists [name, x, y, demand]; a 5th element (a flag)
    is appended once the node has been assigned to a route. A "solution" is
    a list (one entry per truck) of lists of node indices.

    NOTE(review): the constructor does all the work (parsing, solving,
    printing) as a side effect -- callers only instantiate it.
    """
    def __init__(self, filename, truck_count, repetitions):
        self.filename = filename
        self.nodes = []
        self.nodes_count = 0
        self.capacity = 0
        self.deposit = None
        self.init_data(self.filename)
        self.truck_count = truck_count
        self.trucks = []
        self.init_trucks()
        self.solution = self.init_solution()
        self.total_distance = self.calc_total_distance()
        self.repetitions = repetitions
        self.printStatus()
        self.solution = self.hill_climbing()
        print("\nDepois do hillclimbing\n")
        self.total_distance = self.calc_total_distance()
        self.printStatus()

    def init_data(self,filename):
        """Parse a TSPLIB-style CVRP file: coordinates, demands, capacity."""
        node_coord = False
        node_demand = False
        nodes_count = 0
        with open (filename, 'rt') as file:
            for line in file:
                if "NODE_COORD_SECTION" in line:
                    node_demand = False
                    node_coord = True
                    # do something
                    continue
                elif "DEMAND_SECTION" in line:
                    node_demand = True
                    node_coord = False
                    # do something
                    continue
                elif "CAPACITY" in line:
                    self.capacity = line[9:-1] #-1 para remover o \n
                    continue
                else:
                    if node_coord == True:
                        nodes_count = nodes_count + 1
                        (deposit, x, y) = line.split()
                        self.nodes.append([deposit, int(x), int(y), 0])
                    elif node_demand == True:
                        self.nodes_count = nodes_count
                        (deposit, quantity) = line.split()
                        self.nodes[int(deposit)-1] = [self.nodes[int(deposit)-1][0], self.nodes[int(deposit)-1][1], self.nodes[int(deposit)-1][2], int(quantity)]
                        continue
                    continue
        # First node of the file is the depot; remove it from the route pool.
        self.deposit = self.nodes[0]
        self.nodes.pop(0)

    def retrieve_data(self,index):
        # (x, y) coordinates of the node at *index*.
        return self.nodes[int(index)][1],self.nodes[int(index)][2]

    def printStatus(self):
        """Print capacity, depot, total distance and coverage statistics."""
        print("STATUS")
        print("======")
        print("Capacidade = ", self.capacity)
        print("Deposito = ", self.deposit)
        print("Distancia total = ", self.total_distance)
        print("Trucks = ", self.trucks)
        total = 0
        for i in range(len(self.nodes)):
            # length 5 means the node carries the "assigned" flag
            if len(self.nodes[i]) == 5:
                total += 1
        print("Quantidade Depositos = ", len(self.nodes))
        print("Nós percorridos =", total)

    def init_trucks(self):
        # Every truck starts with the full vehicle capacity.
        for i in range(self.truck_count):
            self.trucks.append(int(self.capacity))

    def get_first_not_flagged_node(self):
        # Index of the first node not yet assigned to a route (None if all are).
        pos = 0
        for node in self.nodes:
            if len(node) != 5:
                return pos
            pos = pos + 1

    def count_available_nodes(self):
        # Number of nodes not yet assigned to any route.
        count = 0
        for node in self.nodes:
            if len(node) != 5:
                count += 1
        return count

    def find_random_node(self, truck):
        """Pick a random unassigned node whose demand fits *truck*; flag and
        return its index, or None when no node can fit.

        NOTE(review): `node` is never rebound inside the loop, so termination
        relies on randint eventually hitting a feasible position -- confirm
        this cannot spin for long on sparse instances.
        """
        node = None
        has_node = self.count_available_nodes() > 0
        smallest_node = self.get_lowest_available_node()
        while node is None and has_node and truck >= smallest_node[3]:
            pos = randint(0, len(self.nodes)-1)
            if len(self.nodes[pos]) != 5 and truck-self.nodes[pos][3] >= 0:
                self.nodes[pos].append(False)
                return pos
        return None

    def get_lowest_available_node(self):
        # Unassigned node with the smallest demand (None if all assigned).
        smallest_pos = self.get_first_not_flagged_node()
        if smallest_pos is None:
            return None
        smallest_node = self.nodes[smallest_pos]
        pos = 0
        for node in self.nodes:
            if node[3] < smallest_node[3] and len(node) != 5:
                smallest_node = node
                smallest_pos = pos
            pos += 1
        return smallest_node

    def init_solution(self):
        """Greedy-random construction: fill each truck with random feasible
        nodes until none fits, consuming truck capacity as it goes."""
        all_solutions = []
        for i in range(len(self.trucks)):
            current_solution = []
            truck = self.trucks[i]
            has_nodes = True
            while has_nodes:
                node = self.find_random_node(truck)
                if node is None:
                    has_nodes = False # has no more close nodes with enough capacity to supply
                else:
                    current_solution.append(node)
                    truck = truck - int(self.get_cost(node)) # update truck capacity
            self.trucks[i] = truck # update truck capacity
            all_solutions.append(current_solution)
        return all_solutions

    def orderSolution(self, all_solutions):
        """Intra-route improvement: for each position, find the best swap
        within the same route (by local distance) and apply it."""
        for solution_pos in range(len(all_solutions)):
            solution = all_solutions[solution_pos]
            for i in range(len(solution)):
                best_change_total = None
                best_change_pos = None
                for j in range(i+1, len(solution)):
                    distance_a_before = self.calc_distance_around_node_in_solution(solution[i], solution, i)
                    distance_b_before = self.calc_distance_around_node_in_solution(solution[j], solution, j)
                    total_before = distance_a_before + distance_b_before
                    # do change
                    aux = solution[i]
                    solution[i] = solution[j]
                    solution[j] = aux
                    # calc the same distance
                    distance_a_after = self.calc_distance_around_node_in_solution(solution[i], solution, i)
                    distance_b_after = self.calc_distance_around_node_in_solution(solution[j], solution, j)
                    total_after = distance_a_after + distance_b_after
                    # change back because original order
                    aux = solution[i]
                    solution[i] = solution[j]
                    solution[j] = aux
                    if total_after < total_before:
                        if best_change_total is None:
                            best_change_pos = j
                            best_change_total = total_after
                        elif best_change_total > total_after:
                            best_change_pos = j
                            best_change_total = total_after
                if best_change_total is not None:
                    aux = solution[i]
                    solution[i] = solution[best_change_pos]
                    solution[best_change_pos] = aux
        return all_solutions

    def changeBetweenTrucks(self, all_solutions):
        """Inter-route improvement: try swapping nodes between two routes when
        both trucks stay within capacity and local distance improves."""
        for solution_pos in range(len(all_solutions)):
            solution = all_solutions[solution_pos]
            best_change_total = None
            best_change_pos_current_solution = None
            best_change_pos_next_solution = None
            best_change_solution = None
            for next_solution_pos in range(solution_pos+1, len(all_solutions)):
                next_solution = all_solutions[next_solution_pos]
                best_change_total = None
                best_change_pos_current_solution = None
                best_change_pos_next_solution = None
                best_change_solution = None
                for i in range(len(solution)):
                    for j in range(len(next_solution)):
                        if (self.trucks[solution_pos] + self.get_cost(solution[i])) - self.get_cost(next_solution[j]) >= 0 and (self.trucks[next_solution_pos] + self.get_cost(next_solution[j]) - self.get_cost(solution[i])) >= 0: # check if truck has enough capacity
                            distance_a_before = self.calc_distance_around_node_in_solution(next_solution[j], solution, i)
                            distance_b_before = self.calc_distance_around_node_in_solution(solution[i], next_solution, j)
                            total_before = distance_a_before + distance_b_before
                            aux = solution[i]
                            solution[i] = next_solution[j]
                            next_solution[j] = aux
                            distance_a_after = self.calc_distance_around_node_in_solution(next_solution[j], solution, i)
                            distance_b_after = self.calc_distance_around_node_in_solution(solution[i], next_solution, j)
                            total_after = distance_a_after + distance_b_after
                            aux = solution[i]
                            solution[i] = next_solution[j]
                            next_solution[j] = aux
                            if total_after < total_before: # check if is better with change
                                if best_change_total is None:
                                    best_change_pos_current_solution = i
                                    best_change_pos_next_solution = j
                                    best_change_total = total_after
                                    best_change_solution = next_solution_pos
                                elif best_change_total > total_after:
                                    best_change_pos_current_solution = i
                                    best_change_pos_next_solution = j
                                    best_change_total = total_after
                                    best_change_solution = next_solution_pos
            if best_change_total is not None:
                # update truck capacity
                self.trucks[solution_pos] += self.get_cost(solution[best_change_pos_current_solution]) - self.get_cost(all_solutions[best_change_solution][best_change_pos_next_solution])
                self.trucks[best_change_solution] += self.get_cost(all_solutions[best_change_solution][best_change_pos_next_solution]) - self.get_cost(solution[best_change_pos_current_solution])
                # change
                aux = solution[best_change_pos_current_solution]
                solution[best_change_pos_current_solution] = all_solutions[best_change_solution][best_change_pos_next_solution]
                next_solution[best_change_pos_next_solution] = aux
        return all_solutions

    def hill_climbing(self):
        """Alternate intra-route and inter-route improvement for a fixed
        number of repetitions."""
        all_solutions = self.solution
        for i in range(self.repetitions):
            all_solutions = self.orderSolution(all_solutions)
            all_solutions = self.changeBetweenTrucks(all_solutions)
        return all_solutions

    def calc_cost_route(self, nodes):
        """Total distance of one route (depot -> nodes... -> depot).

        NOTE(review): inside the loop the "next" node is read from
        self.nodes[index+1] (the *global* node list) while `index` counts
        positions in the *route* -- it looks like this should be
        selected_nodes[index+1]. Confirm before relying on these costs.
        """
        total = 0
        index = 0
        selected_nodes = []
        for i in nodes:
            selected_nodes.append(self.nodes[i])
        for node in selected_nodes:
            if index == 0:
                total += self.calc_cost(self.deposit[1], node[1], self.deposit[2], node[2])
                if index+1 < len(nodes):
                    total += self.calc_cost(node[1], self.nodes[index+1][1], node[2], self.nodes[index+1][2])
            elif index == len(nodes)-1:
                total += self.calc_cost(node[1], self.deposit[1], node[2], self.deposit[2])
            else:
                total += self.calc_cost(node[1], self.nodes[index+1][1], node[2], self.nodes[index+1][2])
            index += 1
        return total

    def calc_total_distance(self):
        # Sum of route costs over all trucks.
        total = 0
        for solution in self.solution:
            total += self.calc_cost_route(solution)
        return total

    def calc_distance_around_node_in_solution(self, node_pos, solution, solution_pos):
        """Distance from a node to its predecessor and successor in the route
        (the depot bounds the route at both ends)."""
        total_distance = 0
        if solution_pos == 0:
            total_distance += self.calc_distance(self.nodes[node_pos], self.deposit)
        else:
            total_distance += self.calc_distance(self.nodes[node_pos], self.nodes[solution[solution_pos - 1]])
        if solution_pos == len(solution)-1:
            total_distance += self.calc_distance(self.nodes[node_pos], self.deposit)
        else:
            total_distance += self.calc_distance(self.nodes[node_pos], self.nodes[solution[solution_pos + 1]])
        return total_distance

    def calc_distance(self, node_a, node_b):
        # Euclidean distance between two node records.
        return self.calc_cost(node_a[1], node_b[1], node_a[2], node_b[2])

    def calc_cost(self, xa, xb, ya, yb):
        # Euclidean distance between (xa, ya) and (xb, yb).
        return math.sqrt(((xa-xb) * (xa-xb))+((ya-yb) * (ya-yb)))

    def get_cost(self, pos):
        # Demand of the node at *pos* (None if the node list is empty).
        if not self.nodes:
            return None
        else:
            return self.nodes[pos][3]
if __name__ == "__main__":
    # CLI: problem file, fleet size, and number of hill-climbing iterations.
    opt_parser = OptionParser()
    for flag, dest, helptext, default, kind in (
        ("--file", "filename", "Path of file", "CVRP/eil33.vrp.txt", "string"),
        ("--trucks", "truck_count", "Quantity of trucks", "4", "int"),
        ("--repetitions", "repetitions", "Number of repetitions", "1000", "int"),
    ):
        opt_parser.add_option(flag, dest=dest, help=helptext, default=default, type=kind)
    options, args = opt_parser.parse_args()
    # Constructing cvrp parses the instance, solves it and prints the result.
    CVRP = cvrp(options.filename, options.truck_count, options.repetitions)
| 44.09772 | 269 | 0.546462 |
af74dcf11bf85c03d8f850990d060d38d1fef04f
| 15,226 |
py
|
Python
|
compare_lrs.py
|
TorHou/ContigAnalysisScripts
|
7cd240babf6c8889a25604d3cfb262e241fcfa11
|
[
"MIT"
] | 1 |
2018-07-27T00:54:35.000Z
|
2018-07-27T00:54:35.000Z
|
compare_lrs.py
|
DiltheyLab/ContigAnalysisScripts
|
7cd240babf6c8889a25604d3cfb262e241fcfa11
|
[
"MIT"
] | null | null | null |
compare_lrs.py
|
DiltheyLab/ContigAnalysisScripts
|
7cd240babf6c8889a25604d3cfb262e241fcfa11
|
[
"MIT"
] | null | null | null |
from argparse import ArgumentParser
from Bio import SeqIO
import sys
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
from random import sample
import logging
from logging import info
from itertools import combinations
from collections import defaultdict
import os.path
logging.basicConfig(filename='info.log',level=logging.DEBUG)

parser = ArgumentParser()
parser.add_argument("efile", help="Error rate file")
parser.add_argument("summaryfile", help="Contig Distance Summary file")
parser.add_argument("contigfile", help="Contig File")
parser.add_argument("linename", help="Name of cell line")
parser.add_argument("--blacklist", help="Blacklist File")
parser.add_argument("--overwrite", help="Overwrite preexisting distance matrix.", action = "store_true")
#parser.add_argument("--maxdev", help="Maximal deviation", type=float, default=2.0)
parser.add_argument("--mindepth", help="Minimal depth", type=int, default=20)
parser.add_argument("--mincontigs", help="Minimal number of contigs for long read to be considered", type=int, default=1)
parser.add_argument("--unwanted_contigs", help="If given the contigs in this file will not be considered.")
args = parser.parse_args()

reads = {}
greads = {}
cgreads = []
# contig name -> contig length, from the FASTA of assembled contigs
contigs = {}
for read in SeqIO.parse(args.contigfile, "fasta"):
    contigs[read.id] = len(read.seq)

#unwanted contigs
unwanted_contigs = set()
if args.unwanted_contigs:
    with open(args.unwanted_contigs) as f:
        for line in f:
            ctg = line.strip()
            unwanted_contigs.add(ctg)
            unwanted_contigs.add("b_" + ctg) # usual prefix for repeated contigs
            unwanted_contigs.add("c_" + ctg)

# Short-read derived pairwise contig distances, keyed ctg1 -> ctg2 -> dist.
# Summary lines look like "<ori><ctg1>_<ori><ctg2> <dist> <depth>"; only
# same-orientation, forward pairs with enough depth are kept.
sr_distances = {}
with open(args.summaryfile) as f:
    for line in f:
        sline = line.split()
        ori1 = sline[0].split("_")[0][0]
        ori2 = sline[0].split("_")[1][0]
        if ori1 != ori2:
            continue
        if ori1 == "-":
            continue
        [ctg1, ctg2] = sline[0].replace("+","").replace("-","").split("_")
        if sline[1] == "NA":
            continue
        if float(sline[2]) < args.mindepth:
            continue
        if ctg1 in unwanted_contigs or ctg2 in unwanted_contigs:
            continue
        moddist = float(sline[1])
        # sanity check on the distance
        if moddist + contigs[ctg1] < 0 or moddist + contigs[ctg2] < 0 :
            continue
        if ctg1 in sr_distances:
            sr_distances[ctg1][ctg2] = moddist
        else:
            sr_distances[ctg1] = {ctg2: moddist}

# some sr distances suck
'''
sr_distances["1046APD"].pop("1550APD")
sr_distances["1488APD"].pop("1553APD")
sr_distances["169APD"].pop("530APD")
sr_distances["2137APD"].pop("530APD")
sr_distances["367APD"].pop("2038APD")
sr_distances["367APD"].pop("398APD")
sr_distances["544APD"].pop("1923APD")
sr_distances["582APD"].pop("635APD")
'''

#blacklist of long reads
blacklist = {}
complete_read = set()
if args.blacklist:
    with open(args.blacklist) as f:
        for line in f:
            idx, ctg = line.strip().split()[0:2]
            if ctg == "all":
                complete_read.add(idx)
            else:
                blacklist[idx] = ctg

# NOTE(review): hard-coded absolute cache path -- consider making this a CLI
# argument so the script is usable outside the author's machine.
dm_path = "/home/houwaart/Projects/ImmunoPore/APD/distance_matrix.pkl"
# if distance matrix exists there is no need to calculate
# Recompute the distance matrix only if forced or no cached pickle exists.
if args.overwrite or not os.path.isfile(dm_path):
    # nanopore reads
    # rid -> {"length", "maps": [per-contig alignment dicts], "rm_ecr", "lm_scr"}
    lreads = {}
    with open(args.efile) as f:
        for line in f:
            #sline = line.split()
            [rid, ctg, t2, t3, t4, scr, ecr, lenr, strand, scc, ecc, lenc, t12, t13, t14, t15, t16] = line.split()
            # scr/ecr: start/end on the read; scc/ecc: start/end on the contig.
            data = {"contig":ctg,"strand":int(strand),"scr":int(scr),"ecr":int(ecr),"scc":int(scc),"ecc":int(ecc),"lenc":int(lenc)}
            if args.blacklist:
                if rid in blacklist:
                    if blacklist[rid] == ctg:
                        continue
                elif rid in complete_read:
                    continue
            if args.unwanted_contigs:
                if ctg in unwanted_contigs:
                    continue
            if rid in lreads:
                lreads[rid]["maps"].append(data)
                # track the rightmost end and leftmost start over all mappings
                if int(ecr) > lreads[rid]["rm_ecr"]:
                    lreads[rid]["rm_ecr"] = int(ecr)
                if int(scr) < lreads[rid]["lm_scr"]:
                    lreads[rid]["lm_scr"] = int(scr)
            else:
                lreads[rid] = {}
                lreads[rid]["length"] = int(lenr)
                lreads[rid]["maps"] = [data]
                lreads[rid]["rm_ecr"] = int(ecr)
                lreads[rid]["lm_scr"] = int(scr)

    # these reads are problematic
def has_contigs_double(lr1):
seen_ctgs = set()
for ctg in lr1["maps"]:
if ctg["contig"] in seen_ctgs:
return ctg
else:
if ctg["contig"].endswith(args.linename):
seen_ctgs.add(ctg["contig"])
return ""
    #filter for interesting np reads
    # Keep only reads that hit at least args.mincontigs distinct cell-line
    # contigs and have no duplicated cell-line contig.
    greads = {}
    for rid,lr in lreads.items():
        double_ctg = has_contigs_double(lr)
        if double_ctg:
            info(rid + " has contigs double " + str(double_ctg))
            continue
        counter = 0
        for item in lr["maps"]:
            if item["contig"].endswith(args.linename):
                counter +=1
                if counter >= args.mincontigs:
                    greads[rid] = lr
                    break

    lrids = []
    # turn reads around if necessary
    # If most cell-line mappings are reverse-strand, mirror the whole read so
    # downstream coordinates are comparable.
    for rid, lr in greads.items():
        bw = 0
        fw = 0
        lrids.append(rid)
        for mapping in lr["maps"]:
            if mapping["contig"].endswith(args.linename):
                if mapping["strand"] == 1:
                    bw += 1
                elif mapping["strand"] == 0:
                    fw += 1
                else:
                    raise ValueError("strand: " + str(mapping["strand"]))
        if bw > fw:
            for mapping in lr["maps"]:
                if mapping["contig"].endswith(args.linename):
                    mapping["strand"] = 1 if mapping["strand"] == 0 else 0
                    tmp = mapping["scr"]
                    mapping["scr"] = lr["length"] - mapping["ecr"]
                    mapping["ecr"] = lr["length"] - tmp
                    tmp = mapping["scc"]
                    mapping["scc"] = mapping["lenc"] - mapping["ecc"]
                    mapping["ecc"] = mapping["lenc"] - mapping["scc"]
        # turn around and redefine wrong contigs
        for mapping in lr["maps"]:
            if mapping["contig"].endswith(args.linename):
                if mapping["strand"] == 1: #define a new contigname and turn it around
                    mapping["contig"] = mapping["contig"] + "rc"
                    # NOTE(review): this swap reads the *already updated* scc
                    # when computing ecc, so ecc ends up unchanged -- compare
                    # with the tmp-based swap above; confirm intended.
                    mapping["scc"] = mapping["lenc"] - mapping["ecc"]
                    mapping["ecc"] = mapping["lenc"] - mapping["scc"]
                    mapping["strand"] = 0
def compare_longreads(lr1, lr2):
l1c = []
l2c = []
for m in lr1["maps"]:
cn = m["contig"]
if cn.endswith(args.linename):
l1c.append(cn)
for m in lr2["maps"]:
cn = m["contig"]
if cn.endswith(args.linename):
l2c.append(cn)
common_ctgs = set(l1c).intersection(set(l2c))
return common_ctgs
def get_contig_info(lr,ctg):
for maps in lr["maps"]:
if maps["contig"] == ctg:
return maps
def get_distances(lr1,lr2, common_ctgs):
dists = []
for ctg in common_ctgs:
m1 = get_contig_info(lr1,ctg)
m2 = get_contig_info(lr2,ctg)
dists.append((m1["scr"]-m1["scc"]) - (m2["scr"]-m2["scc"]))
return dists
    def show_distances(lr1,lr2,lr1id,lr2id, common_ctgs):
        """Debug helper: recompute per-contig offsets between two reads.

        The print is commented out, so this currently has no observable
        effect; kept for ad-hoc debugging.
        """
        for ctg in common_ctgs:
            m1 = get_contig_info(lr1,ctg)
            m2 = get_contig_info(lr2,ctg)
            dist = ((m1["scr"]-m1["scc"]) - (m2["scr"]-m2["scc"]))
            #print(lr1id + " + " + lr2id + " - " + ctg + ": " + str(dist))
    #lr_dist = defaultdict(lambda:([],[]))
    # lr_dists[r1][r2] = (long-read offsets, short-read-derived offsets)
    lr_dists = {}
    #initialize matrix
    for lid in lrids:
        lr_dists[lid] = {lid:([0],[0])}

    # get pair of overlapping read
    #lread1, lread2 = sample(list(greads.values()), 2)
    #while len(compare_longreads(lread1,lread2)) == 0:
    #    lread1, lread2 = sample(list(greads.values()), 2)
    #common_ctgs = compare_longreads(lread1,lread2)
    all_dists = []
    # Fill both triangle entries (negated) for every read pair sharing contigs.
    for lrs in combinations(greads.keys(), 2):
        lr1 = greads[lrs[0]]
        lr2 = greads[lrs[1]]
        common_ctgs = compare_longreads(lr1, lr2)
        if len(common_ctgs) > 0:
            dists = get_distances(lr1, lr2, common_ctgs)
            lr_dists[lrs[0]][lrs[1]]=(dists, [])
            ndists =[]
            for d in dists:
                ndists.append(-d)
            lr_dists[lrs[1]][lrs[0]] = (ndists, [])
            stdev = (np.std(dists))
            if stdev > 500:
                # high spread between anchors -- log candidates for inspection
                show_distances(lr1, lr2, lrs[0], lrs[1], common_ctgs)
            #print("-" * 200)
            #print(lrs[0] + " + " + lrs[1])
            all_dists.append(dists)
            #print(dists)
        # short reads !
        # Bridge non-overlapping reads via short-read contig-pair distances.
        for m1 in lr1["maps"]:
            ctg1 = m1["contig"]
            for m2 in lr2["maps"]:
                ctg2 = m2["contig"]
                if ctg1 in sr_distances:
                    if ctg2 in sr_distances[ctg1]:
                        sr_dist = m1["ecr"] + (contigs[ctg1] - m1["ecc"]) + sr_distances[ctg1][ctg2] - (m2["scr"] - m2["scc"])
                        if lrs[1] in lr_dists[lrs[0]]:
                            curr_dist = np.mean(lr_dists[lrs[0]][lrs[1]][0])
                            if abs(sr_dist - curr_dist) > 2000:
                                print("\t".join(["curr_dist: " + str(curr_dist), "sr_dist: " + str(sr_dist), "ctg1: " + ctg1, "ctg2: " + ctg2, lrs[0], lrs[1]]))
                            lr_dists[lrs[0]][lrs[1]][1].append(sr_dist)
                        else:
                            #print(lrs[0] + " + " + lrs[1] + ": " + ctg1 + " " + ctg2)
                            lr_dists[lrs[0]][lrs[1]]= ([],[sr_dist])
                        if lrs[0] in lr_dists[lrs[1]]:
                            lr_dists[lrs[1]][lrs[0]][1].append(-sr_dist)
                        else:
                            lr_dists[lrs[1]][lrs[0]]= ([],[-sr_dist])
    #save the distance matrix
    with open(dm_path, 'wb') as f:
        pickle.dump(lr_dists, f, pickle.HIGHEST_PROTOCOL)
else:
    # Cached matrix exists and no --overwrite: load it instead of recomputing.
    with open(dm_path, 'rb') as f:
        lr_dists = pickle.load(f)
# Summary statistics: how many pairs have long-read vs short-read evidence.
count1 = 0
count2 = 0
for lrid,lrdists in lr_dists.items():
    for lrid2,dists in lrdists.items():
        if len(dists[1]) > 0:
            count2 += 1
        if len(dists[0]) > 0:
            count1 += 1
            #print("interesting: " + str(lrid) + " " + str(lrid2) + " " + str(dists[1]))
        if len(dists[0]) > 1 and len(dists[1]) > 1:
            #if abs(np.mean(dists[0]) - np.mean(dists[1])) > 50:
            #    print("\t".join([str(lrid),str(lrid2),str(np.mean(dists[0])), str(np.mean(dists[1]))]))
            if abs(np.mean(dists[0]) - np.mean(dists[1])) > 500:
                ##print("\t".join([str(lrid),str(lrid2),str(np.mean(dists[0])), str(np.mean(dists[1]))]))
                #print("\t".join(["","",str(dists[0]), str(dists[1])]))
                pass
print("non-zero entries longreads: " + str(count1))
print("non-zero entries shortreads : " + str(count2))
print("length: " + str(len(lr_dists)))
#for lrid,dists in lr_dists.items():
#    print("-"*200)
#    print(lrid)
#    print(dists)

# and now let's build a simple matrix
# taking an arbitrary but fixed ordering
# Binary adjacency image: 1 where two reads have any distance entry.
lr_keys = lr_dists.keys()
matrix = []
for nr1, lr1 in enumerate(lr_keys):
    row = []
    for nr2, lr2 in enumerate(lr_keys):
        if lr1 in lr_dists:
            if lr2 in lr_dists[lr1]:
                row.append(1)
            else:
                row.append(0)
        else:
            row.append(0)
    matrix.append(row)

plt.imsave('connectedness.png', np.array(matrix).reshape(len(lr_keys),len(lr_keys)), cmap=cm.gray)
#plt.plot(np.array(matrix), cmap=cm.gray)
#plt.show()

print(lr_dists["0cf11d19-fcbf-4685-901f-32c4259eaf85"])

# Connected components over the adjacency relation (simple BFS-style sweep).
cluster = 0
unvisited_nodes = set(lr_keys)
while unvisited_nodes:
    cluster += 1
    print("Cluster " + str(cluster))
    start_node = unvisited_nodes.pop()
    current_cluster = [start_node]
    current_index = 0
    while(current_index != len(current_cluster)):
        for lr2 in lr_dists[current_cluster[current_index]]:
            if lr2 not in current_cluster and lr2 in unvisited_nodes:
                current_cluster.append(lr2)
                unvisited_nodes.remove(lr2)
        current_index += 1
    print(current_cluster)

#sys.exit(0)

# get pairs of overlapping reads
#plt.hist(devs,150)
#plt.yscale('log', nonposy='clip')
#plt.show()

# One histogram page per read pair: long-read vs short-read offset spread,
# both centered on their own mean and clamped to +/- maxv.
pp = PdfPages("distances.pdf")
entries = 0
nz_entries = 0
maxv = 3000
for lrid,lrdists in lr_dists.items():
    for lrid2,dists in lrdists.items():
        if lrid == lrid2:
            continue
        if len(dists[1]) > 0 or len(dists[0]) > 0:
            entries += 1
            if entries % 1000 == 0:
                print("entry: " + str(entries))
            #nz_entries += 1
            #print(str(entries) + "\t" + str(nz_entries))
            nm = np.mean(dists[1])
            distances_sr = [x-nm for x in dists[1]]
            nm_lr = np.mean(dists[0])
            distances_lr = [x-nm_lr for x in dists[0]]
            for idx,distance in enumerate(distances_sr):
                if distance < -maxv:
                    distances_sr[idx] = -maxv
                if distance > maxv:
                    distances_sr[idx] = maxv
            for idx,distance in enumerate(distances_lr):
                if distance < -maxv:
                    distances_lr[idx] = -maxv
                if distance > maxv:
                    distances_lr[idx] = maxv
            plot1 = plt.figure()
            #plt.subplot(1, 3, 1)
            #plt.hist(distances_sr, range(-maxv, maxv+1, 250))
            #plt.title(lrid + " +\n" + lrid2)
            label = "short_reads mean: " + '{0: >#016.1f}'.format((float(nm)))
            #plt.xlabel(label)
            #plt.subplot(1, 3, 2)
            #plt.hist(distances_lr, range(-maxv, maxv+1, 250))
            label += "\nlong_reads mean: " + '{0: >#016.1f}'.format((float(nm_lr)))
            plt.xlabel(label)
            #plt.subplot(1, 3, 3)
            plt.hist(distances_lr, range(-maxv, maxv+1, 250), alpha = 0.4)
            plt.hist(distances_sr, range(-maxv, maxv+1, 250), alpha = 0.4)
            #plt.xlabel("long_reads mean: " + '{0: >#016.1f}'.format((float(nm_lr))))
            pp.savefig(plot1)
            plt.close(plot1)
    #    sys.exit(0)
pp.close()

# Spread of long-read offsets per overlapping pair (diagnostic only).
devs = []
for dists in all_dists:
    stdev = (np.std(dists))
    if stdev < 50:
        pass
    #    print(dists)
    devs.append(stdev)
340e6dcf9bcc38789dbf784618dc000bed60cf27
| 1,639 |
py
|
Python
|
scaffold/identical_murcko_scaffold.py
|
fujirock/Reinvent
|
9c57636f9d32b4ce5b75670f43906a70d5daf886
|
[
"MIT"
] | 4 |
2021-05-11T05:34:01.000Z
|
2022-03-30T10:04:21.000Z
|
scaffold/identical_murcko_scaffold.py
|
prasannavd/Reinvent
|
ca02ebee8d8ed83223c55f4a1dd1b3fbc2359616
|
[
"MIT"
] | null | null | null |
scaffold/identical_murcko_scaffold.py
|
prasannavd/Reinvent
|
ca02ebee8d8ed83223c55f4a1dd1b3fbc2359616
|
[
"MIT"
] | 2 |
2021-06-01T11:56:10.000Z
|
2021-10-05T04:33:56.000Z
|
from copy import deepcopy
import numpy as np
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from scaffold.scaffold_filters import ScaffoldFilter
from scaffold.scaffold_parameters import ScaffoldParameters
from scoring.score_summary import FinalSummary
from utils.smiles import convert_to_rdkit_smiles
class IdenticalMurckoScaffold(ScaffoldFilter):
    """Penalizes compounds based on exact Murcko Scaffolds previously generated."""
    def __init__(self, parameters: ScaffoldParameters):
        super().__init__(parameters)
    def score(self, score_summary: FinalSummary) -> np.array:
        """Zero out duplicates and penalize repeated scaffolds in-place on a copy."""
        summary = deepcopy(score_summary)
        scores = summary.total_score
        all_smiles = summary.scored_smiles
        for idx in summary.valid_idxs:
            canonical = convert_to_rdkit_smiles(all_smiles[idx])
            scaffold = self._calculate_scaffold(canonical)
            if self._smiles_exists(scaffold, canonical):
                # Exact SMILES already seen for this scaffold -> no reward.
                scores[idx] = 0
            if scores[idx] >= self.parameters.minscore:
                self._add_to_memory(idx, scores[idx], canonical, scaffold,
                                    summary.scaffold_log)
                scores[idx] = self._penalize_score(scaffold, scores[idx])
        return scores
    def _calculate_scaffold(self, smile):
        """Return the non-isomeric Murcko scaffold SMILES, or '' on failure."""
        mol = Chem.MolFromSmiles(smile)
        if not mol:
            return ''
        try:
            scaffold = MurckoScaffold.GetScaffoldForMol(mol)
            return Chem.MolToSmiles(scaffold, isomericSmiles=False)
        except ValueError:
            return ''
| 37.25 | 94 | 0.691275 |
6c34ce41129b84d2fb74ad9412435dde9de07454
| 5,639 |
py
|
Python
|
pexpect/replwrap.py
|
dolfinus/pexpect
|
3453ea9b8b326179cf720351001e64c7ea6b07bc
|
[
"0BSD"
] | 2,132 |
2015-01-02T12:48:45.000Z
|
2022-03-28T05:32:54.000Z
|
pexpect/replwrap.py
|
dolfinus/pexpect
|
3453ea9b8b326179cf720351001e64c7ea6b07bc
|
[
"0BSD"
] | 536 |
2015-01-02T19:42:34.000Z
|
2022-03-10T16:40:35.000Z
|
pexpect/replwrap.py
|
dolfinus/pexpect
|
3453ea9b8b326179cf720351001e64c7ea6b07bc
|
[
"0BSD"
] | 517 |
2015-01-07T02:09:44.000Z
|
2022-03-26T14:18:23.000Z
|
"""Generic wrapper for read-eval-print-loops, a.k.a. interactive shells
"""
import os.path
import signal
import sys
import pexpect
PY3 = (sys.version_info[0] >= 3)
if PY3:
basestring = str
PEXPECT_PROMPT = u'[PEXPECT_PROMPT>'
PEXPECT_CONTINUATION_PROMPT = u'[PEXPECT_PROMPT+'
class REPLWrapper(object):
    """Wrapper for a REPL.
    :param cmd_or_spawn: This can either be an instance of :class:`pexpect.spawn`
    in which a REPL has already been started, or a str command to start a new
    REPL process.
    :param str orig_prompt: The prompt to expect at first.
    :param str prompt_change: A command to change the prompt to something more
    unique. If this is ``None``, the prompt will not be changed. This will
    be formatted with the new and continuation prompts as positional
    parameters, so you can use ``{}`` style formatting to insert them into
    the command.
    :param str new_prompt: The more unique prompt to expect after the change.
    :param str extra_init_cmd: Commands to do extra initialisation, such as
    disabling pagers.
    """
    def __init__(self, cmd_or_spawn, orig_prompt, prompt_change,
                 new_prompt=PEXPECT_PROMPT,
                 continuation_prompt=PEXPECT_CONTINUATION_PROMPT,
                 extra_init_cmd=None):
        if isinstance(cmd_or_spawn, basestring):
            # Spawn a fresh child; echo disabled so our own input is not
            # reflected back into the captured output.
            self.child = pexpect.spawn(cmd_or_spawn, echo=False, encoding='utf-8')
        else:
            self.child = cmd_or_spawn
        if self.child.echo:
            # Existing spawn instance has echo enabled, disable it
            # to prevent our input from being repeated to output.
            self.child.setecho(False)
            self.child.waitnoecho()
        if prompt_change is None:
            self.prompt = orig_prompt
        else:
            # Install the unique prompts, then expect the new primary prompt.
            self.set_prompt(orig_prompt,
                            prompt_change.format(new_prompt, continuation_prompt))
            self.prompt = new_prompt
        self.continuation_prompt = continuation_prompt
        # Consume the initial prompt so the child is ready for run_command().
        self._expect_prompt()
        if extra_init_cmd is not None:
            self.run_command(extra_init_cmd)
    def set_prompt(self, orig_prompt, prompt_change):
        # Wait for the current prompt, then send the command that changes it.
        self.child.expect(orig_prompt)
        self.child.sendline(prompt_change)
    def _expect_prompt(self, timeout=-1, async_=False):
        # Returns 0 for the primary prompt, 1 for the continuation prompt.
        return self.child.expect_exact([self.prompt, self.continuation_prompt],
                                       timeout=timeout, async_=async_)
    def run_command(self, command, timeout=-1, async_=False):
        """Send a command to the REPL, wait for and return output.
        :param str command: The command to send. Trailing newlines are not needed.
            This should be a complete block of input that will trigger execution;
            if a continuation prompt is found after sending input, :exc:`ValueError`
            will be raised.
        :param int timeout: How long to wait for the next prompt. -1 means the
            default from the :class:`pexpect.spawn` object (default 30 seconds).
            None means to wait indefinitely.
        :param bool async_: On Python 3.4, or Python 3.3 with asyncio
            installed, passing ``async_=True`` will make this return an
            :mod:`asyncio` Future, which you can yield from to get the same
            result that this method would normally give directly.
        """
        # Split up multiline commands and feed them in bit-by-bit
        cmdlines = command.splitlines()
        # splitlines ignores trailing newlines - add it back in manually
        if command.endswith('\n'):
            cmdlines.append('')
        if not cmdlines:
            raise ValueError("No command was given")
        if async_:
            from ._async import repl_run_command_async
            return repl_run_command_async(self, cmdlines, timeout)
        res = []
        self.child.sendline(cmdlines[0])
        for line in cmdlines[1:]:
            # Wait for a (possibly continuation) prompt before the next line,
            # collecting everything printed so far.
            self._expect_prompt(timeout=timeout)
            res.append(self.child.before)
            self.child.sendline(line)
        # Command was fully submitted, now wait for the next prompt
        if self._expect_prompt(timeout=timeout) == 1:
            # We got the continuation prompt - command was incomplete
            self.child.kill(signal.SIGINT)
            self._expect_prompt(timeout=1)
            raise ValueError("Continuation prompt found - input was incomplete:\n"
                             + command)
        return u''.join(res + [self.child.before])
def python(command=sys.executable):
    """Start a Python shell and return a :class:`REPLWrapper` object."""
    prompt_change_cmd = u"import sys; sys.ps1={0!r}; sys.ps2={1!r}"
    return REPLWrapper(command, u">>> ", prompt_change_cmd)
def bash(command="bash"):
    """Start a bash shell and return a :class:`REPLWrapper` object."""
    rcfile = os.path.join(os.path.dirname(__file__), 'bashrc.sh')
    child = pexpect.spawn(command, ['--rcfile', rcfile], echo=False,
                          encoding='utf-8')
    # If the user runs 'env', the value of PS1 will be in the output. To avoid
    # replwrap seeing that as the next prompt, we'll embed the marker characters
    # for invisible characters in the prompt; these show up when inspecting the
    # environment variable, but not when bash displays the prompt.
    invisible = u'\\[\\]'
    ps1 = PEXPECT_PROMPT[:5] + invisible + PEXPECT_PROMPT[5:]
    ps2 = PEXPECT_CONTINUATION_PROMPT[:5] + invisible + PEXPECT_CONTINUATION_PROMPT[5:]
    prompt_change = u"PS1='{0}' PS2='{1}' PROMPT_COMMAND=''".format(ps1, ps2)
    return REPLWrapper(child, u'\\$', prompt_change,
                       extra_init_cmd="export PAGER=cat")
| 43.045802 | 87 | 0.648342 |
1f8b6e5d36067a82d06886edfbb99fc6684c114f
| 1,057 |
py
|
Python
|
tests/common/plugins/sanity_check/constants.py
|
dipalipatel25/sonic-mgmt
|
54f52b689e817aa9ad6955643f9271dc40fcfc02
|
[
"Apache-2.0"
] | null | null | null |
tests/common/plugins/sanity_check/constants.py
|
dipalipatel25/sonic-mgmt
|
54f52b689e817aa9ad6955643f9271dc40fcfc02
|
[
"Apache-2.0"
] | null | null | null |
tests/common/plugins/sanity_check/constants.py
|
dipalipatel25/sonic-mgmt
|
54f52b689e817aa9ad6955643f9271dc40fcfc02
|
[
"Apache-2.0"
] | null | null | null |
# Map of label -> SONiC CLI command whose output is captured for debugging.
PRINT_LOGS = {
    "version": "show version",
    "images": "sonic_installer list",
    "docker": "docker ps -a",
    "interfaces": "show interface status",
    "ip": "show ip interface",
    "neigh": "ip neigh",
    "bgp": "show bgp summary",
    "routes": "ip route | wc -l"
}
# Recover related definitions
# Each entry: shell command to run, whether it reboots the DUT, whether the
# method is chosen adaptively, and seconds to wait after recovery.
RECOVER_METHODS = {
    "config_reload": {"cmd": "config reload -y", "reboot": False, "adaptive": False, 'recover_wait': 60},
    "load_minigraph": {"cmd": "config load_minigraph -y", "reboot": False, "adaptive": False, 'recover_wait': 60},
    "reboot": {"cmd": "reboot", "reboot": True, "adaptive": False, 'recover_wait': 120},
    "warm_reboot": {"cmd": "warm-reboot", "reboot": True, "adaptive": False, 'recover_wait': 120},
    "fast_reboot": {"cmd": "fast_reboot", "reboot": True, "adaptive": False, 'recover_wait': 120},
    "adaptive": {"cmd": None, "reboot": False, "adaptive": True, 'recover_wait': 30},
}  # All supported recover methods
SUPPORTED_CHECK_ITEMS = ["services", "interfaces", "dbmemory"]  # Supported checks
| 44.041667 | 114 | 0.624409 |
fa3af6261181f79fe0dcec5e7a150c2de8dbef25
| 1,756 |
py
|
Python
|
MiniTourn_Triple/Player.py
|
jeffreyzli/pokerbot-2017
|
df2aa31d6aaf0e3162d24ae5f4c2a918ab19831f
|
[
"MIT"
] | 1 |
2017-01-18T21:25:21.000Z
|
2017-01-18T21:25:21.000Z
|
BetFoldBot/Player.py
|
jeffreyzli/pokerbot-2017
|
df2aa31d6aaf0e3162d24ae5f4c2a918ab19831f
|
[
"MIT"
] | null | null | null |
BetFoldBot/Player.py
|
jeffreyzli/pokerbot-2017
|
df2aa31d6aaf0e3162d24ae5f4c2a918ab19831f
|
[
"MIT"
] | 3 |
2017-02-06T04:35:02.000Z
|
2020-03-08T18:56:25.000Z
|
import argparse
import socket
import sys
from GameData import GameData
import BetFoldLogic as BetFold
class Player:
    # NOTE: Python 2 source (print statements). Replies are sent on the
    # module-level socket `s` created in __main__, not on input_socket --
    # in practice they are the same object; verify before refactoring.
    def run(self, input_socket):
        # Main loop: read newline-delimited engine messages and respond
        # with actions computed by BetFoldLogic.
        f_in = input_socket.makefile()
        while True:
            data = f_in.readline().strip()
            if not data:
                # Empty read => engine closed the connection.
                print "Gameover, engine disconnected."
                break
            data_list = data.split()
            word = data_list[0]
            if word == "NEWGAME":
                # Start of a match: capture game parameters.
                game_data = GameData(data_list[1], data_list[2], data_list[3], data_list[4])
            elif word == "NEWHAND":
                game_data.new_hand(data_list)
            elif word == "GETACTION":
                game_data.get_action(data_list)
                action = BetFold.action(game_data)
                s.send(action + "\n")
            elif word == "REQUESTKEYVALUES":
                # At the end, the engine will allow your bot save key/value pairs.
                # Send FINISH to indicate you're done.
                s.send("FINISH\n")
        # Clean up the socket.
        s.close()
if __name__ == '__main__':
    # Parse host/port, connect to the poker engine, and hand the socket to
    # the bot's main loop. (Python 2 source: print statements.)
    parser = argparse.ArgumentParser(description='A Pokerbot.', add_help=False, prog='pokerbot')
    parser.add_argument('-h', dest='host', type=str, default='localhost', help='Host to connect to, defaults to localhost')
    parser.add_argument('port', metavar='PORT', type=int, help='Port on host to connect to')
    args = parser.parse_args()
    # Create a socket connection to the engine.
    print 'Connecting to %s:%d' % (args.host, args.port)
    try:
        s = socket.create_connection((args.host, args.port))
    except socket.error as e:
        print 'Error connecting! Aborting'
        exit()
    bot = Player()
    bot.run(s)
| 30.275862 | 123 | 0.585991 |
25e2c4e0066ee8c549f175ff5c6a856ea47779ca
| 16,730 |
py
|
Python
|
neutronclient/neutron/v2_0/providernet.py
|
teresa-ho/stx-python-neutronclient
|
35ea6c2c96cbf98755a82cb7c19138648552b778
|
[
"Apache-2.0"
] | null | null | null |
neutronclient/neutron/v2_0/providernet.py
|
teresa-ho/stx-python-neutronclient
|
35ea6c2c96cbf98755a82cb7c19138648552b778
|
[
"Apache-2.0"
] | 2 |
2018-11-01T21:50:21.000Z
|
2018-11-13T21:40:09.000Z
|
neutronclient/neutron/v2_0/providernet.py
|
teresa-ho/stx-python-neutronclient
|
35ea6c2c96cbf98755a82cb7c19138648552b778
|
[
"Apache-2.0"
] | 3 |
2018-11-01T17:55:23.000Z
|
2018-11-23T19:16:06.000Z
|
# Copyright 2014 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import argparse
import itertools
import logging
from oslo_serialization import jsonutils
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronV20
def _format_vxlan_attributes(vxlan):
    """Render a vxlan attribute mapping as a JSON string for display."""
    attr_copy = {key: vxlan[key] for key in vxlan.keys()}
    return '{}'.format(jsonutils.dumps(attr_copy))
class ListProviderNetType(neutronV20.ListCommand):
    """List provider network types."""
    # Resource name used by the neutron client plumbing.
    resource = 'providernet_type'
    log = logging.getLogger(__name__ + '.ListProviderNetType')
    # Columns shown in the tabular output.
    list_columns = ['type', 'description']
def _format_ranges(providernet):
    """Render each segmentation-id range of a providernet as one JSON line."""
    wanted_keys = ('name', 'minimum', 'maximum')
    try:
        trimmed = [
            {key: entry[key] for key in entry.keys() if key in wanted_keys}
            for entry in providernet['ranges']
        ]
    except Exception as e:
        # Display the failure inline rather than aborting the listing.
        return 'error={}'.format(e)
    return '\n'.join("{}".format(jsonutils.dumps(r)) for r in trimmed)
class ListProviderNet(neutronV20.ListCommand):
    """List provider networks."""
    resource = 'providernet'
    log = logging.getLogger(__name__ + '.ListProviderNet')
    # Custom renderer for the 'ranges' column (one JSON line per range).
    _formatters = {'ranges': _format_ranges, }
    list_columns = ['id', 'name', 'type', 'mtu', 'ranges']
class ShowProviderNet(neutronV20.ShowCommand):
    """Show information for a given provider network."""
    resource = 'providernet'
    log = logging.getLogger(__name__ + '.ShowProviderNet')
    # Allow lookup by name as well as UUID.
    allow_names = True
    json_indent = 5
class CreateProviderNet(neutronV20.CreateCommand):
    """Create a provider network."""
    resource = 'providernet'
    log = logging.getLogger(__name__ + '.CreateProviderNet')
    def add_known_arguments(self, parser):
        # CLI options specific to provider network creation.
        parser.add_argument(
            '--description',
            dest='description',
            help='Set user-defined description field for a provider network')
        parser.add_argument(
            '--type', required=True,
            dest='type', default='flat',
            choices=['flat', 'vlan', 'vxlan'],
            help='Set network type for a provider network')
        parser.add_argument(
            '--mtu', dest='mtu', type=int,
            help='Maximum transmit unit on provider network')
        utils.add_boolean_argument(
            parser,
            '--vlan-transparent',
            default='False',
            help='Allow VLAN tagged packets on tenant networks')
        parser.add_argument(
            'name', metavar='NAME',
            help='Set user-defined name for a provider network')
    def args2body(self, parsed_args):
        # Build the REST request body; optional fields are added only when
        # supplied so server-side defaults apply otherwise.
        body = {'providernet': {
            'name': parsed_args.name,
            'type': parsed_args.type,
            'vlan_transparent': parsed_args.vlan_transparent}
        }
        if parsed_args.mtu:
            body['providernet'].update({'mtu': parsed_args.mtu})
        if parsed_args.description:
            body['providernet'].update({'description':
                                        parsed_args.description})
        return body
class UpdateProviderNet(neutronV20.UpdateCommand):
    """Update a given provider network."""
    log = logging.getLogger(__name__ + '.UpdateProviderNet')
    resource = 'providernet'
    # Allow lookup by name as well as UUID.
    allow_names = True
class DeleteProviderNet(neutronV20.DeleteCommand):
    """Delete a given provider network."""
    log = logging.getLogger(__name__ + '.DeleteProviderNet')
    resource = 'providernet'
    # Allow lookup by name as well as UUID.
    allow_names = True
class ListProviderNetRange(neutronV20.ListCommand):
    """List provider network segmentation id range."""
    resource = 'providernet_range'
    log = logging.getLogger(__name__ + '.ListProviderNetRange')
    list_columns = ['id', 'name', 'providernet', 'type',
                    'minimum', 'maximum', 'attributes']
    sorting_support = True
    def extend_list(self, data, parsed_args):
        # Post-process each row: flatten server-side names into the display
        # columns and fold vxlan details into a single 'attributes' string.
        for entry in data:
            # rename attributes
            entry['providernet'] = entry['providernet_name']
            entry['type'] = entry['providernet_type']
            entry['attributes'] = ""
            if 'vxlan' in entry:
                # rename attribute
                entry['attributes'] = _format_vxlan_attributes(entry['vxlan'])
                del entry['vxlan']
    def args2search_opts(self, parsed_args):
        # Default sort: by providernet name, then by range minimum, ascending.
        opts = super(ListProviderNetRange, self).args2search_opts(parsed_args)
        opts.update({'sort_key': ['providernet_name', 'minimum'],
                     'sort_dir': ['asc', 'asc']})
        return opts
class ShowProviderNetRange(neutronV20.ShowCommand):
    """Show information for a given provider network segmentation id range."""
    resource = 'providernet_range'
    log = logging.getLogger(__name__ + '.ShowProviderNetRange')
    # Allow lookup by name as well as UUID.
    allow_names = True
    json_indent = 5
def _id_range_value(value):
range_list = value.split('-')
if len(range_list) != 2:
raise argparse.ArgumentTypeError(
'Expecting MIN_VALUE-MAX_VALUE in range list')
return {'minimum': range_list[0],
'maximum': range_list[1]}
class CreateProviderNetRange(neutronV20.CreateCommand):
    """Create a provider network segmentation id range."""
    resource = 'providernet_range'
    log = logging.getLogger(__name__ + '.CreateProviderNetRange')
    def add_known_arguments(self, parser):
        # CLI options; --range uses _id_range_value to parse 'MIN-MAX'.
        parser.add_argument(
            '--shared',
            dest='shared', action='store_true', default=False,
            help=('Set whether a provider network segmentation id range '
                  'may be shared between tenants'))
        parser.add_argument(
            '--description',
            dest='description',
            help='Set user-defined description field for a provider network')
        parser.add_argument(
            '--range', metavar='MIN_VALUE-MAX_VALUE', required=True,
            dest='range', type=_id_range_value,
            help='Segmentation id value range')
        parser.add_argument(
            '--name', required=True,
            dest='name',
            help=('Set user-defined name for a provider network '
                  'segmentation id range'))
        parser.add_argument(
            '--group',
            dest='group',
            help='Multicast IP addresses for VXLAN endpoints')
        parser.add_argument(
            '--ttl', dest='ttl', type=int,
            help='Time-to-live value for VXLAN provider networks')
        parser.add_argument(
            '--port', dest='port', type=int,
            help=('Destination UDP port value to use for '
                  'VXLAN provider networks'))
        parser.add_argument(
            'providernet_id', metavar='PROVIDERNET',
            help='Provider network this segmentation id range belongs to')
        parser.add_argument(
            '--mode',
            dest='mode', default='dynamic',
            choices=['dynamic', 'static', 'evpn'],
            help='Set vxlan learning mode')
    def args2body(self, parsed_args):
        # Resolve the providernet argument (name or UUID) to its UUID, then
        # build the request body; vxlan-only fields are added when supplied.
        _providernet_id = neutronV20.find_resourceid_by_name_or_id(
            self.get_client(), 'providernet', parsed_args.providernet_id)
        body = {'providernet_range': {
            'providernet_id': _providernet_id,
            'name': parsed_args.name,
            'description': parsed_args.description,
            'shared': parsed_args.shared,
            'minimum': parsed_args.range['minimum'],
            'maximum': parsed_args.range['maximum']}
        }
        if parsed_args.tenant_id:
            body['providernet_range'].update({'tenant_id':
                                              parsed_args.tenant_id})
        if parsed_args.port:
            body['providernet_range'].update({'port': parsed_args.port})
        if parsed_args.ttl:
            body['providernet_range'].update({'ttl': parsed_args.ttl})
        if parsed_args.group:
            body['providernet_range'].update({'group': parsed_args.group})
        if parsed_args.mode:
            body['providernet_range'].update({'mode': parsed_args.mode})
        return body
class UpdateProviderNetRange(neutronV20.UpdateCommand):
    """Update a given provider network segmentation id range."""
    log = logging.getLogger(__name__ + '.UpdateProviderNetRange')
    resource = 'providernet_range'
    # Allow lookup by name as well as UUID.
    allow_names = True
    def add_known_arguments(self, parser):
        parser.add_argument(
            '--description',
            dest='description',
            help='Set user-defined description field for a provider network')
        parser.add_argument(
            '--range', metavar='MIN_VALUE-MAX_VALUE',
            dest='range', type=_id_range_value,
            help='Segmentation id value range')
    def args2body(self, parsed_args):
        # Only the fields actually supplied are sent in the update body.
        body = {'providernet_range': {}}
        if parsed_args.description:
            body['providernet_range'].update(
                {'description': parsed_args.description})
        if parsed_args.range:
            body['providernet_range'].update(
                {'minimum': parsed_args.range['minimum'],
                 'maximum': parsed_args.range['maximum']})
        return body
class DeleteProviderNetRange(neutronV20.DeleteCommand):
    """Delete a given provider network segmentation id range."""
    log = logging.getLogger(__name__ + '.DeleteProviderNetRange')
    resource = 'providernet_range'
    # Allow lookup by name as well as UUID.
    allow_names = True
def _format_segmentation_id(network):
if network['providernet_type'].lower() == "flat":
return "n/a"
return network['segmentation_id']
class ListNetworksOnProviderNet(neutronV20.ListCommand):
    """List the networks on a provider network."""
    log = logging.getLogger(__name__ + '.ListNetworksOnProviderNet')
    list_columns = ['id', 'name', 'vlan_id',
                    'providernet_type', 'segmentation_id',
                    'providernet_attributes']
    # Render 'n/a' for segmentation ids of flat networks.
    _formatters = {'segmentation_id': _format_segmentation_id}
    sorting_support = True
    resource = 'network'
    unknown_parts_flag = False
    def extend_list(self, data, parsed_args):
        # Fold vxlan details into a single display column per row.
        for entry in data:
            entry['providernet_attributes'] = ""
            if 'vxlan' in entry:
                # rename attribute
                entry['providernet_attributes'] = (
                    _format_vxlan_attributes(entry['vxlan']))
                del entry['vxlan']
    def get_parser(self, prog_name):
        # Add the positional providernet argument to the inherited parser.
        parser = super(ListNetworksOnProviderNet,
                       self).get_parser(prog_name)
        parser.add_argument(
            'providernet',
            help='Name of the provider network')
        return parser
    def call_server(self, neutron_client, search_opts, parsed_args):
        # Resolve name/UUID, then query networks scoped to that providernet.
        _id = neutronV20.find_resourceid_by_name_or_id(neutron_client,
                                                       'providernet',
                                                       parsed_args.providernet)
        search_opts['providernet'] = _id
        data = neutron_client.list_networks_on_providernet(
            _id, **search_opts)
        return data
class ListProvidernetConnectivityTests(neutronV20.ListCommand):
    """List provider network connectivity test results.

    Groups per-segment results into one row per (providernet, host, status)
    with the segmentation ids collapsed into compact ranges.
    """
    resource = 'providernet_connectivity_test'
    log = logging.getLogger(__name__ + '.ListProviderConnectivityTests')
    list_columns = ['providernet_id', 'providernet_name', 'type', 'host_name',
                    'segmentation_ids', 'status', 'message']
    sorting_support = True
    @staticmethod
    def _list_segments(segments):
        """Takes a list of segments, and outputs them as a string"""
        # Falsy segment values (e.g. None for flat networks) display as '*'.
        msg = ", ".join([str(x or "*") for x in sorted(segments)])
        return msg
    def _group_segmentation_id_list(self, segmentation_ids):
        """Takes a list of integers and groups them into ranges"""
        if len(segmentation_ids) < 1:
            return ""
        try:
            sorted_segmentation_ids = sorted(
                [int(segmentation_id) for segmentation_id in segmentation_ids]
            )
        except Exception:
            # Non-numeric ids cannot be ranged; fall back to a plain list.
            return self._list_segments(segmentation_ids)
        # Consecutive integers share a constant (index - value) difference,
        # so groupby on that key yields one group per run.
        # NOTE: the original lambda used Python-2-only tuple-parameter
        # unpacking (``lambda (i, n): i - n``), a SyntaxError on Python 3;
        # this form behaves identically on both.
        grouped_ids = [tuple(g[1]) for g in itertools.groupby(
            enumerate(sorted_segmentation_ids), lambda pair: pair[0] - pair[1]
        )]
        msg = ", ".join(
            [(("%s-%s" % (g[0][1], g[-1][1])) if g[0][1] != g[-1][1]
              else ("%s" % g[0][1])) for g in grouped_ids]
        )
        return msg
    def _connectivity_results_to_formatted_dict(self, data):
        """Takes a list of results, and formats them for reporting"""
        parsed_results = {}
        # When no result carries a message, store it under a key that is not
        # a display column so the message column stays hidden.
        message_key = "hidden"
        for result in data:
            providernet_id = result["providernet_id"]
            providernet_name = result["providernet_name"]
            providernet_type = result["type"]
            hostname = result["host_name"]
            segmentation_id = result.get("segmentation_id", None)
            status = result["status"]
            message = result["message"]
            if message:
                message_key = "message"
            test = (providernet_id, providernet_name, providernet_type,
                    hostname, status, message)
            if test not in parsed_results:
                parsed_results[test] = []
            parsed_results[test].append(segmentation_id)
        formatted_results = []
        # NOTE: ``iteritems()`` was Python-2-only; ``items()`` works on both.
        for test, results in parsed_results.items():
            (providernet_id, providernet_name, providernet_type,
                hostname, status, message) = test
            formatted_segmentation_ids = \
                self._group_segmentation_id_list(results)
            formatted_result = {"providernet_id": providernet_id,
                                "providernet_name": providernet_name,
                                "type": providernet_type,
                                "host_name": hostname,
                                "status": status,
                                message_key: message,
                                "segmentation_ids": formatted_segmentation_ids}
            formatted_results.append(formatted_result)
        return formatted_results
    def extend_list(self, data, parsed_args):
        # Replace raw per-segment rows with the grouped/formatted rows in place.
        formatted_data = self._connectivity_results_to_formatted_dict(data)
        del data[:]
        data.extend(formatted_data)
    def args2search_opts(self, parsed_args):
        opts = super(ListProvidernetConnectivityTests, self).args2search_opts(
            parsed_args
        )
        opts.update({'sort_key': ['status', 'hostname', 'providernet',
                                  'audit_uuid'],
                     'sort_dir': ['asc', 'asc', 'asc', 'asc']})
        return opts
class CreateProvidernetConnectivityTests(neutronV20.CreateCommand):
    """Schedules a providernet connectivity test to be run"""
    resource = 'providernet_connectivity_test'
    log = logging.getLogger(__name__ + '.CreateProvidernetConnectivityTests')
    def add_known_arguments(self, parser):
        # All filters are optional; omitted ones mean "all".
        parser.add_argument(
            '--providernet',
            dest='providernet', default=None,
            help='Schedule audit for given providernet')
        parser.add_argument(
            '--host',
            dest='host', default=None,
            help='Schedule audits for all providernets on host')
        parser.add_argument(
            '--segmentation_id',
            dest='segmentation_id', default=None,
            help='Schedule for this segmentation ID')
    def args2body(self, parsed_args):
        # Resolve the providernet name/UUID only when one was supplied.
        if parsed_args.providernet:
            _providernet_id = neutronV20.find_resourceid_by_name_or_id(
                self.get_client(), 'providernet', parsed_args.providernet
            )
        else:
            _providernet_id = None
        body = {'providernet_connectivity_test': {
            "providernet_id": _providernet_id,
            "host_name": parsed_args.host,
            "segmentation_id": parsed_args.segmentation_id,
        }}
        return body
| 36.448802 | 79 | 0.608488 |
312060022c3cebf40a1611edb1ce897c2e7827c1
| 1,849 |
py
|
Python
|
interpret/attr/gradcam.py
|
ttumiel/interpret
|
aeecb00bf65376668a48895cb707beb6dd8fb7ab
|
[
"MIT"
] | 14 |
2019-10-28T18:49:31.000Z
|
2021-03-25T12:13:35.000Z
|
interpret/attr/gradcam.py
|
ttumiel/interpret
|
aeecb00bf65376668a48895cb707beb6dd8fb7ab
|
[
"MIT"
] | null | null | null |
interpret/attr/gradcam.py
|
ttumiel/interpret
|
aeecb00bf65376668a48895cb707beb6dd8fb7ab
|
[
"MIT"
] | null | null | null |
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from interpret.hooks import *
from interpret import core
from interpret.utils import denorm
from interpret.attr import Attribute
class Gradcam(Attribute):
    """Generates a Grad-CAM attribution map for convolutional neural networks.
    Parameters:
        model: PyTorch model.
        img (torch.Tensor): input tensor fed into the network for attribution.
        im_class (int): the class that the network is attributing on.
        layer (int): the layer the network is using for the attribution. See [1].
        heatmap_thresh (int): prevents heatmaps from being created when the
            feature map is less than 2x2 pixels.
    Returns:
        The Grad-CAM heatmap (torch.Tensor)
    References:
        [1] - Grad-CAM: Visual Explanations from Deep Networks via
        Gradient-based Localization. https://arxiv.org/abs/1610.02391
    """
    def __init__(self, model, img, im_class, layer=0, heatmap_thresh=16):
        # NOTE(review): `m[layer]` assumes the model is index-subscriptable
        # (e.g. an nn.Sequential) -- confirm for arbitrary models.
        self.input_data = img
        m = model.eval()
        cl = int(im_class)
        xb = img
        m[layer].requires_grad_(True)
        # Capture the layer's activations and its gradient during one
        # forward pass + backward pass on the target class score.
        with hook_output(m[layer]) as hook_a:
            with hook_output(m[layer], grad=True) as hook_g:
                preds = m(xb)
                preds[0,int(cl)].backward()
        acts = hook_a.stored[0].cpu()
        grad = hook_g.stored[0][0].cpu()
        if (acts.shape[-1]*acts.shape[-2]) >= heatmap_thresh:
            # Channel-wise weights = spatially averaged gradients; the
            # heatmap is the ReLU of the weighted activation sum.
            grad_chan = grad.mean(1).mean(1)
            self.data = F.relu(((acts*grad_chan[...,None,None])).sum(0))
        else:
            raise ValueError("Image not large enough to create a heatmap. Increase "
                             "size of image or move the layer further down into the "
                             "network")
| 37.734694 | 84 | 0.623039 |
902b33157b66a69cb01cc2b62cda9c9381021fe3
| 4,971 |
py
|
Python
|
src/adcpyproc/misc_functions.py
|
oyvlun/adcpyproc
|
1d2dd81c1295bb460a32badd5565976869a6e0b9
|
[
"MIT"
] | null | null | null |
src/adcpyproc/misc_functions.py
|
oyvlun/adcpyproc
|
1d2dd81c1295bb460a32badd5565976869a6e0b9
|
[
"MIT"
] | null | null | null |
src/adcpyproc/misc_functions.py
|
oyvlun/adcpyproc
|
1d2dd81c1295bb460a32badd5565976869a6e0b9
|
[
"MIT"
] | null | null | null |
#######################################################################
# MISC_FUNCTIONS
#######################################################################
'''
Various functions that are not specific to the classes defined in
adcpyproc:
- Find magnetic declination from lon/lat/time using geomag.
- Convert mpl time to matlab time
'''
import numpy as np
import datetime as dt
try:
    import geomag
except ModuleNotFoundError:
    # Degrade gracefully. The original `raise Warning(...)` aborted the
    # module import, contradicting the message's "won't be able to ..."
    # intent; a warning lets the rest of the package load without geomag.
    import warnings
    warnings.warn("Couldn't load module *geomag* -> won't be "
                  "able to apply magnetic declination correction.")
from adcpyproc import __file__ as _initloc
# Get the path of the folder containing the .COF file (a little hacky..)
cofdir = _initloc.replace('__init__.py', 'WMM_COF_files/')
#######################################################################
def declination(dlat, dlon, time, h=0, model=None, explicit=True):
    """
    Wrapper for function in the *geomag* module by Christopher
    Weiss (https://pypi.org/project/geomag/).

    Calculate magnetic declination in degrees based on World Magnetic
    Model coefficients, allowing different WMM models and iteration over
    several times (and lat/lons).

    dlat = Latitude in degrees (float or array)
    dlon = Longitude in degrees (float or array)
    h = Altitude in feet, default=0
    time = Date for computing declination (datetime or iterable of datetimes)
    model = Magnetic model (WMM). Default behaviour is to select the
        model based on the time. Possible inputs are 'WMM2010',
        'WMM2015', 'WMM2020'.
    explicit = Print progress information if True.

    Returns: Magnetic declination in degrees (float or array depending
    on the input)

    NOTE 1: Each WMM model is valid for five years. The three models
    included here are valid between 1-1-2010 and 31-12-2024. A warning
    is issued outside of this time range.
    NOTE 2: Discontinuities can occur when switching between models
    (e.g. at 1-1-2015 00:00). Consider using a single WMM model for
    a single dataset.
    """
    import warnings
    # Representative time for model selection (fix: `time.year` failed when
    # `time` was an iterable of datetimes).
    t0 = time[0] if hasattr(time, '__iter__') else time
    if not model:
        if t0.year > 2019:
            model = 'WMM2020'
            if t0.year > 2024:
                # Fix: originally `raise Warning(...)`, which aborted despite
                # the "proceed with caution" message; warn and continue.
                warnings.warn('WMM2020 is only valid through 2024.'
                              ' Proceed with caution..')
        elif t0.year < 2015:
            model = 'WMM2010'
            if t0.year < 2010:
                warnings.warn('WMM2010 is only valid starting in 2010.'
                              ' Proceed with caution..')
        else:
            model = 'WMM2015'
    if explicit:
        print('Using %s to compute declination..' % model)
    wmm_filename = '%s%s.COF' % (cofdir, model)
    __singleton__ = geomag.geomag.GeoMag(wmm_filename=wmm_filename)
    # For a single time entry
    if not hasattr(time, '__iter__'):
        time_date = dt.date(*time.timetuple()[:-6])
        magdec = __singleton__.GeoMag(dlat, dlon, h, time_date).dec
    # For many time entries
    else:
        magdec = np.array([])
        time_dates = [dt.date(*t_.timetuple()[:-6]) for t_ in time]
        N = len(time)
        # For a single lat/lon entry:
        if not hasattr(dlon, '__iter__'):
            for nn, time_ in enumerate(time_dates):
                magdec_ = __singleton__.GeoMag(dlat, dlon, h, time_).dec
                magdec = np.append(magdec, magdec_)
                if explicit:
                    print('%.2f %%..\r' % (100 * nn / N), end='')
        # For multiple lat/lon entries:
        else:
            if time.shape != dlat.shape:
                raise Exception('magdec calculation: lat/lon must be constant'
                                'or have the same shape as t!')
            # Fix: `nn` was never defined in this loop in the original,
            # raising NameError whenever explicit=True.
            for nn, (time_, dlon_, dlat_) in enumerate(
                    zip(time_dates, dlon, dlat)):
                magdec_ = __singleton__.GeoMag(dlat_, dlon_, h, time_).dec
                magdec = np.append(magdec, magdec_)
                if explicit:
                    print('%.2f %%..\r' % (100 * nn / N), end='')
    if explicit:
        # Fix: the completion message previously printed even when
        # explicit=False.
        print('... done.')
    return magdec
#######################################################################
def t_python_to_mat(pytime):
    '''
    Convert Matplotlib date numbers (days) to MATLAB datenum (days).

    Matplotlib classic base: 01-Jan-1970
    MATLAB base:             00-Jan-0000

    Parameters
    ----------
    pytime : float or array-like
        Time in Matplotlib date numbers (days).

    Returns
    -------
    float or array-like
        Time in MATLAB datenum (days).
    '''
    # MATLAB's epoch (00-Jan-0000) lies 719529 days before Matplotlib's
    # classic epoch (01-Jan-1970), so the conversion is a constant offset.
    # (Original docstring stated the inverse conversion; the code adds the
    # offset, i.e. python -> matlab, matching the function name.)
    mattime = pytime + 719529.0
    return mattime
#########################
def closest(x, x0):
    '''
    Return the index at which a 1-d array x is closest to a value x0.

    Parameters
    ----------
    x : 1-d array-like
        Values to search.
    x0 : scalar
        Target value.

    Returns
    -------
    int
        Index of the first element of ``x`` minimizing ``abs(x - x0)``.

    Raises
    ------
    TypeError
        If ``x`` has more than one dimension.
    '''
    if len(np.shape(x)) > 1:
        raise TypeError('Only works for 1-d arrays right now.')
    dx = abs(x - x0)
    # np.ma.argmin returns the position of the first minimum and honors
    # masked entries, matching the original nonzero(dx == min)[0][0] idiom.
    return int(np.ma.argmin(dx))
| 33.587838 | 79 | 0.576544 |
8be4ea792ed87a528997915de0ab58118373f91f
| 71 |
py
|
Python
|
dice_ml/__init__.py
|
prabhathur/CF
|
20943f3f326e72ea7c5464bc2c3eee06703ed404
|
[
"MIT"
] | 2 |
2021-05-19T04:29:06.000Z
|
2021-07-22T17:25:08.000Z
|
dice_ml/__init__.py
|
prabhathur/CF
|
20943f3f326e72ea7c5464bc2c3eee06703ed404
|
[
"MIT"
] | null | null | null |
dice_ml/__init__.py
|
prabhathur/CF
|
20943f3f326e72ea7c5464bc2c3eee06703ed404
|
[
"MIT"
] | 3 |
2019-11-24T01:06:24.000Z
|
2020-11-24T14:04:09.000Z
|
from .data import Data
from .model import Model
from .dice import Dice
| 17.75 | 24 | 0.788732 |
3693a8820783a6f7cd7b934dcf1b27e5a6df3474
| 4,997 |
py
|
Python
|
Python/getsubinterfaces.py
|
krapsusa/netmri-toolkit
|
5740cd90e2fe242b8af3ec6192fca10a30cfa94d
|
[
"MIT"
] | 17 |
2015-09-17T18:25:55.000Z
|
2020-08-27T00:16:54.000Z
|
Python/getsubinterfaces.py
|
krapsusa/netmri-toolkit
|
5740cd90e2fe242b8af3ec6192fca10a30cfa94d
|
[
"MIT"
] | 2 |
2019-08-08T17:33:32.000Z
|
2019-10-10T15:36:55.000Z
|
Python/getsubinterfaces.py
|
krapsusa/netmri-toolkit
|
5740cd90e2fe242b8af3ec6192fca10a30cfa94d
|
[
"MIT"
] | 19 |
2016-04-25T12:45:31.000Z
|
2020-09-28T18:24:51.000Z
|
#Device subinterface data retrieval script. Copyright Ingmar Van Glabbeek [email protected]
#Licensed under Apache-2.0
#This script will pull all devices of a given device group and then list the devices management ip as well as the available management ips.
#By default it saves the output to "deviceinterfacedump.json"
#Tested on NetMRI 7.3.1 and 7.3.2
#Modules required:
import getpass
import requests
import json
import urllib3
from requests.auth import HTTPBasicAuth
from http.client import responses
import time
#You can hardcode credentials here, it's not safe. Don't do it.
#hostname = "netmri.infoblox.com"
#username = "admin"
#password = "infoblox"
#urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def main():
    """Entry point: log in to NetMRI, fetch device rows for a chosen group,
    reduce them to per-device interface info, and dump the result to
    deviceinterfacedump.json."""
    cookie_host = wapi_connect()
    #print(cookie_host)
    devicelist = getdevices(cookie_host)
    filtered_data = devicedata(devicelist)
    #uncomment next line if you want to write to console
    #print(json.dumps(filtered_data,indent=4, sort_keys=True))
    # Context manager guarantees the file is closed even if the write fails
    # (the original open/write/close left the handle open on error).
    with open("deviceinterfacedump.json", "w") as outfile:
        outfile.write(json.dumps(filtered_data, indent=4))
    print("Data retrieved successfully")
def devicedata(devicelist):
    """Reduce raw device-list JSON to per-device management/interface info.

    Parameters
    ----------
    devicelist : str
        JSON text with a top-level "rows" list, as returned by getdevices().

    Returns
    -------
    list
        One dict per device carrying DeviceID/DeviceName/DeviceType/
        DeviceIPDotted and an "Other InterfaceIP" list of
        {"InterfaceIP", "Interfacename"} dicts, or ["none"] when the device
        reports no extra interface addresses.

    Notes
    -----
    The original implementation appended the same device dict once per
    interface (duplicating devices with several if_addrs) and inserted every
    interface at index 1 (scrambling order past the second entry). Each
    device is now emitted exactly once with interfaces in API order.
    """
    listload = json.loads(devicelist)
    data = []
    for row in listload['rows']:
        interfaces = [
            {"InterfaceIP": addr["ifIPDotted"], "Interfacename": addr["ifName"]}
            for addr in row["if_addrs"]
        ]
        device = {
            "DeviceID": row["DeviceID"],
            "DeviceName": row["DeviceName"],
            "DeviceType": row["DeviceType"],
            "DeviceIPDotted": row["DeviceIPDotted"],
            # Keep the legacy ["none"] placeholder for interface-less devices.
            "Other InterfaceIP": interfaces if interfaces else ["none"],
        }
        data.append(device)
    return data
def getdevices(cookie_host):
    """Interactively pick a device group and fetch its device rows.

    Prompts for a result limit and a device-group ID (after listing the
    available groups), then queries the discovery-status endpoint.

    Parameters
    ----------
    cookie_host : tuple
        (cookie jar, hostname) as returned by wapi_connect().

    Returns
    -------
    str or int
        Raw JSON text of the device rows, or 0 when no session exists.
    """
    if not cookie_host:
        print("No connection established.")
        return 0
    # Timestamp passed as the extjs "_dc" cache-busting parameter below.
    ts = time.time()
    hostname=cookie_host[1]
    # Caps the number of device rows returned by the API.
    limit = input("Limit to this number of devices: ")
    get_url = "https://" + hostname + "/api/3.3/device_groups/index"
    # NOTE(review): verify=False disables TLS certificate checking here
    # regardless of the answer given in wapi_connect() — confirm intent.
    response = requests.get(get_url, cookies=cookie_host[0], verify=False)
    d=response.text
    dl=json.loads(d)
    print("List of DeviceGroups")
    for e in dl["device_groups"]:
        dglist={"GroupName":e["GroupName"],"GroupID":e["GroupID"]}
        print(dglist)
    devicegroup = input("Based on the output specify the devicegroup ID by its ID: ")
    get_url = "https://" + hostname + "/api/3.3/discovery_statuses/static/current.extjs"
    querystring = {"_dc": ts, "filename": "recent_activity.csv", "filter": "null", "limit": limit,
                   "GroupID": devicegroup}
    response = requests.get(get_url, cookies=cookie_host[0], verify=False, params=querystring)
    t=response.text
    print("We are fetching a list of " + str(limit) +
          " devices for devicegroup " + str(devicegroup) + ".")
    return(t)
def wapi_connect():
    """Prompt for NetMRI credentials and log in over HTTPS.

    Returns
    -------
    tuple
        (requests cookie jar, hostname) on success. Exits the process with
        status 1 on timeout, connection failure, or an HTTP error response.
    """
    hostname = input("Enter the NetMRI hostname or IP: ")
    username = input("Enter your NetMRI username: ")
    password = getpass.getpass("Enter your Password: ")
    https_val = input("Disable SSL validations?(y/n) ")
    # Honor the user's answer. The original hard-coded verify=False, so the
    # "SSL validation enabled" message was misleading: validation was never
    # actually performed.
    verify = True
    if https_val in ("y", "Y"):
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        print("SSL validation disabled")
        verify = False
    if https_val in ("n", "N"):
        print("SSL validation enabled")
    login_url = "https://" + hostname + "/api/3.3/device_groups/index"
    print("logging in to " + hostname)
    try:
        login_result = requests.get(
            login_url,
            auth=HTTPBasicAuth(username, password),
            timeout=5,
            verify=verify)
    except requests.exceptions.ConnectTimeout:
        print("Connection time out after 5 seconds.")
        exit(1)
    except requests.exceptions.ConnectionError:
        print("No route to host " + hostname)
        exit(1)
    if has_error(login_result):
        exit(1)
    else:
        print("Login OK")
        return(login_result.cookies,hostname)
def has_error(_result):
    """Return 0 for a successful HTTP response (200/201); otherwise log the
    code, its description, and any error text, and return 1."""
    if _result.status_code in (200, 201):
        return 0
    # Pull the server-supplied error text if the body is JSON with a 'text'
    # key; fall back to a placeholder otherwise.
    try:
        err_text = _result.json()['text']
    except KeyError:
        err_text = "Response contains no error text"
    except json.decoder.JSONDecodeError:
        err_text = "No JSON Response"
    # print out the HTTP response code, description, and error text
    http_code = _result.status_code
    http_desc = responses[http_code]
    print("HTTP Code [%3d] %s. %s" % (http_code, http_desc, err_text))
    return 1
# Run the interactive workflow only when executed as a script (not imported).
if __name__ == "__main__":
    main()
| 34.462069 | 171 | 0.654993 |
84170e4c09ccb81a0cc041807fe28d01807cafe3
| 839 |
py
|
Python
|
corehq/apps/userreports/tests/__init__.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/userreports/tests/__init__.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | 1 |
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
corehq/apps/userreports/tests/__init__.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
from .test_app_manager_integration import *
from .test_columns import *
from .test_static_data_sources import *
from .test_static_reports import *
from .test_export import *
from .test_expressions import *
from .test_filters import *
from .test_getters import *
from .test_data_source_config import *
from .test_data_source_repeats import *
from .test_multi_db import *
from .test_indicators import *
from .test_pillow import *
from .test_report_builder import *
from .test_report_charts import *
from .test_report_config import *
from .test_report_filters import *
from .test_sorting import *
from .test_transforms import *
from .test_utils import *
from .test_view import *
from .test_dbaccessors import *
from corehq.apps.userreports.expressions.getters import recursive_lookup
# Expose recursive_lookup's docstring examples to the doctest collector so
# they run as part of this test package.
__test__ = {
    'recursive_lookup': recursive_lookup
}
| 28.931034 | 72 | 0.811681 |
24538d5f8b3f69b99cfa68bd089e7cf1c803e6c3
| 12,801 |
py
|
Python
|
research/learned_optimizer/optimizer/coordinatewise_rnn.py
|
SimiaCryptus/models
|
c652a23a650070b71e286f1ded93726670161940
|
[
"Apache-2.0"
] | null | null | null |
research/learned_optimizer/optimizer/coordinatewise_rnn.py
|
SimiaCryptus/models
|
c652a23a650070b71e286f1ded93726670161940
|
[
"Apache-2.0"
] | null | null | null |
research/learned_optimizer/optimizer/coordinatewise_rnn.py
|
SimiaCryptus/models
|
c652a23a650070b71e286f1ded93726670161940
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Collection of trainable optimizers for meta-optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
from learned_optimizer.optimizer import trainable_optimizer as opt
from learned_optimizer.optimizer import utils
# Default was 1e-3
tf.app.flags.DEFINE_float("crnn_rnn_readout_scale", 0.5,
"""The initialization scale for the RNN readouts.""")
tf.app.flags.DEFINE_float("crnn_default_decay_var_init", 2.2,
"""The default initializer value for any decay/
momentum style variables and constants.
sigmoid(2.2) ~ 0.9, sigmoid(-2.2) ~ 0.01.""")
FLAGS = tf.flags.FLAGS
class CoordinatewiseRNN(opt.TrainableOptimizer):
  """RNN that operates on each coordinate of the problem independently."""
  def __init__(self,
               cell_sizes,
               cell_cls,
               init_lr_range=(1., 1.),
               dynamic_output_scale=True,
               learnable_decay=True,
               zero_init_lr_weights=False,
               **kwargs):
    """Initializes the RNN per-parameter optimizer.
    Args:
      cell_sizes: List of hidden state sizes for each RNN cell in the network
      cell_cls: tf.contrib.rnn class for specifying the RNN cell type
      init_lr_range: the range in which to initialize the learning rates.
      dynamic_output_scale: whether to learn weights that dynamically modulate
        the output scale (default: True)
      learnable_decay: whether to learn weights that dynamically modulate the
        input scale via RMS style decay (default: True)
      zero_init_lr_weights: whether to initialize the lr weights to zero
      **kwargs: args passed to TrainableOptimizer's constructor
    Raises:
      ValueError: If the init lr range is not of length 2.
      ValueError: If the init lr range is not a valid range (min > max).
    """
    if len(init_lr_range) != 2:
      raise ValueError(
          "Initial LR range must be len 2, was {}".format(len(init_lr_range)))
    if init_lr_range[0] > init_lr_range[1]:
      raise ValueError("Initial LR range min is greater than max.")
    self.init_lr_range = init_lr_range
    self.zero_init_lr_weights = zero_init_lr_weights
    # Variables are created once here; _compute_update reuses them via
    # scope.reuse_variables() after the first call.
    self.reuse_vars = False
    # create the RNN cell
    with tf.variable_scope(opt.OPTIMIZER_SCOPE):
      self.component_cells = [cell_cls(sz) for sz in cell_sizes]
      self.cell = tf.contrib.rnn.MultiRNNCell(self.component_cells)
      # random normal initialization scaled by the output size
      scale_factor = FLAGS.crnn_rnn_readout_scale / math.sqrt(cell_sizes[-1])
      scaled_init = tf.random_normal_initializer(0., scale_factor)
      # weights for projecting the hidden state to a parameter update
      self.update_weights = tf.get_variable("update_weights",
                                            shape=(cell_sizes[-1], 1),
                                            initializer=scaled_init)
      self._initialize_decay(learnable_decay, (cell_sizes[-1], 1), scaled_init)
      self._initialize_lr(dynamic_output_scale, (cell_sizes[-1], 1),
                          scaled_init)
      # Total flattened RNN state size across all stacked cells (c and h).
      state_size = sum([sum(state_size) for state_size in self.cell.state_size])
      self._init_vector = tf.get_variable(
          "init_vector", shape=[1, state_size],
          initializer=tf.random_uniform_initializer(-1., 1.))
    # Per-parameter state slots tracked by the meta-optimizer framework
    # (see _initialize_state for their initial values).
    state_keys = ["rms", "rnn", "learning_rate", "decay"]
    super(CoordinatewiseRNN, self).__init__("cRNN", state_keys, **kwargs)
  def _initialize_decay(
      self, learnable_decay, weights_tensor_shape, scaled_init):
    """Initializes the decay weights and bias variables or tensors.
    Args:
      learnable_decay: Whether to use learnable decay.
      weights_tensor_shape: The shape the weight tensor should take.
      scaled_init: The scaled initialization for the weights tensor.
    """
    if learnable_decay:
      # weights for projecting the hidden state to the RMS decay term
      self.decay_weights = tf.get_variable("decay_weights",
                                           shape=weights_tensor_shape,
                                           initializer=scaled_init)
      self.decay_bias = tf.get_variable(
          "decay_bias", shape=(1,),
          initializer=tf.constant_initializer(
              FLAGS.crnn_default_decay_var_init))
    else:
      # Non-learnable path: zero weights make the decay a constant
      # sigmoid(crnn_default_decay_var_init).
      self.decay_weights = tf.zeros_like(self.update_weights)
      self.decay_bias = tf.constant(FLAGS.crnn_default_decay_var_init)
  def _initialize_lr(
      self, dynamic_output_scale, weights_tensor_shape, scaled_init):
    """Initializes the learning rate weights and bias variables or tensors.
    Args:
      dynamic_output_scale: Whether to use a dynamic output scale.
      weights_tensor_shape: The shape the weight tensor should take.
      scaled_init: The scaled initialization for the weights tensor.
    """
    if dynamic_output_scale:
      zero_init = tf.constant_initializer(0.)
      wt_init = zero_init if self.zero_init_lr_weights else scaled_init
      self.lr_weights = tf.get_variable("learning_rate_weights",
                                        shape=weights_tensor_shape,
                                        initializer=wt_init)
      self.lr_bias = tf.get_variable("learning_rate_bias", shape=(1,),
                                     initializer=zero_init)
    else:
      # Static scale: zero weights and bias keep the learning rate fixed.
      self.lr_weights = tf.zeros_like(self.update_weights)
      self.lr_bias = tf.zeros([1, 1])
  def _initialize_state(self, var):
    """Return a dictionary mapping names of state variables to their values."""
    # One state row per flattened element of the variable.
    vectorized_shape = [var.get_shape().num_elements(), 1]
    min_lr = self.init_lr_range[0]
    max_lr = self.init_lr_range[1]
    if min_lr == max_lr:
      init_lr = tf.constant(min_lr, shape=vectorized_shape)
    else:
      # Sample initial learning rates log-uniformly over [min_lr, max_lr].
      actual_vals = tf.random_uniform(vectorized_shape,
                                      np.log(min_lr),
                                      np.log(max_lr))
      init_lr = tf.exp(actual_vals)
    ones = tf.ones(vectorized_shape)
    # Broadcast the learned init vector to every coordinate.
    rnn_init = ones * self._init_vector
    return {
        "rms": tf.ones(vectorized_shape),
        "learning_rate": init_lr,
        "rnn": rnn_init,
        "decay": tf.ones(vectorized_shape),
    }
  def _compute_update(self, param, grad, state):
    """Update parameters given the gradient and state.
    Args:
      param: tensor of parameters
      grad: tensor of gradients with the same shape as param
      state: a dictionary containing any state for the optimizer
    Returns:
      updated_param: updated parameters
      updated_state: updated state variables in a dictionary
    """
    with tf.variable_scope(opt.OPTIMIZER_SCOPE) as scope:
      if self.reuse_vars:
        scope.reuse_variables()
      else:
        self.reuse_vars = True
      param_shape = tf.shape(param)
      (grad_values, decay_state, rms_state, rnn_state, learning_rate_state,
       grad_indices) = self._extract_gradients_and_internal_state(
           grad, state, param_shape)
      # Vectorize and scale the gradients.
      grad_scaled, rms = utils.rms_scaling(grad_values, decay_state, rms_state)
      # Apply the RNN update.
      rnn_state_tuples = self._unpack_rnn_state_into_tuples(rnn_state)
      rnn_output, rnn_state_tuples = self.cell(grad_scaled, rnn_state_tuples)
      rnn_state = self._pack_tuples_into_rnn_state(rnn_state_tuples)
      # Compute the update direction (a linear projection of the RNN output).
      delta = utils.project(rnn_output, self.update_weights)
      # The updated decay is an affine projection of the hidden state
      decay = utils.project(rnn_output, self.decay_weights,
                            bias=self.decay_bias, activation=tf.nn.sigmoid)
      # Compute the change in learning rate (an affine projection of the RNN
      # state, passed through a 2x sigmoid, so the change is bounded).
      learning_rate_change = 2. * utils.project(rnn_output, self.lr_weights,
                                                bias=self.lr_bias,
                                                activation=tf.nn.sigmoid)
      # Update the learning rate.
      new_learning_rate = learning_rate_change * learning_rate_state
      # Apply the update to the parameters.
      update = tf.reshape(new_learning_rate * delta, tf.shape(grad_values))
      if isinstance(grad, tf.IndexedSlices):
        # Sparse gradient: scatter the updated slices back into the full
        # state tensors; untouched rows keep their previous state.
        update = utils.stack_tensor(update, grad_indices, param,
                                    param_shape[:1])
        rms = utils.update_slices(rms, grad_indices, state["rms"], param_shape)
        new_learning_rate = utils.update_slices(new_learning_rate, grad_indices,
                                                state["learning_rate"],
                                                param_shape)
        rnn_state = utils.update_slices(rnn_state, grad_indices, state["rnn"],
                                        param_shape)
        decay = utils.update_slices(decay, grad_indices, state["decay"],
                                    param_shape)
      new_param = param - update
      # Collect the update and new state.
      new_state = {
          "rms": rms,
          "learning_rate": new_learning_rate,
          "rnn": rnn_state,
          "decay": decay,
      }
    return new_param, new_state
  def _extract_gradients_and_internal_state(self, grad, state, param_shape):
    """Extracts the gradients and relevant internal state.
    If the gradient is sparse, extracts the appropriate slices from the state.
    Args:
      grad: The current gradient.
      state: The current state.
      param_shape: The shape of the parameter (used if gradient is sparse).
    Returns:
      grad_values: The gradient value tensor.
      decay_state: The current decay state.
      rms_state: The current rms state.
      rnn_state: The current state of the internal rnns.
      learning_rate_state: The current learning rate state.
      grad_indices: The indices for the gradient tensor, if sparse.
          None otherwise.
    """
    if isinstance(grad, tf.IndexedSlices):
      grad_indices, grad_values = utils.accumulate_sparse_gradients(grad)
      decay_state = utils.slice_tensor(state["decay"], grad_indices,
                                       param_shape)
      rms_state = utils.slice_tensor(state["rms"], grad_indices, param_shape)
      rnn_state = utils.slice_tensor(state["rnn"], grad_indices, param_shape)
      learning_rate_state = utils.slice_tensor(state["learning_rate"],
                                               grad_indices, param_shape)
      decay_state.set_shape([None, 1])
      rms_state.set_shape([None, 1])
    else:
      grad_values = grad
      grad_indices = None
      decay_state = state["decay"]
      rms_state = state["rms"]
      rnn_state = state["rnn"]
      learning_rate_state = state["learning_rate"]
    return (grad_values, decay_state, rms_state, rnn_state, learning_rate_state,
            grad_indices)
  def _unpack_rnn_state_into_tuples(self, rnn_state):
    """Creates state tuples from the rnn state vector."""
    rnn_state_tuples = []
    cur_state_pos = 0
    for cell in self.component_cells:
      total_state_size = sum(cell.state_size)
      cur_state = tf.slice(rnn_state, [0, cur_state_pos],
                           [-1, total_state_size])
      # Split each cell's flat slice into its (c, h) halves.
      cur_state_tuple = tf.split(value=cur_state, num_or_size_splits=2,
                                 axis=1)
      rnn_state_tuples.append(cur_state_tuple)
      cur_state_pos += total_state_size
    return rnn_state_tuples
  def _pack_tuples_into_rnn_state(self, rnn_state_tuples):
    """Creates a single state vector concatenated along column axis."""
    # Inverse of _unpack_rnn_state_into_tuples: concatenate each cell's
    # (c, h) pair back into one flat row per coordinate.
    rnn_state = None
    for new_state_tuple in rnn_state_tuples:
      new_c, new_h = new_state_tuple
      if rnn_state is None:
        rnn_state = tf.concat([new_c, new_h], axis=1)
      else:
        rnn_state = tf.concat([rnn_state, tf.concat([new_c, new_h], 1)], axis=1)
    return rnn_state
| 40.638095 | 80 | 0.650965 |
5e6599c413e9b6f159b74971339a93e99ad57029
| 2,147 |
py
|
Python
|
python/hsfs/core/training_dataset_job_conf.py
|
kouzant/feature-store-api
|
f6a666e11fd33ae814a79c588ff49547b942b09d
|
[
"Apache-2.0"
] | 49 |
2020-09-07T17:43:11.000Z
|
2021-12-28T10:41:03.000Z
|
python/hsfs/core/training_dataset_job_conf.py
|
kouzant/feature-store-api
|
f6a666e11fd33ae814a79c588ff49547b942b09d
|
[
"Apache-2.0"
] | 132 |
2020-08-06T12:12:09.000Z
|
2022-03-29T16:28:25.000Z
|
python/hsfs/core/training_dataset_job_conf.py
|
isabella232/feature-store-api
|
6f90c6039519422114c35ed47e1ea8765134e7ba
|
[
"Apache-2.0"
] | 35 |
2020-08-06T12:09:02.000Z
|
2022-01-10T08:50:45.000Z
|
#
# Copyright 2021 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from hsfs import util
class TrainingDatsetJobConf:
    """Payload describing a training-dataset creation job.

    Bundles the query, the overwrite flag, per-write options, and an
    optional Spark job configuration, and serializes them for the jobs API.
    """

    def __init__(self, query, overwrite, write_options, spark_job_configuration):
        self._query = query
        self._overwrite = overwrite
        self._write_options = write_options
        self._spark_job_configuration = spark_job_configuration

    @property
    def query(self):
        """Query used to generate the training dataset."""
        return self._query

    @query.setter
    def query(self, query):
        self._query = query

    @property
    def overwrite(self):
        """Whether an existing training dataset should be overwritten."""
        return self._overwrite

    @overwrite.setter
    def overwrite(self, overwrite):
        self._overwrite = overwrite

    @property
    def write_options(self):
        """Dict of extra options applied when writing the dataset."""
        return self._write_options

    @write_options.setter
    def write_options(self, write_options):
        self._write_options = write_options

    @property
    def spark_job_configuration(self):
        """Optional Spark configuration for the job."""
        return self._spark_job_configuration

    @spark_job_configuration.setter
    def spark_job_configuration(self, spark_job_configuration):
        self._spark_job_configuration = spark_job_configuration

    def json(self):
        """Serialize this object with the feature-store JSON encoder."""
        return json.dumps(self, cls=util.FeatureStoreEncoder)

    def to_dict(self):
        """Return the REST payload dict expected by the backend.

        Write options are flattened into a list of {"name", "value"} pairs;
        an empty or missing dict becomes None.
        """
        options = None
        if self._write_options:
            options = [
                {"name": name, "value": value}
                for name, value in self._write_options.items()
            ]
        return {
            "query": self._query,
            "overwrite": self._overwrite,
            "writeOptions": options,
            "sparkJobConfiguration": self._spark_job_configuration,
        }
| 29.013514 | 81 | 0.676758 |
27a6ccee90e7a814126f9a3a2cd8ac3505535718
| 5,764 |
py
|
Python
|
vmtkScripts/vmtksurfaceviewer.py
|
haehn/vmtk
|
e8e2ee9f9bea6a1839a75b57caf82f6a86944db0
|
[
"Apache-2.0"
] | 1 |
2017-02-23T09:31:53.000Z
|
2017-02-23T09:31:53.000Z
|
vmtkScripts/vmtksurfaceviewer.py
|
haehn/vmtk
|
e8e2ee9f9bea6a1839a75b57caf82f6a86944db0
|
[
"Apache-2.0"
] | null | null | null |
vmtkScripts/vmtksurfaceviewer.py
|
haehn/vmtk
|
e8e2ee9f9bea6a1839a75b57caf82f6a86944db0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtksurfaceviewer.py,v $
## Language: Python
## Date: $Date: 2006/05/26 12:35:13 $
## Version: $Revision: 1.10 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
import vtk
import sys
import vmtkrenderer
import pypes
vmtksurfaceviewer = 'vmtkSurfaceViewer'
class vmtkSurfaceViewer(pypes.pypeScript):
    """Pype script that displays a surface, optionally colored by a point- or
    cell-data array, with an optional scalar-bar legend."""

    def __init__(self):
        pypes.pypeScript.__init__(self)
        # Input surface and rendering state; populated via pypes members.
        self.Surface = None
        self.vmtkRenderer = None
        self.OwnRenderer = 0
        self.Display = 1
        self.Opacity = 1.0
        self.ArrayName = ''
        self.ScalarRange = [0.0, 0.0]
        self.Legend = 0
        self.LegendTitle = ''
        self.Grayscale = 0
        self.FlatInterpolation = 0
        self.DisplayCellData = 0
        # Negative color means "use the mapper's default coloring".
        self.Color = [-1.0, -1.0, -1.0]
        self.LineWidth = 1
        self.Actor = None
        self.ScalarBarActor = None
        self.SetScriptName('vmtksurfaceviewer')
        self.SetScriptDoc('display a surface')
        self.SetInputMembers([
            ['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
            ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer'],
            ['Display','display','bool',1,'','toggle rendering'],
            # Fixed typo in the user-facing doc string: 'obejct' -> 'object'.
            ['Opacity','opacity','float',1,'(0.0,1.0)','object opacity in the scene'],
            ['ArrayName','array','str',1,'','name of the array where the scalars to be displayed are stored'],
            ['ScalarRange','scalarrange','float',2,'','range of the scalar map'],
            ['Legend','legend','bool',1,'','toggle scalar bar'],
            ['Grayscale','grayscale','bool',1,'','toggle color or grayscale'],
            ['FlatInterpolation','flat','bool',1,'','toggle flat or shaded surface display'],
            ['DisplayCellData','celldata','bool',1,'','toggle display of point or cell data'],
            ['Color','color','float',3,'','RGB color of the object in the scene'],
            ['LineWidth','linewidth','int',1,'(0.0,)','width of line objects in the scene'],
            ['LegendTitle','legendtitle','str',1,'','title of the scalar bar']])
        self.SetOutputMembers([
            ['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'],
            ['Actor','oactor','vtkActor',1,'','the output actor']
            ])

    def BuildView(self):
        """Create (or reuse) a renderer, build the surface actor and optional
        scalar bar, and render if Display is set."""
        if not self.vmtkRenderer:
            # No external renderer supplied: create one and remember to
            # deallocate it after rendering.
            self.vmtkRenderer = vmtkrenderer.vmtkRenderer()
            self.vmtkRenderer.Initialize()
            self.OwnRenderer = 1
        # Drop actors from any previous BuildView call before rebuilding.
        if self.Actor:
            self.vmtkRenderer.Renderer.RemoveActor(self.Actor)
        if self.ScalarBarActor:
            self.vmtkRenderer.Renderer.RemoveActor(self.ScalarBarActor)
        if self.Surface:
            mapper = vtk.vtkPolyDataMapper()
            mapper.SetInput(self.Surface)
            if self.ArrayName:
                # Activate the requested array on point or cell data and
                # color the surface by it.
                if self.DisplayCellData == 0:
                    self.Surface.GetPointData().SetActiveScalars(self.ArrayName)
                    array = self.Surface.GetPointData().GetScalars()
                else:
                    self.Surface.GetCellData().SetActiveScalars(self.ArrayName)
                    array = self.Surface.GetCellData().GetScalars()
                    mapper.SetScalarModeToUseCellData()
                # An explicit ScalarRange wins over the array's own range.
                if (self.ScalarRange[1] > self.ScalarRange[0]):
                    mapper.SetScalarRange(self.ScalarRange)
                elif array:
                    mapper.SetScalarRange(array.GetRange(0))
                if self.Grayscale:
                    lut = vtk.vtkLookupTable()
                    lut.SetValueRange(0.0,1.0)
                    lut.SetSaturationRange(0.0,0.0)
                    mapper.SetLookupTable(lut)
            else:
                mapper.ScalarVisibilityOff()
            self.Actor = vtk.vtkActor()
            self.Actor.SetMapper(mapper)
            if (self.Color[0] >= 0.0):
                self.Actor.GetProperty().SetColor(self.Color)
            self.Actor.GetProperty().SetOpacity(self.Opacity)
            self.Actor.GetProperty().SetLineWidth(self.LineWidth)
            if self.FlatInterpolation:
                self.Actor.GetProperty().SetInterpolationToFlat()
            self.vmtkRenderer.Renderer.AddActor(self.Actor)
        if self.Legend and self.Actor:
            self.ScalarBarActor = vtk.vtkScalarBarActor()
            self.ScalarBarActor.SetLookupTable(self.Actor.GetMapper().GetLookupTable())
            self.ScalarBarActor.GetLabelTextProperty().ItalicOff()
            self.ScalarBarActor.GetLabelTextProperty().BoldOff()
            self.ScalarBarActor.GetLabelTextProperty().ShadowOff()
            self.ScalarBarActor.SetLabelFormat('%.2f')
            self.ScalarBarActor.SetTitle(self.LegendTitle)
            self.vmtkRenderer.Renderer.AddActor(self.ScalarBarActor)
        if self.Display:
            self.vmtkRenderer.Render()
        if self.OwnRenderer:
            self.vmtkRenderer.Deallocate()

    def Execute(self):
        """Entry point: require a surface when displaying, then build the view."""
        if (not self.Surface) and self.Display:
            self.PrintError('Error: no Surface.')
        self.BuildView()
# Allow the module to be run stand-alone as a pype script from the command line.
if __name__=='__main__':
    main = pypes.pypeMain()
    main.Arguments = sys.argv
    main.Execute()
| 39.751724 | 110 | 0.598543 |
2933852f97cfef69a895b382d81d8dbc1e584678
| 2,415 |
py
|
Python
|
mlir/test/python/dialects/linalg/opdsl/shape_maps_iteration.py
|
jessicadavies-intel/llvm
|
4236bbba4c562a1355e75fa6d237b7c6b15a3193
|
[
"Apache-2.0"
] | 1 |
2022-01-06T15:44:48.000Z
|
2022-01-06T15:44:48.000Z
|
mlir/test/python/dialects/linalg/opdsl/shape_maps_iteration.py
|
jessicadavies-intel/llvm
|
4236bbba4c562a1355e75fa6d237b7c6b15a3193
|
[
"Apache-2.0"
] | 2 |
2019-06-27T00:36:28.000Z
|
2021-06-29T20:05:03.000Z
|
mlir/test/python/dialects/linalg/opdsl/shape_maps_iteration.py
|
kbobrovs/llvm
|
b57c6bf3b16e6d3f6c052ba9ba3616a24e0beae5
|
[
"Apache-2.0"
] | null | null | null |
# RUN: %PYTHON -m mlir.dialects.linalg.opdsl.dump_oplib --file %s | FileCheck %s
from mlir.dialects.linalg.opdsl.lang import *
# Verify that simple case with iteration order defined lexically and reduction
# dims auto discovered emits the right shape, indexing maps and iterator types.
# CHECK: ---
# CHECK-LABEL: matmul
# CHECK: shape_map: affine_map<()[s0, s1, s2] -> (s0, s2)>
# CHECK: shape_map: affine_map<()[s0, s1, s2] -> (s2, s1)>
# CHECK: shape_map: affine_map<()[s0, s1, s2] -> (s0, s1)>
# CHECK: static_indexing_maps:
# CHECK-NEXT: - affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0, d2)>
# CHECK-NEXT: - affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2, d1)>
# CHECK-NEXT: - affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0, d1)>
# CHECK: iterator_types:
# CHECK-NEXT: - parallel
# CHECK-NEXT: - parallel
# CHECK-NEXT: - reduction
# Plain matrix multiply; the reduction dimension k is inferred automatically
# because it appears only on the RHS of the assignment. (No docstring: the
# opdsl dump is matched by the CHECK lines above, so only '#' comments are safe.)
@linalg_structured_op
def matmul(
    A=TensorDef(T, S.M, S.K),
    B=TensorDef(T, S.K, S.N),
    C=TensorDef(U, S.M, S.N, output=True)):
  C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
# Verifies that assignment to a scalar (represented as [None]) is represented
# correctly.
# CHECK: ---
# CHECK-LABEL: dot
# CHECK: shape_map: affine_map<()[s0] -> (s0)>
# CHECK: shape_map: affine_map<()[s0] -> (s0)>
# CHECK: shape_map: affine_map<()[s0] -> ()>
# CHECK: static_indexing_maps:
# CHECK-NEXT: - affine_map<(d0)[s0] -> (d0)>
# CHECK-NEXT: - affine_map<(d0)[s0] -> (d0)>
# CHECK-NEXT: - affine_map<(d0)[s0] -> ()>
# CHECK: iterator_types:
# CHECK-NEXT: - reduction
# Dot product reducing into a rank-0 output; C[None] denotes the scalar target.
@linalg_structured_op
def dot(A=TensorDef(T, S.M), B=TensorDef(T, S.M), C=TensorDef(U, output=True)):
  C[None] += cast(U, A[D.m]) * cast(U, B[D.m])
# Verifies that the index_dims of shape-only operands translate to correct
# indexing maps.
# CHECK: ---
# CHECK-LABEL: pool
# CHECK: shape_map: affine_map<()[s0, s1, s2] -> (s1)>
# CHECK: shape_map: affine_map<()[s0, s1, s2] -> (s2)>
# CHECK: shape_map: affine_map<()[s0, s1, s2] -> (s0)>
# CHECK: static_indexing_maps:
# CHECK-NEXT: - affine_map<(d0, d1)[s0, s1, s2] -> (d1 * 2 + d0)>
# CHECK-NEXT: - affine_map<(d0, d1)[s0, s1, s2] -> (d0)>
# CHECK-NEXT: - affine_map<(d0, d1)[s0, s1, s2] -> (d1)>
# CHECK: iterator_types:
# CHECK-NEXT: - reduction
# CHECK-NEXT: - parallel
# Strided 1-d pooling; K is shape-only (index_dims) so it contributes the
# reduction dimension d.k without being read.
@linalg_structured_op
def pool(I=TensorDef(T, S.I),
         K=TensorDef(T, S.K, index_dims=[D.k]),
         O=TensorDef(U, S.O, output=True)):
  O[D.o] += cast(U, I[D.o * 2 + D.k])
| 37.153846 | 80 | 0.630642 |
6326c6ad6801538348c90de4982669759fe02d41
| 5,475 |
py
|
Python
|
packages/python/plotly/plotly/graph_objs/layout/ternary/caxis/_title.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/layout/ternary/caxis/_title.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/layout/ternary/caxis/_title.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Title(_BaseLayoutHierarchyType):
    """Generated graph object for the ``layout.ternary.caxis.title`` node."""

    # class properties
    # --------------------
    _parent_path_str = "layout.ternary.caxis"
    _path_str = "layout.ternary.caxis.title"
    _valid_props = {"font", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this axis' title font. Note that the title's font used to
        be customized by the now deprecated `titlefont` attribute.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.layout.ternary.caxis.title.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans",, "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                size

        Returns
        -------
        plotly.graph_objs.layout.ternary.caxis.title.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of this axis. Note that before the existence of
        `title.text`, the title's contents used to be defined as the
        `title` attribute itself. This behavior has been deprecated.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this axis' title font. Note that the title's font
            used to be customized by the now deprecated `titlefont`
            attribute.
        text
            Sets the title of this axis. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """

    def __init__(self, arg=None, font=None, text=None, **kwargs):
        """
        Construct a new Title object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.ternary.caxis.Title`
        font
            Sets this axis' title font. Note that the title's font
            used to be customized by the now deprecated `titlefont`
            attribute.
        text
            Sets the title of this axis. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")

        # Internal construction path: parent wires the object directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so the caller's dict is never mutated below.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.ternary.caxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.caxis.Title`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over keys in ``arg``.
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 32.589286 | 85 | 0.538995 |
d4f7fe4604e9a8f4833ee57085eae49a0decf90a
| 16,788 |
py
|
Python
|
wstools/c14n.py
|
Neurones67/wstools-py3
|
ce2c368db7d197664165497d83206dd8d3952659
|
[
"BSD-3-Clause"
] | null | null | null |
wstools/c14n.py
|
Neurones67/wstools-py3
|
ce2c368db7d197664165497d83206dd8d3952659
|
[
"BSD-3-Clause"
] | 1 |
2018-04-25T09:08:14.000Z
|
2018-04-25T09:08:14.000Z
|
wstools/c14n.py
|
Neurones67/wstools-py3
|
ce2c368db7d197664165497d83206dd8d3952659
|
[
"BSD-3-Clause"
] | 2 |
2018-04-25T06:29:30.000Z
|
2022-03-09T11:27:49.000Z
|
#! /usr/bin/env python
import string
import sys
from xml.dom import Node
try:
from xml.ns import XMLNS
except ImportError:
class XMLNS(object):
BASE = "http://www.w3.org/2000/xmlns/"
XML = "http://www.w3.org/XML/1998/namespace"
try:
from io import StringIO
except ImportError:
from io import StringIO
'''XML Canonicalization
Patches Applied to xml.dom.ext.c14n:
http://sourceforge.net/projects/pyxml/
[ 1444526 ] c14n.py: http://www.w3.org/TR/xml-exc-c14n/ fix
-- includes [ 829905 ] c14n.py fix for bug #825115,
Date Submitted: 2003-10-24 23:43
-- include dependent namespace declarations declared in ancestor nodes
(checking attributes and tags),
-- handle InclusiveNamespaces PrefixList parameter
This module generates canonical XML of a document or element.
http://www.w3.org/TR/2001/REC-xml-c14n-20010315
and includes a prototype of exclusive canonicalization
http://www.w3.org/Signature/Drafts/xml-exc-c14n
Requires PyXML 0.7.0 or later.
Known issues if using Ft.Lib.pDomlette:
1. Unicode
2. does not white space normalize attributes of type NMTOKEN and ID?
3. seems to be include "\n" after importing external entities?
Note, this version processes a DOM tree, and consequently it processes
namespace nodes as attributes, not from a node's namespace axis. This
permits simple document and element canonicalization without
XPath. When XPath is used, the XPath result node list is passed and used to
determine if the node is in the XPath result list, but little else.
Authors:
"Joseph M. Reagle Jr." <[email protected]>
"Rich Salz" <[email protected]>
$Date$ by $Author$
'''
_copyright = '''Copyright 2001, Zolera Systems Inc. All Rights Reserved.
Copyright 2001, MIT. All Rights Reserved.
Distributed under the terms of:
Python 2.0 License or later.
http://www.python.org/2.0.1/license.html
or
W3C Software License
http://www.w3.org/Consortium/Legal/copyright-software-19980720
'''
def _attrs(E):
return (E.attributes and list(E.attributes.values())) or []
def _children(E):
return E.childNodes or []
def _IN_XML_NS(n):
return n.name.startswith("xmlns")
def _inclusive(n):
    # True when no exclusive-c14n prefix list was supplied, i.e. this run
    # performs plain (inclusive) canonicalization.
    return n.unsuppressedPrefixes is None
# Does a document/PI has lesser/greater document order than the
# first element?
_LesserElement, _Element, _GreaterElement = list(range(3))

if sys.version_info[0] > 2:
    # Python 3 removed the builtin cmp(); recreate the classic definition
    # for the comparator helpers below.
    def cmp(a, b):
        return (a > b) - (a < b)
def _sorter(n1, n2):
    '''_sorter(n1,n2) -> int

    Sorting predicate for non-NS attributes: order by namespace URI
    first, then by local name.
    '''
    by_uri = cmp(n1.namespaceURI, n2.namespaceURI)
    return by_uri if by_uri else cmp(n1.localName, n2.localName)
def _sorter_ns(n1, n2):
'''_sorter_ns((n,v),(n,v)) -> int
"(an empty namespace URI is lexicographically least)."
'''
if n1[0] == 'xmlns':
return -1
if n2[0] == 'xmlns':
return 1
return cmp(n1[0], n2[0])
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
    '''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean

    Return true if that nodespace is utilized within the node
    '''
    # Reduce the declaration name ("xmlns:foo" / "xmlns") to the bare prefix.
    if n.startswith('xmlns:'):
        n = n[6:]
    elif n.startswith('xmlns'):
        n = n[5:]
    # Utilized by the element itself, or explicitly requested for inclusion.
    if (n == "" and node.prefix in ["#default", None]) or \
            n == node.prefix or n in unsuppressedPrefixes:
        return 1
    # Utilized by one of the already-collected non-NS attributes.
    for attr in other_attrs:
        if n == attr.prefix:
            return 1
    # For exclusive need to look at attributes
    if unsuppressedPrefixes is not None:
        for attr in _attrs(node):
            if n == attr.prefix:
                return 1
    return 0
def _inclusiveNamespacePrefixes(node, context, unsuppressedPrefixes):
    '''http://www.w3.org/TR/xml-exc-c14n/
    InclusiveNamespaces PrefixList parameter, which lists namespace prefixes that
    are handled in the manner described by the Canonical XML Recommendation

    Returns (inclusive, unused_namespace_dict): the ancestor declarations
    that must be rendered, and the remaining "xmlns:*" declarations that
    were not selected (kept for later on-demand rendering).
    '''
    inclusive = []
    # Prefixes actually used by this node itself (element + its attributes).
    if node.prefix:
        usedPrefixes = ['xmlns:%s' % node.prefix]
    else:
        usedPrefixes = ['xmlns']

    for a in _attrs(node):
        if a.nodeName.startswith('xmlns') or not a.prefix:
            continue
        usedPrefixes.append('xmlns:%s' % a.prefix)

    unused_namespace_dict = {}
    # Partition ancestor namespace declarations: selected via the
    # PrefixList (matched by raw name, ":suffix" or bare suffix), used by
    # this node, or remembered as currently unused.
    for attr in context:
        n = attr.nodeName
        if n in unsuppressedPrefixes:
            inclusive.append(attr)
        elif n.startswith('xmlns:') and n[6:] in unsuppressedPrefixes:
            inclusive.append(attr)
        elif n.startswith('xmlns') and n[5:] in unsuppressedPrefixes:
            inclusive.append(attr)
        elif attr.nodeName in usedPrefixes:
            inclusive.append(attr)
        elif n.startswith('xmlns:'):
            unused_namespace_dict[n] = attr.value

    return inclusive, unused_namespace_dict
# _in_subset = lambda subset, node: not subset or node in subset
def _in_subset(subset, node):
return subset is None or node in subset # rich's tweak
class _implementation(object):
    '''Implementation class for C14N.

    An instance accompanies a single canonicalization run: it carries the
    options (comments flag, XPath subset, exclusive prefix list) and the
    namespace-rendering state while walking the DOM tree, writing the
    canonical text through the supplied ``write`` callable.
    '''

    # Handler for each node type; populated during module instantiation.
    handlers = {}

    def __init__(self, node, write, **kw):
        '''Create and run the implementation.

        node  -- document or element DOM node to canonicalize
        write -- callable that receives the canonical text fragments
        Keyword args: subset, comments, unsuppressedPrefixes, nsdict.
        '''
        self.write = write
        self.subset = kw.get('subset')
        self.comments = kw.get('comments', 0)
        self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
        nsdict = kw.get('nsdict', {'xml': XMLNS.XML, 'xmlns': XMLNS.BASE})

        # Processing state: (ns declared by parent, ns rendered by
        # ancestors, inherited xml:* attributes, unrendered exclusive ns).
        self.state = (nsdict, {'xml': ''}, {}, {})  # 0422
        if node.nodeType == Node.DOCUMENT_NODE:
            self._do_document(node)
        elif node.nodeType == Node.ELEMENT_NODE:
            self.documentOrder = _Element  # At document element
            if not _inclusive(self):
                inherited, unused = _inclusiveNamespacePrefixes(
                    node, self._inherit_context(node),
                    self.unsuppressedPrefixes)
                self._do_element(node, inherited, unused=unused)
            else:
                inherited = self._inherit_context(node)
                self._do_element(node, inherited)
        elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
            pass
        else:
            raise TypeError(str(node))

    def _inherit_context(self, node):
        '''_inherit_context(self, node) -> list

        Scan ancestors of attribute and namespace context. Used only
        for single element node canonicalization, not for subset
        canonicalization.
        '''
        # Collect the initial list of xml:foo attributes.
        xmlattrs = list(filter(_IN_XML_NS, _attrs(node)))

        # Walk up and get all xml:XXX attributes we inherit.
        inherited, parent = [], node.parentNode
        while parent and parent.nodeType == Node.ELEMENT_NODE:
            for a in filter(_IN_XML_NS, _attrs(parent)):
                n = a.localName
                if n not in xmlattrs:
                    xmlattrs.append(n)
                    inherited.append(a)
            parent = parent.parentNode
        return inherited

    def _do_document(self, node):
        '''_do_document(self, node) -> None

        Process a document node. documentOrder holds whether the document
        element has been encountered such that PIs/comments can be written
        as specified.
        '''
        self.documentOrder = _LesserElement
        for child in node.childNodes:
            if child.nodeType == Node.ELEMENT_NODE:
                self.documentOrder = _Element  # At document element
                self._do_element(child)
                self.documentOrder = _GreaterElement  # After document element
            elif child.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
                self._do_pi(child)
            elif child.nodeType == Node.COMMENT_NODE:
                self._do_comment(child)
            elif child.nodeType == Node.DOCUMENT_TYPE_NODE:
                pass
            else:
                raise TypeError(str(child))
    handlers[Node.DOCUMENT_NODE] = _do_document

    def _do_text(self, node):
        '''_do_text(self, node) -> None

        Process a text or CDATA node. Render various special characters
        as their C14N entity representations.
        '''
        if not _in_subset(self.subset, node):
            return
        # str.replace replaces the Python 2-only string.replace(); the
        # ampersand must be escaped first so later entities survive.
        s = node.data.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
        s = s.replace("\r", "&#xD;")
        if s:
            self.write(s)
    handlers[Node.TEXT_NODE] = _do_text
    handlers[Node.CDATA_SECTION_NODE] = _do_text

    def _do_pi(self, node):
        '''_do_pi(self, node) -> None

        Process a PI node. Render a leading or trailing #xA if the
        document order of the PI is greater or lesser (respectively)
        than the document element.
        '''
        if not _in_subset(self.subset, node):
            return
        W = self.write
        if self.documentOrder == _GreaterElement:
            W('\n')
        W('<?')
        W(node.nodeName)
        s = node.data
        if s:
            W(' ')
            W(s)
        W('?>')
        if self.documentOrder == _LesserElement:
            W('\n')
    handlers[Node.PROCESSING_INSTRUCTION_NODE] = _do_pi

    def _do_comment(self, node):
        '''_do_comment(self, node) -> None

        Process a comment node. Render a leading or trailing #xA if the
        document order of the comment is greater or lesser (respectively)
        than the document element.  Comments are skipped entirely unless
        the ``comments`` option is set.
        '''
        if not _in_subset(self.subset, node):
            return
        if self.comments:
            W = self.write
            if self.documentOrder == _GreaterElement:
                W('\n')
            W('<!--')
            W(node.data)
            W('-->')
            if self.documentOrder == _LesserElement:
                W('\n')
    handlers[Node.COMMENT_NODE] = _do_comment

    def _do_attr(self, n, value):
        '''_do_attr(self, n, value) -> None

        Render one attribute with C14N attribute-value escaping.
        '''
        W = self.write
        W(' ')
        W(n)
        W('="')
        # C14N attribute escaping: &, <, ", tab, newline, carriage return.
        s = value.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace('"', '&quot;')
        s = s.replace('\t', '&#x9;')
        s = s.replace('\n', '&#xA;')
        s = s.replace('\r', '&#xD;')
        W(s)
        W('"')

    def _do_element(self, node, initial_other_attrs=[], unused=None):
        '''_do_element(self, node, initial_other_attrs = [], unused = {}) -> None

        Process an element (and its children).  ``initial_other_attrs`` is
        only read, never mutated, so the shared default list is safe.
        '''
        # Get state (from the stack) make local copies.
        # ns_parent -- NS declarations in parent
        # ns_rendered -- NS nodes rendered by ancestors
        # ns_local -- NS declarations relevant to this element
        # xml_attrs -- Attributes in XML namespace from parent
        # xml_attrs_local -- Local attributes in XML namespace.
        # ns_unused_inherited -- not rendered namespaces, used for exclusive
        ns_parent, ns_rendered, xml_attrs = \
            self.state[0], self.state[1].copy(), self.state[2].copy()  # 0422
        ns_unused_inherited = unused
        if unused is None:
            ns_unused_inherited = self.state[3].copy()

        ns_local = ns_parent.copy()
        inclusive = _inclusive(self)
        xml_attrs_local = {}

        # Divide attributes into NS, XML, and others.
        other_attrs = []
        in_subset = _in_subset(self.subset, node)
        for a in initial_other_attrs + _attrs(node):
            if a.namespaceURI == XMLNS.BASE:
                n = a.nodeName
                if n == "xmlns:":
                    n = "xmlns"  # DOM bug workaround
                ns_local[n] = a.nodeValue
            elif a.namespaceURI == XMLNS.XML:
                # 020925 Test to see if attribute node in subset
                if inclusive or (in_subset and _in_subset(self.subset, a)):
                    xml_attrs_local[a.nodeName] = a  # 0426
            else:
                # 020925 Test to see if attribute node in subset
                if _in_subset(self.subset, a):
                    other_attrs.append(a)

        # add local xml:foo attributes to ancestor's xml:foo attributes
        xml_attrs.update(xml_attrs_local)

        # Render the node
        W, name = self.write, None
        if in_subset:
            name = node.nodeName
            if not inclusive:
                # Exclusive c14n: the element's own prefix must be in scope;
                # pull it out of the unrendered inherited declarations.
                if node.prefix is not None:
                    prefix = 'xmlns:%s' % node.prefix
                else:
                    prefix = 'xmlns'
                if prefix not in ns_rendered and prefix not in ns_local:
                    if prefix not in ns_unused_inherited:
                        raise RuntimeError(
                            'For exclusive c14n, unable to map prefix "%s" in %s' % (
                                prefix, node))
                    ns_local[prefix] = ns_unused_inherited[prefix]
                    del ns_unused_inherited[prefix]
            W('<')
            W(name)

            # Create list of NS attributes to render.
            ns_to_render = []
            for n, v in list(ns_local.items()):
                # If default namespace is XMLNS.BASE or empty,
                # and if an ancestor was the same
                if n == "xmlns" and v in [XMLNS.BASE, ''] \
                        and ns_rendered.get('xmlns') in [XMLNS.BASE, '', None]:
                    continue
                # "omit namespace node with local name xml, which defines
                # the xml prefix, if its string value is
                # http://www.w3.org/XML/1998/namespace."
                if n in ["xmlns:xml", "xml"] \
                        and v in ['http://www.w3.org/XML/1998/namespace']:
                    continue
                # If not previously rendered
                # and it's inclusive or utilized
                if (n, v) not in list(ns_rendered.items()):
                    if inclusive or _utilized(n, node, other_attrs,
                                              self.unsuppressedPrefixes):
                        ns_to_render.append((n, v))
                    elif not inclusive:
                        ns_unused_inherited[n] = v

            # Sort and render the ns, marking what was rendered.  The key
            # function replaces the Python 2 cmp-style sort(_sorter_ns):
            # the default "xmlns" declaration sorts first, then by name.
            ns_to_render.sort(key=lambda kv: (kv[0] != 'xmlns', kv[0]))
            for n, v in ns_to_render:
                self._do_attr(n, v)
                ns_rendered[n] = v  # 0417

            # If exclusive or the parent is in the subset, add the local xml attributes
            # Else, add all local and ancestor xml attributes
            # Sort and render the attributes.
            if not inclusive or _in_subset(self.subset, node.parentNode):  # 0426
                other_attrs.extend(list(xml_attrs_local.values()))
            else:
                other_attrs.extend(list(xml_attrs.values()))
            # Key-based ordering replaces the py2 cmp-style sort(_sorter):
            # namespace URI first (absent URI sorts least), then local name.
            other_attrs.sort(
                key=lambda attr: (attr.namespaceURI or '', attr.localName or ''))
            for a in other_attrs:
                self._do_attr(a.nodeName, a.value)
            W('>')

        # Push state, recurse, pop state.
        state, self.state = self.state, (ns_local, ns_rendered,
                                         xml_attrs, ns_unused_inherited)
        for c in _children(node):
            _implementation.handlers[c.nodeType](self, c)
        self.state = state

        if name:
            W('</%s>' % name)
    handlers[Node.ELEMENT_NODE] = _do_element
def Canonicalize(node, output=None, **kw):
    '''Canonicalize(node, output=None, **kw) -> UTF-8

    Canonicalize a DOM document/element node and all descendents.
    Return the text; if output is specified then output.write will
    be called to output the text and None will be returned

    Keyword parameters:
        nsdict: a dictionary of prefix:uri namespace entries
                assumed to exist in the surrounding context
        comments: keep comments if non-zero (default is 0)
        subset: Canonical XML subsetting resulting from XPath
                (default is [])
        unsuppressedPrefixes: do exclusive C14N, and this specifies the
                prefixes that should be inherited.
    '''
    if output:
        _implementation(node, output.write, **kw)
    else:
        # BUGFIX: the module imports the StringIO *class* directly
        # ("from io import StringIO"), so StringIO.StringIO() raised
        # AttributeError; instantiate the class itself.
        s = StringIO()
        _implementation(node, s.write, **kw)
        return s.getvalue()
| 34.472279 | 109 | 0.585835 |
80c9c7aec75883c5ed38b5d882ac3ac433bfb5cf
| 2,703 |
py
|
Python
|
question_generation/selection.py
|
willywsm1013/transformers-for-question-generation
|
16f6fd48e61e1f0ce9ce54b7d9d3f01260257082
|
[
"Apache-2.0"
] | 2 |
2022-02-17T09:43:05.000Z
|
2022-02-20T11:14:15.000Z
|
question_generation/selection.py
|
willywsm1013/transformers-for-question-generation
|
16f6fd48e61e1f0ce9ce54b7d9d3f01260257082
|
[
"Apache-2.0"
] | 9 |
2020-11-13T17:51:46.000Z
|
2022-03-12T00:46:15.000Z
|
question_generation/selection.py
|
willywsm1013/transformers-for-question-generation
|
16f6fd48e61e1f0ce9ce54b7d9d3f01260257082
|
[
"Apache-2.0"
] | 1 |
2021-02-12T16:31:47.000Z
|
2021-02-12T16:31:47.000Z
|
import json
import os
import sys
import re
import numpy as np
from nltk.tokenize import TreebankWordTokenizer
nltk_tokenizer = TreebankWordTokenizer()
def process(text):
    '''Tokenize *text* with the Treebank tokenizer, map the PTB quote
    tokens ('' and ``) back to plain double quotes, and return the
    lowercased, space-joined result.'''
    tokens = nltk_tokenizer.tokenize(text)
    normalized = [tok.replace("''", '"').replace("``", '"') for tok in tokens]
    return ' '.join(normalized).lower()
def get_log_a_prob(elem):
    '''
    a_log_porb = log(P(start)) + log(P(end))
    return :
        average log prob of start position and end position = a_log_prob / 2
    '''
    qa_prob = np.asarray(elem['qa_prob'])
    # Small epsilon guards against log(0).
    return np.log(qa_prob + 1e-12) / 2
def get_log_q_prob(elem):
    '''
    q_log_prob = log(P(q))
    return :
        average log prob = q_log_prob / len(q)
    '''
    log_prob = np.asarray(elem['lm_log_prob'])
    token_num = np.asarray(elem['lm_token_num'])
    # Length-normalize: per-token average log probability.
    return log_prob / token_num
def sort_by_score(idx, scores):
    '''Return the indices in *idx* reordered so that higher scores[i]
    come first (stable sort; *idx* itself is not mutated).'''
    return sorted(idx, key=scores.__getitem__, reverse=True)
# Script entry: argv[1] = JSON results from the QG pipeline,
# argv[2] = ranking strategy ('qg' | 'qg_ln' | 'a' | 'qa'),
# argv[3] = number of top questions to keep per example.
with open(sys.argv[1]) as f:
    data = json.load(f)

input_file = sys.argv[1]
selection = sys.argv[2]
num = int(sys.argv[3])

# Outputs go into a 'tmp' directory next to the input file.
d, filename = os.path.split(input_file)
tmp_dir = os.path.join(d, 'tmp')
if not os.path.isdir(tmp_dir):
    os.makedirs(tmp_dir)
golden = os.path.join(tmp_dir, 'golden.txt')
generated = os.path.join(tmp_dir, 'generated.txt')
squad_file = os.path.join(tmp_dir, 'generated.json')

with open(golden, 'w') as gt_file, open(generated, 'w') as pred_file:
    select_data = []
    for elem in data:
        # Component scores for ranking candidate questions.
        log_a_prob = get_log_a_prob(elem)
        log_q_prob = get_log_q_prob(elem)
        log_qa_prob = log_a_prob + log_q_prob
        idx = list(range(len(elem['pred_question'])))
        if selection == 'qg':
            # Raw question-generation log probability.
            idx = sort_by_score(idx, elem['pred_question_log_prob'])
        elif selection == 'qg_ln':
            # Length-normalized QG log probability.
            probs = np.array(elem['pred_question_log_prob'])
            token_nums = np.array(elem['pred_question_token_num'])
            idx = sort_by_score(idx, probs / token_nums)
        elif selection == 'a':
            # Answerability (QA model) score only.
            idx = sort_by_score(idx, log_a_prob)
        elif selection == 'qa':
            # Combined QA + LM score.
            idx = sort_by_score(idx, log_qa_prob)
        pred_question = [process(elem['pred_question'][i]) for i in idx[:num]]
        if num == 1:
            # Parallel files for reference-based evaluation (e.g. BLEU).
            print(process(elem['question']), file=gt_file)
            print(pred_question[0], file=pred_file)
        select_data.append({'context': elem['context'], 'answers': elem['answers'], 'question': elem['question'], 'pred_question': pred_question, 'id': elem['id']})

# SQuAD-style JSON with the selected questions.
with open(squad_file, 'w') as f:
    json.dump(select_data, f, indent=1, ensure_ascii=False)
| 31.430233 | 159 | 0.63189 |
b7e8281bceb256b493dbdd82b795c7f8d37ba37a
| 8,381 |
py
|
Python
|
Lib/fontTools/ttLib/tables/G__l_a_t.py
|
twardoch/fonttools-py27
|
75b852d3f59fc0d03c6e78581530597d4c6368a1
|
[
"MIT",
"BSD-3-Clause"
] | 240 |
2021-01-11T14:49:24.000Z
|
2022-03-29T22:33:49.000Z
|
Lib/fontTools/ttLib/tables/G__l_a_t.py
|
twardoch/fonttools-py27
|
75b852d3f59fc0d03c6e78581530597d4c6368a1
|
[
"MIT",
"BSD-3-Clause"
] | 77 |
2021-01-12T20:23:30.000Z
|
2022-03-28T12:14:34.000Z
|
Lib/fontTools/ttLib/tables/G__l_a_t.py
|
twardoch/fonttools-py27
|
75b852d3f59fc0d03c6e78581530597d4c6368a1
|
[
"MIT",
"BSD-3-Clause"
] | 28 |
2021-01-17T05:44:11.000Z
|
2022-01-11T19:58:46.000Z
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from itertools import *
from functools import partial
from . import DefaultTable
from . import grUtils
import struct, operator, warnings
# sstruct format strings describing the binary layout of the Glat table
# records (parsed by fontTools.misc.sstruct); one per table version /
# record type.
Glat_format_0 = """
> # big endian
version: 16.16F
"""

Glat_format_3 = """
>
version: 16.16F
compression:L # compression scheme or reserved
"""

# Version 1 run header: 8-bit attribute number and run length.
Glat_format_1_entry = """
>
attNum: B # Attribute number of first attribute
num: B # Number of attributes in this run
"""

# Versions 2 and 3 run header: 16-bit attribute number and run length.
Glat_format_23_entry = """
>
attNum: H # Attribute number of first attribute
num: H # Number of attributes in this run
"""

Glat_format_3_octabox_metrics = """
>
subboxBitmap: H # Which subboxes exist on 4x4 grid
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
"""

Glat_format_3_subbox_entry = """
>
left: B # xi
right: B # xa
bottom: B # yi
top: B # ya
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
"""
class _Object() :
pass
class _Dict(dict) :
pass
class table_G__l_a_t(DefaultTable.DefaultTable):
    '''
    Support Graphite Glat tables
    '''

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.scheme = 0

    def decompile(self, data, ttFont):
        '''Parse the binary table, choosing the per-run entry format by
        table version; glyph boundaries come from the Gloc table.'''
        sstruct.unpack2(Glat_format_0, data, self)
        if self.version <= 1.9:
            decoder = partial(self.decompileAttributes12, fmt=Glat_format_1_entry)
        elif self.version <= 2.9:
            decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry)
        elif self.version >= 3.0:
            (data, self.scheme) = grUtils.decompress(data)
            sstruct.unpack2(Glat_format_3, data, self)
            # Bit 0 of the compression field flags octabox metrics.
            self.hasOctaboxes = (self.compression & 1) == 1
            decoder = self.decompileAttributes3

        gloc = ttFont['Gloc']
        self.attributes = {}
        count = 0
        for s, e in zip(gloc, gloc[1:]):
            self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
            count += 1

    def decompileAttributes12(self, data, fmt):
        '''Decode version 1/2 attribute runs into an {attNum: value} dict.'''
        attributes = _Dict()
        while len(data) > 3:
            e, data = sstruct.unpack2(fmt, data, _Object())
            keys = range(e.attNum, e.attNum + e.num)
            if len(data) >= 2 * e.num:
                vals = struct.unpack_from(('>%dh' % e.num), data)
                attributes.update(zip(keys, vals))
                data = data[2 * e.num:]
        return attributes

    def decompileAttributes3(self, data):
        '''Decode version 3 attributes: optional octabox metrics followed
        by format-23 attribute runs.'''
        if self.hasOctaboxes:
            o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object())
            numsub = bin(o.subboxBitmap).count("1")
            o.subboxes = []
            for b in range(numsub):
                if len(data) >= 8:
                    subbox, data = sstruct.unpack2(Glat_format_3_subbox_entry,
                                                   data, _Object())
                    o.subboxes.append(subbox)
        attrs = self.decompileAttributes12(data, Glat_format_23_entry)
        if self.hasOctaboxes:
            attrs.octabox = o
        return attrs

    def compile(self, ttFont):
        '''Serialize the table, writing glyph offsets into Gloc.'''
        data = sstruct.pack(Glat_format_0, self)
        if self.version <= 1.9:
            encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
        elif self.version <= 2.9:
            # BUGFIX: version 2.x runs use the 16-bit entry format,
            # mirroring decompile(); the old code reused Glat_format_1_entry
            # here, corrupting 2.x tables on round-trip.
            encoder = partial(self.compileAttributes12, fmt=Glat_format_23_entry)
        elif self.version >= 3.0:
            self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0)
            data = sstruct.pack(Glat_format_3, self)
            encoder = self.compileAttributes3

        glocs = []
        for n in range(len(self.attributes)):
            glocs.append(len(data))
            data += encoder(self.attributes[ttFont.getGlyphName(n)])
        glocs.append(len(data))
        ttFont['Gloc'].set(glocs)
        if self.version >= 3.0:
            data = grUtils.compress(self.scheme, data)
        return data

    def compileAttributes12(self, attrs, fmt):
        '''Encode an attribute dict as consecutive runs in *fmt*.'''
        data = b""
        for e in grUtils.entries(attrs):
            data += sstruct.pack(fmt, {'attNum': e[0], 'num': e[1]}) + \
                struct.pack(('>%dh' % len(e[2])), *e[2])
        return data

    def compileAttributes3(self, attrs):
        '''Encode version 3 attributes (optional octabox metrics first).'''
        if self.hasOctaboxes:
            o = attrs.octabox
            data = sstruct.pack(Glat_format_3_octabox_metrics, o)
            numsub = bin(o.subboxBitmap).count("1")
            for b in range(numsub):
                data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
        else:
            # BUGFIX: must be bytes — the old str "" raised TypeError when
            # concatenated with the bytes from compileAttributes12().
            data = b""
        return data + self.compileAttributes12(attrs, Glat_format_23_entry)

    def toXML(self, writer, ttFont):
        '''Dump the table as TTX, octabox percentages included.'''
        writer.simpletag('version', version=self.version, compressionScheme=self.scheme)
        writer.newline()
        for n, a in sorted(self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0])):
            writer.begintag('glyph', name=n)
            writer.newline()
            if hasattr(a, 'octabox'):
                o = a.octabox
                formatstring, names, fixes = sstruct.getformat(Glat_format_3_octabox_metrics)
                vals = {}
                for k in names:
                    if k == 'subboxBitmap':
                        continue
                    # Byte values are stored as percentages in the XML.
                    vals[k] = "{:.3f}%".format(getattr(o, k) * 100. / 255)
                vals['bitmap'] = "{:0X}".format(o.subboxBitmap)
                writer.begintag('octaboxes', **vals)
                writer.newline()
                formatstring, names, fixes = sstruct.getformat(Glat_format_3_subbox_entry)
                for s in o.subboxes:
                    vals = {}
                    for k in names:
                        vals[k] = "{:.3f}%".format(getattr(s, k) * 100. / 255)
                    writer.simpletag('octabox', **vals)
                    writer.newline()
                writer.endtag('octaboxes')
                writer.newline()
            for k, v in sorted(a.items()):
                writer.simpletag('attribute', index=k, value=v)
                writer.newline()
            writer.endtag('glyph')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        '''Rebuild the table from TTX elements.'''
        if name == 'version':
            self.version = float(safeEval(attrs['version']))
            self.scheme = int(safeEval(attrs['compressionScheme']))
        if name != 'glyph':
            return
        if not hasattr(self, 'attributes'):
            self.attributes = {}
        gname = attrs['name']
        attributes = _Dict()
        for element in content:
            if not isinstance(element, tuple):
                continue
            tag, attrs, subcontent = element
            if tag == 'attribute':
                k = int(safeEval(attrs['index']))
                v = int(safeEval(attrs['value']))
                attributes[k] = v
            elif tag == 'octaboxes':
                self.hasOctaboxes = True
                o = _Object()
                o.subboxBitmap = int(attrs['bitmap'], 16)
                o.subboxes = []
                del attrs['bitmap']
                for k, v in attrs.items():
                    # Convert "NN.NNN%" back to a 0-255 byte value.
                    setattr(o, k, int(float(v[:-1]) * 255. / 100. + 0.5))
                for element in subcontent:
                    if not isinstance(element, tuple):
                        continue
                    (tag, attrs, subcontent) = element
                    so = _Object()
                    for k, v in attrs.items():
                        setattr(so, k, int(float(v[:-1]) * 255. / 100. + 0.5))
                    o.subboxes.append(so)
                attributes.octabox = o
        self.attributes[gname] = attributes
| 38.269406 | 93 | 0.553275 |
1ea0a4b5c4a439fb871618fbe7526e7b231296d3
| 2,529 |
py
|
Python
|
crypto_wallet.py
|
ChrisKwiat84/Cryptocurrency-Wallet-2.19.22
|
1b9f6b234006c5e1e2cf25d4748ece6b0c55a590
|
[
"ADSL"
] | null | null | null |
crypto_wallet.py
|
ChrisKwiat84/Cryptocurrency-Wallet-2.19.22
|
1b9f6b234006c5e1e2cf25d4748ece6b0c55a590
|
[
"ADSL"
] | null | null | null |
crypto_wallet.py
|
ChrisKwiat84/Cryptocurrency-Wallet-2.19.22
|
1b9f6b234006c5e1e2cf25d4748ece6b0c55a590
|
[
"ADSL"
] | null | null | null |
# Cryptocurrency Wallet
################################################################################
# This file contains the Ethereum transaction functions that you have created throughout this module’s lessons. By using import statements, you will integrate this `crypto_wallet.py` Python script into the Fintech Finder interface program that is found in the `fintech_finder.py` file.
################################################################################
# Imports
import os
import requests
from dotenv import load_dotenv
load_dotenv()
from bip44 import Wallet
from web3 import Account
from web3 import middleware
from web3.gas_strategies.time_based import medium_gas_price_strategy
################################################################################
# Wallet functionality
def generate_account():
    """Create a digital wallet and Ethereum account from a mnemonic seed phrase."""
    # Fetch mnemonic from environment variable.
    mnemonic = os.getenv("MNEMONIC")
    # SECURITY NOTE(review): never hard-code a mnemonic in source — the
    # seed phrase must come exclusively from the environment / .env file.

    # Create Wallet Object
    wallet = Wallet(mnemonic)

    # Derive Ethereum Private Key
    # address_index=1 selects the second derived account on the eth path.
    private, public = wallet.derive_account("eth", address_index=1)

    # Convert private key into an Ethereum account
    account = Account.privateKeyToAccount(private)

    return account
def get_balance(w3, address):
    """Using an Ethereum account address access the balance of Ether.

    Queries the chain for the balance in Wei through *w3*, converts it
    to Ether units, and returns that value.
    """
    wei_balance = w3.eth.get_balance(address)
    return w3.fromWei(wei_balance, "ether")
def send_transaction(w3, account, to, wage):
    """Send an authorized transaction to the Ganache blockchain.

    w3 -- connected Web3 instance
    account -- sender's Account object (signs the transaction)
    to -- recipient address
    wage -- amount to transfer, in Ether
    Returns the transaction hash from sendRawTransaction.
    """
    # Set gas price strategy
    w3.eth.setGasPriceStrategy(medium_gas_price_strategy)

    # Convert eth amount to Wei
    value = w3.toWei(wage, "ether")

    # Calculate gas estimate
    gasEstimate = w3.eth.estimateGas({"to": to, "from": account.address, "value": value})

    # Construct a raw transaction
    # gasPrice 0 works on Ganache's local chain; a live network would
    # reject it.
    raw_tx = {
        "to": to,
        "from": account.address,
        "value": value,
        "gas": gasEstimate,
        "gasPrice": 0,
        "nonce": w3.eth.getTransactionCount(account.address)
    }

    # Sign the raw transaction with ethereum account
    signed_tx = account.signTransaction(raw_tx)

    # Send the signed transactions
    return w3.eth.sendRawTransaction(signed_tx.rawTransaction)
| 33.276316 | 285 | 0.652432 |
b0cdf334cfa1279c668f161c74a6cb34a5ee352d
| 10,121 |
py
|
Python
|
homeassistant/components/switch/rainmachine.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 7 |
2018-08-03T10:15:36.000Z
|
2019-03-25T13:31:55.000Z
|
homeassistant/components/switch/rainmachine.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 3 |
2021-09-08T03:06:43.000Z
|
2022-03-12T00:56:04.000Z
|
homeassistant/components/switch/rainmachine.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 3 |
2018-10-09T08:37:48.000Z
|
2019-11-16T08:32:27.000Z
|
"""
This component provides support for RainMachine programs and zones.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.rainmachine/
"""
import logging
from homeassistant.components.rainmachine import (
CONF_ZONE_RUN_TIME, DATA_RAINMACHINE, DEFAULT_ZONE_RUN,
PROGRAM_UPDATE_TOPIC, ZONE_UPDATE_TOPIC, RainMachineEntity)
from homeassistant.const import ATTR_ID
from homeassistant.components.switch import SwitchDevice
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
DEPENDENCIES = ['rainmachine']

_LOGGER = logging.getLogger(__name__)

# Extra state-attribute names exposed by program/zone entities.
ATTR_AREA = 'area'
ATTR_CS_ON = 'cs_on'
ATTR_CURRENT_CYCLE = 'current_cycle'
ATTR_CYCLES = 'cycles'
ATTR_DELAY = 'delay'
ATTR_DELAY_ON = 'delay_on'
ATTR_FIELD_CAPACITY = 'field_capacity'
ATTR_NO_CYCLES = 'number_of_cycles'
ATTR_PRECIP_RATE = 'sprinkler_head_precipitation_rate'
ATTR_RESTRICTIONS = 'restrictions'
ATTR_SLOPE = 'slope'
ATTR_SOAK = 'soak'
ATTR_SOIL_TYPE = 'soil_type'
ATTR_SPRINKLER_TYPE = 'sprinkler_head_type'
ATTR_STATUS = 'status'
ATTR_SUN_EXPOSURE = 'sun_exposure'
ATTR_VEGETATION_TYPE = 'vegetation_type'
ATTR_ZONES = 'zones'

DAYS = [
    'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',
    'Sunday'
]

# Integer codes from the RainMachine API mapped to human-readable strings.
PROGRAM_STATUS_MAP = {0: 'Not Running', 1: 'Running', 2: 'Queued'}

SOIL_TYPE_MAP = {
    0: 'Not Set',
    1: 'Clay Loam',
    2: 'Silty Clay',
    3: 'Clay',
    4: 'Loam',
    5: 'Sandy Loam',
    6: 'Loamy Sand',
    7: 'Sand',
    8: 'Sandy Clay',
    9: 'Silt Loam',
    10: 'Silt',
    99: 'Other'
}

SLOPE_TYPE_MAP = {
    0: 'Not Set',
    1: 'Flat',
    2: 'Moderate',
    3: 'High',
    4: 'Very High',
    99: 'Other'
}

SPRINKLER_TYPE_MAP = {
    0: 'Not Set',
    1: 'Popup Spray',
    2: 'Rotors',
    3: 'Surface Drip',
    4: 'Bubblers Drip',
    99: 'Other'
}

SUN_EXPOSURE_MAP = {
    0: 'Not Set',
    1: 'Full Sun',
    2: 'Partial Shade',
    3: 'Full Shade'
}

VEGETATION_MAP = {
    0: 'Not Set',
    2: 'Cool Season Grass',
    3: 'Fruit Trees',
    4: 'Flowers',
    5: 'Vegetables',
    6: 'Citrus',
    7: 'Trees and Bushes',
    9: 'Drought Tolerant Plants',
    10: 'Warm Season Grass',
    99: 'Other'
}
async def async_setup_platform(
        hass, config, async_add_devices, discovery_info=None):
    """Set up the RainMachine Switch platform.

    One switch entity is created per *active* program and per *active*
    zone reported by the RainMachine controller.
    """
    # This platform is only set up via discovery from the main component.
    if discovery_info is None:
        return

    _LOGGER.debug('Config received: %s', discovery_info)

    zone_run_time = discovery_info.get(CONF_ZONE_RUN_TIME, DEFAULT_ZONE_RUN)

    rainmachine = hass.data[DATA_RAINMACHINE]

    entities = []
    programs = await rainmachine.client.programs.all()
    for program in programs:
        if not program.get('active'):
            continue
        _LOGGER.debug('Adding program: %s', program)
        entities.append(RainMachineProgram(rainmachine, program))

    zones = await rainmachine.client.zones.all()
    for zone in zones:
        if not zone.get('active'):
            continue
        _LOGGER.debug('Adding zone: %s', zone)
        entities.append(RainMachineZone(rainmachine, zone, zone_run_time))

    async_add_devices(entities, True)
class RainMachineSwitch(RainMachineEntity, SwitchDevice):
    """A class to represent a generic RainMachine switch."""

    def __init__(self, rainmachine, switch_type, obj):
        """Initialize a generic RainMachine switch."""
        super().__init__(rainmachine)

        # Keep the raw API payload around; subclasses read state from it.
        self._obj = obj
        self._switch_type = switch_type
        self._name = obj['name']
        self._rainmachine_entity_id = obj['uid']

    @property
    def icon(self) -> str:
        """Return the icon."""
        return 'mdi:water'

    @property
    def is_enabled(self) -> bool:
        """Return whether the entity is enabled."""
        return self._obj.get('active')

    @property
    def unique_id(self) -> str:
        """Return a unique, HASS-friendly identifier for this entity."""
        mac = self.rainmachine.device_mac.replace(':', '')
        return '{0}_{1}_{2}'.format(
            mac, self._switch_type, self._rainmachine_entity_id)

    @callback
    def _program_updated(self):
        """Update state, trigger updates."""
        self.async_schedule_update_ha_state(True)
class RainMachineProgram(RainMachineSwitch):
    """A RainMachine program."""

    def __init__(self, rainmachine, obj):
        """Initialize a RainMachine program switch."""
        super().__init__(rainmachine, 'program', obj)

    @property
    def is_on(self) -> bool:
        """Return whether the program is running."""
        status = self._obj.get('status')
        return bool(status)

    @property
    def zones(self) -> list:
        """Return a list of active zones associated with this program."""
        watering_times = self._obj['wateringTimes']
        return [zone for zone in watering_times if zone['active']]

    async def async_added_to_hass(self):
        """Register callbacks."""
        async_dispatcher_connect(
            self.hass, PROGRAM_UPDATE_TOPIC, self._program_updated)

    async def async_turn_off(self, **kwargs) -> None:
        """Turn the program off."""
        from regenmaschine.errors import RequestError

        try:
            await self.rainmachine.client.programs.stop(
                self._rainmachine_entity_id)
            async_dispatcher_send(self.hass, PROGRAM_UPDATE_TOPIC)
        except RequestError as err:
            _LOGGER.error(
                'Unable to turn off program "%s": %s', self.unique_id,
                str(err))

    async def async_turn_on(self, **kwargs) -> None:
        """Turn the program on."""
        from regenmaschine.errors import RequestError

        try:
            await self.rainmachine.client.programs.start(
                self._rainmachine_entity_id)
            async_dispatcher_send(self.hass, PROGRAM_UPDATE_TOPIC)
        except RequestError as err:
            _LOGGER.error(
                'Unable to turn on program "%s": %s', self.unique_id, str(err))

    async def async_update(self) -> None:
        """Update info for the program."""
        from regenmaschine.errors import RequestError

        try:
            self._obj = await self.rainmachine.client.programs.get(
                self._rainmachine_entity_id)

            zone_names = ', '.join(zone['name'] for zone in self.zones)
            self._attrs.update({
                ATTR_ID: self._obj['uid'],
                ATTR_SOAK: self._obj.get('soak'),
                ATTR_STATUS: PROGRAM_STATUS_MAP[self._obj.get('status')],
                ATTR_ZONES: zone_names
            })
        except RequestError as err:
            _LOGGER.error(
                'Unable to update info for program "%s": %s', self.unique_id,
                str(err))
class RainMachineZone(RainMachineSwitch):
    """A RainMachine zone."""

    def __init__(self, rainmachine, obj, zone_run_time):
        """Initialize a RainMachine zone."""
        super().__init__(rainmachine, 'zone', obj)

        # Detailed zone properties, fetched lazily in async_update().
        self._properties_json = {}
        # Run duration (seconds) passed to the controller on turn-on.
        self._run_time = zone_run_time

    @property
    def is_on(self) -> bool:
        """Return whether the zone is running."""
        return bool(self._obj.get('state'))

    async def async_added_to_hass(self):
        """Register callbacks."""
        # Zones refresh on both program and zone update signals, since a
        # running program changes zone state.
        async_dispatcher_connect(
            self.hass, PROGRAM_UPDATE_TOPIC, self._program_updated)
        async_dispatcher_connect(
            self.hass, ZONE_UPDATE_TOPIC, self._program_updated)

    async def async_turn_off(self, **kwargs) -> None:
        """Turn the zone off."""
        from regenmaschine.errors import RequestError

        try:
            await self.rainmachine.client.zones.stop(
                self._rainmachine_entity_id)
        except RequestError as err:
            _LOGGER.error(
                'Unable to turn off zone "%s": %s', self.unique_id, str(err))

    async def async_turn_on(self, **kwargs) -> None:
        """Turn the zone on for the configured run time."""
        from regenmaschine.errors import RequestError

        try:
            await self.rainmachine.client.zones.start(
                self._rainmachine_entity_id, self._run_time)
        except RequestError as err:
            _LOGGER.error(
                'Unable to turn on zone "%s": %s', self.unique_id, str(err))

    async def async_update(self) -> None:
        """Update info for the zone."""
        from regenmaschine.errors import RequestError

        try:
            # Two calls: basic state, then the detailed properties payload.
            self._obj = await self.rainmachine.client.zones.get(
                self._rainmachine_entity_id)

            self._properties_json = await self.rainmachine.client.zones.get(
                self._rainmachine_entity_id, details=True)

            # NOTE(review): .get('waterSense').get(...) raises AttributeError
            # if 'waterSense' is missing from the payload — confirm the API
            # always returns it, or chain defaults.
            self._attrs.update({
                ATTR_ID:
                    self._obj['uid'],
                ATTR_AREA:
                    self._properties_json.get('waterSense').get('area'),
                ATTR_CURRENT_CYCLE:
                    self._obj.get('cycle'),
                ATTR_FIELD_CAPACITY:
                    self._properties_json.get('waterSense')
                    .get('fieldCapacity'),
                ATTR_NO_CYCLES:
                    self._obj.get('noOfCycles'),
                ATTR_PRECIP_RATE:
                    self._properties_json.get('waterSense')
                    .get('precipitationRate'),
                ATTR_RESTRICTIONS:
                    self._obj.get('restriction'),
                ATTR_SLOPE:
                    SLOPE_TYPE_MAP.get(self._properties_json.get('slope')),
                # NOTE(review): soil type is looked up with the 'sun' key —
                # looks like a copy-paste bug (expected 'soil'?); verify
                # against the RainMachine zone-properties API.
                ATTR_SOIL_TYPE:
                    SOIL_TYPE_MAP.get(self._properties_json.get('sun')),
                ATTR_SPRINKLER_TYPE:
                    SPRINKLER_TYPE_MAP.get(
                        self._properties_json.get('group_id')),
                ATTR_SUN_EXPOSURE:
                    SUN_EXPOSURE_MAP.get(self._properties_json.get('sun')),
                ATTR_VEGETATION_TYPE:
                    VEGETATION_MAP.get(self._obj.get('type')),
            })
        except RequestError as err:
            _LOGGER.error(
                'Unable to update info for zone "%s": %s', self.unique_id,
                str(err))
| 31.141538 | 79 | 0.612094 |
8661752f8a16f4ba32091b9b7f130e341280cb8d
| 243 |
py
|
Python
|
App/apis/__init__.py
|
jonathan-hxj/FlaskTpp
|
0d24f432ab9321fd5d260f80183aec252e1cac35
|
[
"MIT"
] | null | null | null |
App/apis/__init__.py
|
jonathan-hxj/FlaskTpp
|
0d24f432ab9321fd5d260f80183aec252e1cac35
|
[
"MIT"
] | null | null | null |
App/apis/__init__.py
|
jonathan-hxj/FlaskTpp
|
0d24f432ab9321fd5d260f80183aec252e1cac35
|
[
"MIT"
] | null | null | null |
from App.apis.admin import admin_api
from App.apis.movie_admin import movie_client_api
from App.apis.movie_user import client_api
def init_api(app):
    """Register every API extension on the given Flask app."""
    # Each sub-API exposes the standard Flask ``init_app`` hook.
    for api in (admin_api, movie_client_api, client_api):
        api.init_app(app)
| 24.3 | 49 | 0.798354 |
edbae480f12aa72f0e56ba4902ac3db8a9592922
| 5,285 |
py
|
Python
|
mw4/test/test_units/gui/mainWmixin/test_tabSettHorizon.py
|
Raddock/MountWizzard4
|
15efed77c1634461184e90a7cf6419eec0dec909
|
[
"Apache-2.0"
] | null | null | null |
mw4/test/test_units/gui/mainWmixin/test_tabSettHorizon.py
|
Raddock/MountWizzard4
|
15efed77c1634461184e90a7cf6419eec0dec909
|
[
"Apache-2.0"
] | null | null | null |
mw4/test/test_units/gui/mainWmixin/test_tabSettHorizon.py
|
Raddock/MountWizzard4
|
15efed77c1634461184e90a7cf6419eec0dec909
|
[
"Apache-2.0"
] | null | null | null |
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
# Python v3.7.5
#
# Michael Würtenberger
# (c) 2019
#
# Licence APL2.0
#
###########################################################
# standard libraries
import unittest.mock as mock
import pytest
# external packages
# local import
from mw4.test.test_units.setupQt import setupQt
@pytest.fixture(autouse=True, scope='module')
def module_setup_teardown():
    """Build the Qt app once per module and open the hemisphere window."""
    global app, spy, mwGlob, test
    app, spy, mwGlob, test = setupQt()
    app.config['showHemisphereW'] = True
    app.toggleWindow(windowTag='showHemisphereW')
    yield
def test_initConfig_1():
    """initConfig succeeds when a 'mainW' config section exists."""
    app.config['mainW'] = {}
    suc = app.mainW.initConfig()
    assert suc


def test_initConfig_2():
    """initConfig succeeds even when the 'mainW' section is missing."""
    del app.config['mainW']
    suc = app.mainW.initConfig()
    assert suc


def test_storeConfig_1():
    """storeConfig reports success."""
    suc = app.storeConfig()
    assert suc


def test_setupIcons():
    """setupIcons reports success."""
    suc = app.mainW.setupIcons()
    assert suc
def test_loadHorizonMaskFile_1(qtbot):
    """Successful load emits the 'loaded' message."""
    with mock.patch.object(app.mainW,
                           'openFile',
                           return_value=('build', 'test', 'bpts')):
        with mock.patch.object(app.data,
                               'loadHorizonP',
                               return_value=True):
            with qtbot.waitSignal(app.message) as blocker:
                suc = app.mainW.loadHorizonMask()
                assert suc
            assert ['Horizon mask [test] loaded', 0] == blocker.args


def test_loadHorizonMaskFile_2(qtbot):
    """Cancelling the file dialog (empty name) aborts the load."""
    with mock.patch.object(app.mainW,
                           'openFile',
                           return_value=('', '', '')):
        suc = app.mainW.loadHorizonMask()
        assert not suc


def test_loadHorizonMaskFile_3(qtbot):
    """A failing data load emits the error message."""
    # The asserted text (including the 'cannot no be' typo) must match the
    # string produced by the application under test.
    with mock.patch.object(app.mainW,
                           'openFile',
                           return_value=('build', 'test', 'bpts')):
        with mock.patch.object(app.data,
                               'loadHorizonP',
                               return_value=False):
            with qtbot.waitSignal(app.message) as blocker:
                suc = app.mainW.loadHorizonMask()
                assert suc
            assert ['Horizon mask [test] cannot no be loaded', 2] == blocker.args
def test_saveHorizonMaskFile_1(qtbot):
    """Successful save emits the 'saved' message."""
    app.mainW.ui.horizonFileName.setText('test')
    with mock.patch.object(app.mainW,
                           'saveFile',
                           return_value=('build', 'test', 'bpts')):
        with mock.patch.object(app.data,
                               'saveHorizonP',
                               return_value=True):
            with qtbot.waitSignal(app.message) as blocker:
                suc = app.mainW.saveHorizonMask()
                assert suc
            assert ['Horizon mask [test] saved', 0] == blocker.args


def test_saveHorizonMaskFile_2(qtbot):
    """Saving without a file name emits an error and fails."""
    app.mainW.ui.horizonFileName.setText('')
    with qtbot.waitSignal(app.message) as blocker:
        suc = app.mainW.saveHorizonMask()
        assert not suc
    assert ['Horizon mask file name not given', 2] == blocker.args


def test_saveHorizonMaskFile_3(qtbot):
    """A failing data save emits the error message."""
    # The asserted text (including the 'cannot no be' typo) must match the
    # string produced by the application under test.
    app.mainW.ui.horizonFileName.setText('test')
    with mock.patch.object(app.mainW,
                           'saveFile',
                           return_value=('build', 'test', 'bpts')):
        with mock.patch.object(app.data,
                               'saveHorizonP',
                               return_value=False):
            with qtbot.waitSignal(app.message) as blocker:
                suc = app.mainW.saveHorizonMask()
                assert suc
            assert ['Horizon mask [test] cannot no be saved', 2] == blocker.args
def test_saveHorizonMaskFileAs_1(qtbot):
    """Successful save-as emits the 'saved' message."""
    with mock.patch.object(app.mainW,
                           'saveFile',
                           return_value=('build', 'test', 'bpts')):
        with mock.patch.object(app.data,
                               'saveHorizonP',
                               return_value=True):
            with qtbot.waitSignal(app.message) as blocker:
                suc = app.mainW.saveHorizonMaskAs()
                assert suc
            assert ['Horizon mask [test] saved', 0] == blocker.args


def test_saveHorizonMaskFileAs_2(qtbot):
    """Cancelling the file dialog (empty name) aborts the save-as."""
    with mock.patch.object(app.mainW,
                           'saveFile',
                           return_value=('', '', '')):
        suc = app.mainW.saveHorizonMaskAs()
        assert not suc


def test_saveHorizonMaskFileAs_3(qtbot):
    """A failing data save emits the error message."""
    # The asserted text (including the 'cannot no be' typo) must match the
    # string produced by the application under test.
    with mock.patch.object(app.mainW,
                           'saveFile',
                           return_value=('build', 'test', 'bpts')):
        with mock.patch.object(app.data,
                               'saveHorizonP',
                               return_value=False):
            with qtbot.waitSignal(app.message) as blocker:
                suc = app.mainW.saveHorizonMaskAs()
                assert suc
            assert ['Horizon mask [test] cannot no be saved', 2] == blocker.args
| 32.826087 | 81 | 0.519962 |
db2456e013eaa95c64b25c37289b3f3fc66f70b5
| 897 |
py
|
Python
|
software/pynguin/pynguin/__init__.py
|
se2p/artifact-pynguin-ssbse2020
|
32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6
|
[
"CC-BY-4.0"
] | 3 |
2020-08-20T10:27:13.000Z
|
2021-11-02T20:28:16.000Z
|
software/pynguin/pynguin/__init__.py
|
se2p/artifact-pynguin-ssbse2020
|
32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6
|
[
"CC-BY-4.0"
] | null | null | null |
software/pynguin/pynguin/__init__.py
|
se2p/artifact-pynguin-ssbse2020
|
32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6
|
[
"CC-BY-4.0"
] | null | null | null |
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
"""Pynguin is an automated unit test generation framework for Python."""
from .configuration import Configuration
from .generator import Pynguin

# Package version string; keep in sync with the release metadata.
__version__ = "0.1.0"
# Public API re-exported at the package top level.
__all__ = ["Pynguin", "Configuration", "__version__"]
| 42.714286 | 77 | 0.764771 |
01925ffb4478380d23178cedeee096c7d2836912
| 12,640 |
py
|
Python
|
supervised/ensemble.py
|
TaniaSaleem14/mljar-supervised
|
b45801cdd904fd31d863b9b893352e71e2fe10d8
|
[
"MIT"
] | null | null | null |
supervised/ensemble.py
|
TaniaSaleem14/mljar-supervised
|
b45801cdd904fd31d863b9b893352e71e2fe10d8
|
[
"MIT"
] | null | null | null |
supervised/ensemble.py
|
TaniaSaleem14/mljar-supervised
|
b45801cdd904fd31d863b9b893352e71e2fe10d8
|
[
"MIT"
] | null | null | null |
import os
import logging
import copy
import numpy as np
import pandas as pd
import time
import uuid
import json
import operator
from supervised.utils.config import storage_path
from supervised.algorithms.algorithm import BaseAlgorithm
from supervised.algorithms.registry import BINARY_CLASSIFICATION
from supervised.algorithms.registry import MULTICLASS_CLASSIFICATION
from supervised.model_framework import ModelFramework
from supervised.utils.metric import Metric
from supervised.utils.config import LOG_LEVEL
from supervised.utils.additional_metrics import AdditionalMetrics
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
import matplotlib.pyplot as plt
from tabulate import tabulate
from supervised.utils.learning_curves import LearningCurves
class Ensemble:
    """Greedy ensemble built from out-of-folds predictions of trained models.

    Repeatedly (with replacement) adds the model whose inclusion in the
    averaged prediction most improves ``optimize_metric``; the final
    ensemble is a weighted mean of the selected models' predictions.
    """

    algorithm_name = "Greedy Ensemble"
    algorithm_short_name = "Ensemble"

    def __init__(self, optimize_metric="logloss", ml_task=BINARY_CLASSIFICATION):
        self.library_version = "0.1"
        self.uid = str(uuid.uuid4())

        self.model_file = self.uid + ".ensemble.model"
        self.model_file_path = os.path.join(storage_path, self.model_file)
        self.metric = Metric({"name": optimize_metric})
        self.best_loss = self.metric.get_maximum()  # the best loss obtained by ensemble
        self.models_map = None
        self.selected_models = []
        self.train_time = None
        self.total_best_sum = None  # total sum of predictions, the oof of ensemble
        self.target = None
        self.target_columns = None
        self._ml_task = ml_task
        self._optimize_metric = optimize_metric
        self._additional_metrics = None
        self._threshold = None
        self._name = "ensemble"
        self._scores = []  # best score after each greedy selection step

    def get_train_time(self):
        """Return the wall-clock time (seconds) spent in fit()."""
        return self.train_time

    def get_final_loss(self):
        """Return the best metric value reached by the ensemble."""
        return self.best_loss

    def get_type(self):
        return self.algorithm_short_name

    def get_name(self):
        return self._name

    def get_out_of_folds(self):
        """ Needed when ensemble is treated as model and we want to compute additional metrics for it """
        # single prediction (in case of binary classification and regression)
        logger.debug(self.total_best_sum.shape)
        logger.debug(self.total_best_sum.head())
        logger.debug(self.target.shape)
        logger.debug(self.target.head())
        if self.total_best_sum.shape[1] == 1:
            tmp_df = pd.DataFrame({"prediction": self.total_best_sum["prediction"]})
            tmp_df["target"] = self.target[self.target_columns]
            return tmp_df
        # multiclass: keep one column per class plus the target column(s)
        ensemble_oof = pd.DataFrame(
            data=self.total_best_sum,
            columns=self.total_best_sum.columns
        )
        ensemble_oof["target"] = self.target
        return ensemble_oof

    def _get_mean(self, oof_selected, best_sum, best_count):
        """Return the mean of ``best_sum`` plus the candidate's predictions.

        ``best_count`` is the number of (repeated) selections including the
        candidate; for the first step the candidate is returned as-is.
        """
        resp = copy.deepcopy(oof_selected)
        if best_count > 1:
            resp += best_sum
            resp /= float(best_count)
        return resp

    def get_oof_matrix(self, models):
        """Collect per-model out-of-folds predictions and the shared target."""
        # remember models, will be needed in predictions
        self.models_map = {m.get_name(): m for m in models}

        oofs = {}
        for m in models:
            oof = m.get_out_of_folds()
            prediction_cols = [c for c in oof.columns if "prediction" in c]
            oofs[m.get_name()] = oof[prediction_cols]
            if self.target is None:
                self.target_columns = [c for c in oof.columns if "target" in c]
                self.target = oof[
                    self.target_columns
                ]  # it will be needed for computing advanced model statistics
        return oofs, self.target

    def get_additional_metrics(self):
        """Compute (once, then cache) the extended metrics report."""
        if self._additional_metrics is None:
            logger.debug("Get additional metrics for Ensemble")
            # 'target' - the target after processing used for model training
            # 'prediction' - out of folds predictions of the model
            oof_predictions = self.get_out_of_folds()
            prediction_cols = [c for c in oof_predictions.columns if "prediction" in c]
            target_cols = [c for c in oof_predictions.columns if "target" in c]

            oof_preds = oof_predictions[prediction_cols]
            if self._ml_task == MULTICLASS_CLASSIFICATION:
                cols = oof_preds.columns.tolist()
                # class label = column name minus the "prediction_" prefix (11 chars)
                labels = {i: v[11:] for i, v in enumerate(cols)}
                oof_preds["label"] = np.argmax(
                    np.array(oof_preds[prediction_cols]), axis=1
                )
                oof_preds["label"] = oof_preds["label"].map(labels)

            self._additional_metrics = AdditionalMetrics.compute(
                oof_predictions[target_cols],
                oof_preds,
                self._ml_task,
            )
            if self._ml_task == BINARY_CLASSIFICATION:
                self._threshold = float(self._additional_metrics["threshold"])
        return self._additional_metrics

    def fit(self, oofs, y):
        """Greedily select models (with replacement) to minimize the metric.

        ``oofs`` maps model name -> out-of-folds prediction frame, ``y`` is
        the target frame.
        """
        logger.debug("Ensemble.fit")
        start_time = time.time()
        selected_algs_cnt = 0  # number of selected algorithms
        self.best_algs = []  # selected algorithms, one entry per greedy step
        best_sum = None  # running sum of the selected models' predictions
        for j in range(len(oofs)):  # iterate over all solutions
            min_score = self.metric.get_maximum()
            best_model = None
            # try to add some algorithm to the best_sum to minimize metric
            for model_name in oofs.keys():
                y_ens = self._get_mean(oofs[model_name], best_sum, j + 1)
                score = self.metric(y, y_ens)
                if self.metric.improvement(previous=min_score, current=score):
                    min_score = score
                    best_model = model_name

            # record the step score; remember the step index only when the
            # overall best loss improved (the ensemble is truncated there)
            self._scores += [min_score]
            if self.metric.improvement(previous=self.best_loss, current=min_score):
                self.best_loss = min_score
                selected_algs_cnt = j

            self.best_algs.append(best_model)  # save the best algorithm
            # update best_sum value
            best_sum = (
                oofs[best_model] if best_sum is None else best_sum + oofs[best_model]
            )
            if j == selected_algs_cnt:
                self.total_best_sum = copy.deepcopy(best_sum)

        # end of main loop #
        # keep oof predictions of ensemble (mean over the kept selections)
        self.total_best_sum /= float(selected_algs_cnt + 1)
        self.best_algs = self.best_algs[: (selected_algs_cnt + 1)]

        logger.debug("Selected models for ensemble:")
        for model_name in np.unique(self.best_algs):
            self.selected_models += [
                {
                    "model": self.models_map[model_name],
                    "repeat": float(self.best_algs.count(model_name)),
                }
            ]
            logger.debug(f"{model_name} {self.best_algs.count(model_name)}")

        self.get_additional_metrics()
        self.train_time = time.time() - start_time

    def predict(self, X):
        """Return the repeat-weighted mean of the selected models' predictions."""
        logger.debug(
            "Ensemble.predict with {} models".format(len(self.selected_models))
        )
        y_predicted_ensemble = None
        total_repeat = 0.0

        for selected in self.selected_models:
            model = selected["model"]
            repeat = selected["repeat"]
            total_repeat += repeat

            y_predicted_from_model = model.predict(X)

            prediction_cols = []
            if self._ml_task in [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION]:
                prediction_cols = [
                    c for c in y_predicted_from_model.columns if "prediction_" in c
                ]
            else:  # REGRESSION
                prediction_cols = ["prediction"]
            y_predicted_from_model = y_predicted_from_model[prediction_cols]
            y_predicted_ensemble = (
                y_predicted_from_model * repeat
                if y_predicted_ensemble is None
                else y_predicted_ensemble + y_predicted_from_model * repeat
            )

        y_predicted_ensemble /= total_repeat

        if self._ml_task == MULTICLASS_CLASSIFICATION:
            cols = y_predicted_ensemble.columns.tolist()
            # class label = column name minus the "prediction_" prefix (11 chars)
            labels = {i: v[11:] for i, v in enumerate(cols)}
            y_predicted_ensemble["label"] = np.argmax(
                np.array(y_predicted_ensemble[prediction_cols]), axis=1
            )
            y_predicted_ensemble["label"] = y_predicted_ensemble["label"].map(labels)

        return y_predicted_ensemble

    def to_json(self):
        """Serialize the ensemble (and its models) to a JSON-able dict."""
        models_json = []
        for selected in self.selected_models:
            model = selected["model"]
            repeat = selected["repeat"]
            models_json += [{"model": model.to_json(), "repeat": repeat}]

        json_desc = {
            "library_version": self.library_version,
            "algorithm_name": self.algorithm_name,
            "algorithm_short_name": self.algorithm_short_name,
            "uid": self.uid,
            "models": models_json,
        }
        return json_desc

    def from_json(self, json_desc):
        """Restore the ensemble state from a dict produced by to_json()."""
        self.library_version = json_desc.get("library_version", self.library_version)
        self.algorithm_name = json_desc.get("algorithm_name", self.algorithm_name)
        self.algorithm_short_name = json_desc.get(
            "algorithm_short_name", self.algorithm_short_name
        )
        self.uid = json_desc.get("uid", self.uid)
        self.selected_models = []
        models_json = json_desc.get("models")
        for selected in models_json:
            model = selected["model"]
            repeat = selected["repeat"]

            il = ModelFramework(model.get("params"))
            il.from_json(model)
            self.selected_models += [
                {"model": il, "repeat": repeat}
            ]

    def save(self, model_path):
        """Persist ensemble description, oof predictions and reports to disk."""
        logger.info(f"Save the ensemble to {model_path}")

        with open(os.path.join(model_path, "ensemble.json"), "w") as fout:
            ms = []
            for selected in self.selected_models:
                ms += [{"model": selected["model"]._name, "repeat": selected["repeat"]}]

            desc = {
                "name": self._name,
                "ml_task": self._ml_task,
                "optimize_metric": self._optimize_metric,
                "selected_models": ms,
            }
            if self._threshold is not None:
                desc["threshold"] = self._threshold
            fout.write(json.dumps(desc, indent=4))

        predictions = self.get_out_of_folds()
        predictions.to_csv(
            os.path.join(model_path, f"predictions_ensemble.csv"), index=False
        )

        LearningCurves.plot_for_ensemble(self._scores, self.metric.name, model_path)

        self._additional_metrics = self.get_additional_metrics()

        AdditionalMetrics.save(
            self._additional_metrics, self._ml_task, self.model_markdown(), model_path
        )

        # status file marks a fully-written model directory
        with open(os.path.join(model_path, "status.txt"), "w") as fout:
            fout.write("ALL OK!")

    def model_markdown(self):
        """Return a markdown summary table of the ensemble composition."""
        select_models_desc = []
        for selected in self.selected_models:
            select_models_desc += [
                {"model": selected["model"]._name, "repeat": selected["repeat"]}
            ]
        desc = f"# Summary of {self.get_name()}\n"
        desc += "\n## Ensemble structure\n"
        selected = pd.DataFrame(select_models_desc)
        desc += tabulate(selected.values, ["Model", "Weight"], tablefmt="pipe")
        desc += "\n"
        return desc

    @staticmethod
    def load(model_path, models_map):
        """Load an ensemble saved by save(); ``models_map`` resolves model names."""
        logger.info(f"Loading ensemble from {model_path}")
        json_desc = json.load(open(os.path.join(model_path, "ensemble.json")))
        ensemble = Ensemble(json_desc.get("optimize_metric"), json_desc.get("ml_task"))
        ensemble._name = json_desc.get("name", ensemble._name)
        ensemble._threshold = json_desc.get("threshold", ensemble._threshold)
        for m in json_desc.get("selected_models", []):
            ensemble.selected_models += [
                {"model": models_map[m["model"]], "repeat": m["repeat"]}
            ]
        return ensemble
| 37.286136 | 105 | 0.609256 |
fc2900b93ff1ccdce6d80e22cbcbea42fe6bf90e
| 4,224 |
py
|
Python
|
drive.py
|
ahtchow/CarND-BehaviouralCloning-P3
|
5dc41cd5080f95e0c8caacb24eb3e65b54c01494
|
[
"MIT"
] | null | null | null |
drive.py
|
ahtchow/CarND-BehaviouralCloning-P3
|
5dc41cd5080f95e0c8caacb24eb3e65b54c01494
|
[
"MIT"
] | null | null | null |
drive.py
|
ahtchow/CarND-BehaviouralCloning-P3
|
5dc41cd5080f95e0c8caacb24eb3e65b54c01494
|
[
"MIT"
] | null | null | null |
import argparse
import base64
from datetime import datetime
import os
import shutil
import numpy as np
import cv2
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO
from keras.models import load_model
import h5py
from keras import __version__ as keras_version
# Socket.IO server bridging the simulator and this script, wrapped by Flask.
sio = socketio.Server()
app = Flask(__name__)
# Keras model, loaded in __main__ from the CLI-provided .h5 path.
model = None
prev_image_array = None
class SimplePIController:
    """Proportional-integral controller used to regulate throttle."""

    def __init__(self, Kp, Ki):
        # Caller-visible gain parameter names are kept; state is private.
        self._kp = Kp
        self._ki = Ki
        self._target = 0.
        self._last_error = 0.
        self._accumulated_error = 0.

    def set_desired(self, desired):
        """Set the set-point the controller steers measurements toward."""
        self._target = desired

    def update(self, measurement):
        """Return the control output for the latest measurement."""
        self._last_error = self._target - measurement
        self._accumulated_error += self._last_error
        return self._kp * self._last_error + self._ki * self._accumulated_error
# PI throttle controller targeting a fixed cruise speed.
controller = SimplePIController(0.1, 0.002)
set_speed = 20
controller.set_desired(set_speed)
def img_preprocess(img):
    """Prepare a raw RGB camera frame for the steering model."""
    # Crop rows 65:135 — presumably removes sky/hood; TODO confirm against
    # the training-time preprocessing.
    img = img[65:135 , : , :]
    # NOTE(review): YUV + 200x66 input matches the NVIDIA end-to-end model
    # convention — must be identical to the pipeline used during training.
    img = cv2.cvtColor(img , cv2.COLOR_RGB2YUV)
    img = cv2.GaussianBlur(img, (3,3) , 0 )
    img = cv2.resize(img,(200, 66))
    # Scale pixel values to [0, 1].
    img = img/255
    return img
@sio.on('telemetry')
def telemetry(sid, data):
    """Handle a simulator telemetry frame: predict steering, send controls."""
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = float(data['speed'])
        # The current image from the center camera of the car (base64 JPEG)
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        # Keep the unprocessed PIL image for optional frame recording below.
        saved_img = image
        image = np.asarray(image)
        image = img_preprocess(image)
        # Add a batch dimension for model.predict.
        image = np.array([image])
        steering_angle = float(model.predict(image))
        # Throttle comes from the PI speed controller, not the model.
        throttle = controller.update(float(speed))
        print("Steering Angle: " , steering_angle, " Throttle: ", throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            saved_img.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
    """On simulator connect, zero out steering and throttle."""
    print("connect ", sid)
    send_control(0, 0)
def send_control(steering_angle, throttle):
    """Emit a 'steer' event with stringified control values to the simulator."""
    sio.emit(
        "steer",
        data={
            'steering_angle': steering_angle.__str__(),
            'throttle': throttle.__str__()
        },
        skip_sid=True)
if __name__ == '__main__':
    # CLI: model path is required, image_folder optionally enables recording.
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument(
        'model',
        type=str,
        help='Path to model h5 file. Model should be on the same path.'
    )
    parser.add_argument(
        'image_folder',
        type=str,
        nargs='?',
        default='',
        help='Path to image folder. This is where the images from the run will be saved.'
    )
    args = parser.parse_args()

    # check that model Keras version is same as local Keras version
    # f = h5py.File(args.model, mode='r')
    # model_version = f.attrs.get('keras_version')
    # keras_version = str(keras_version).encode('utf8')

    # if model_version != keras_version:
    #     print('You are using Keras version ', keras_version,
    #           ', but the model was built using ', model_version)

    model = load_model(args.model)

    # Recording: recreate the folder from scratch if it already exists.
    if args.image_folder != '':
        print("Creating image folder at {}".format(args.image_folder))
        if not os.path.exists(args.image_folder):
            os.makedirs(args.image_folder)
        else:
            shutil.rmtree(args.image_folder)
            os.makedirs(args.image_folder)
        print("RECORDING THIS RUN ...")
    else:
        print("NOT RECORDING THIS RUN ...")

    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)

    # deploy as an eventlet WSGI server (simulator connects on port 4567)
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| 27.97351 | 89 | 0.631392 |
909ba0c787abf8a896af084a044c7f6041ed28c7
| 1,398 |
py
|
Python
|
what_apps/push/functions.py
|
SlashRoot/WHAT
|
69e78d01065142446234e77ea7c8c31e3482af29
|
[
"MIT"
] | null | null | null |
what_apps/push/functions.py
|
SlashRoot/WHAT
|
69e78d01065142446234e77ea7c8c31e3482af29
|
[
"MIT"
] | null | null | null |
what_apps/push/functions.py
|
SlashRoot/WHAT
|
69e78d01065142446234e77ea7c8c31e3482af29
|
[
"MIT"
] | null | null | null |
from django.template import loader, Context, RequestContext
import stomp
import json
def push_with_template(template, context, destination):
    '''
    Pushes content through stomp / morbidQ to comet listeners.

    Renders ``template`` with ``context`` and sends the rendered text to the
    STOMP ``destination``.  This drives a lot of the "live" content on our
    site.  Returns True on success.
    '''
    t = loader.get_template(template)  # Probably a very small, cookie-cutter template that gets included again and again. 'comm/call_alert.html' is a good example.
    c = Context(context)
    conn = stomp.Connection()  # This will raise errors if stomp / orbited crash. Maybe we should try / except and handle this situation more gracefully.
    conn.start()
    conn.connect()
    try:
        # Render + send inside try/finally so the connection is released even
        # when rendering or the send fails (previously leaked on error).
        conn.send(t.render(c), destination=destination)
    finally:
        conn.stop()
    return True
def push_with_json(dict, destination):
    '''
    Serialize ``dict`` to JSON and push it to the STOMP ``destination``.

    Returns True on success.  NOTE: the parameter name shadows the builtin
    ``dict``; kept as-is for backward compatibility with keyword callers.
    '''
    json_dict = json.dumps(dict)
    conn = stomp.Connection()  # This will raise errors if stomp / orbited crash. Maybe we should try / except and handle this situation more gracefully.
    conn.start()
    conn.connect()
    try:
        conn.send(json_dict, destination=destination)
    finally:
        # Release the connection even when the send fails (previously leaked).
        conn.stop()
    return True
def push_with_string(string, destination):
    '''
    Push a raw ``string`` payload to the STOMP ``destination``.

    Returns True on success, for consistency with the other push_* helpers
    (the original implicitly returned None).
    '''
    conn = stomp.Connection()  # This will raise errors if stomp / orbited crash. Maybe we should try / except and handle this situation more gracefully.
    conn.start()
    conn.connect()
    try:
        conn.send(string, destination=destination)
    finally:
        # Release the connection even when the send fails (previously leaked).
        conn.stop()
    return True
| 38.833333 | 163 | 0.714592 |
46540f947ac818848efeb6e75d1f8e128fe673c8
| 672 |
py
|
Python
|
manage.py
|
VektorelPythonHIA23/WebProje
|
8dc6b8116cf4ca1aa93b098721eb7e09445a9a15
|
[
"MIT"
] | null | null | null |
manage.py
|
VektorelPythonHIA23/WebProje
|
8dc6b8116cf4ca1aa93b098721eb7e09445a9a15
|
[
"MIT"
] | null | null | null |
manage.py
|
VektorelPythonHIA23/WebProje
|
8dc6b8116cf4ca1aa93b098721eb7e09445a9a15
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings before management imports run.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'VektorelPython23.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to Django's CLI (runserver, migrate, ...).
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 29.217391 | 80 | 0.683036 |
917f97ec9ab7bf7cd1a72eeadfba0ba3543c1f56
| 3,162 |
py
|
Python
|
table_recognition/get_val_gt.py
|
cuongngm/TableMASTER-mmocr
|
77efbc420a80f257eb6947a076a50f61c72344bd
|
[
"Apache-2.0"
] | 206 |
2021-07-30T09:04:08.000Z
|
2022-03-22T00:57:44.000Z
|
table_recognition/get_val_gt.py
|
cuongngm/TableMASTER-mmocr
|
77efbc420a80f257eb6947a076a50f61c72344bd
|
[
"Apache-2.0"
] | 39 |
2021-08-05T07:16:46.000Z
|
2022-03-14T13:23:48.000Z
|
table_recognition/get_val_gt.py
|
cuongngm/TableMASTER-mmocr
|
77efbc420a80f257eb6947a076a50f61c72344bd
|
[
"Apache-2.0"
] | 61 |
2021-07-30T07:51:41.000Z
|
2022-03-30T14:40:02.000Z
|
import os
import json
import json_lines
from tqdm import tqdm
def searchMerge(tokensList):
    """Collapse structure tokens so each cell becomes one '<td...></td>' token.

    Scans ``tokensList`` until the closing '</tbody>' token, joining the
    pieces of every cell into a single string:

    * '<td>' followed by '</td>' (2 tokens), or
    * '<td' + one or two span-attribute tokens + '>' + '</td>' (4-5 tokens).

    All other tokens pass through unchanged; the trailing '</tbody>' is
    always appended.
    """
    merged = []
    idx = 0
    while True:
        token = tokensList[idx]
        if token == '</tbody>':
            break
        if token == '<td>':
            # Plain cell: '<td>' immediately followed by '</td>'.
            width = 2
        elif token == '<td':
            # Cell with span attributes: two attrs when the third token is
            # another ' rowspan'/' colspan' fragment, otherwise one.
            width = 5 if tokensList[idx + 2].startswith((' colspan', ' rowspan')) else 4
        else:
            width = 1
        merged.append(''.join(tokensList[idx:idx + width]))
        idx += width
    merged.append('</tbody>')
    return merged
jsonFile = 'PubTabNet_2.0.0.jsonl'
# NOTE(review): empty placeholder — os.listdir('') will raise; fill in the
# directory of the small validation subset.  thisValList is currently unused.
smallVal300 = ''
thisValList = os.listdir(smallVal300)

gtDict = dict()
with open(jsonFile, 'rb') as f:
    for item in tqdm(json_lines.reader(f)):
        """
        item's keys : ['filename', 'split', 'imgid', 'html']
        item['html']'s keys : ['cells', 'structure']
        item['html']['cell'] : list of dict
        eg. [
            {"tokens": ["<b>", "V", "a", "r", "i", "a", "b", "l", "e", "</b>"], "bbox": [1, 4, 27, 13]},
            {"tokens": ["<b>", "H", "a", "z", "a", "r", "d", " ", "r", "a", "t", "i", "o", "</b>"], "bbox": [219, 4, 260, 13]},
        ]
        item['html']['structure']'s ['tokens']
        eg. "structure": {"tokens": ["<thead>", "<tr>", "<td>", "</td>", ... ,"</tbody>"}
        """
        # Only ground truth for the validation split is generated here.
        if item['split'] != 'val':
            continue
        filename = item['filename']
        esbFlag = False
        beFlag = False
        rawToken = item['html']['structure']['tokens']
        # Merge per-cell structure fragments into single '<td...></td>' tokens.
        mergeTokenList = searchMerge(rawToken)
        mergeToken = ''.join(mergeTokenList)
        # text
        cells = item['html']['cells']
        textList = []
        for cell in cells:
            if len(cell['tokens']) == 0:
                # empty bbox
                textList.append('')
            else:
                textList.append(''.join(cell['tokens']))
        # Sanity check: one text entry per '<td' cell in the structure.
        # NOTE(review): the bare except swallows the assertion context and
        # re-raises a bare ValueError — consider 'except AssertionError'.
        try:
            assert len(textList) == mergeToken.count('<td')
        except:
            # import pdb;pdb.set_trace()
            raise ValueError
        # Inject each cell's text between its '>' and '<' delimiters.
        textCount = 0
        gtTokenList = []
        for mt in mergeTokenList:
            if mt.startswith('<td'):
                mt = mt.replace('><', '>{}<'.format(textList[textCount]))
                textCount = textCount + 1
            gtTokenList.append(mt)
        gtToken = ''.join(gtTokenList)
        gtDict.setdefault(filename, gtToken)

gtFile = 'gtVal_1212.json'
with open(gtFile, 'w') as f:
    json.dump(gtDict, f)
| 36.344828 | 143 | 0.501898 |
83c28240edc94a444e933463e7071d760509ed8b
| 1,069 |
py
|
Python
|
network_health_service/setup.py
|
kkkkv/tgnms
|
a3b8fd8a69b647a614f9856933f05e50a4affadf
|
[
"MIT"
] | 12 |
2021-04-06T06:27:18.000Z
|
2022-03-18T10:52:29.000Z
|
network_health_service/setup.py
|
kkkkv/tgnms
|
a3b8fd8a69b647a614f9856933f05e50a4affadf
|
[
"MIT"
] | 6 |
2022-01-04T13:32:16.000Z
|
2022-03-28T21:13:59.000Z
|
network_health_service/setup.py
|
kkkkv/tgnms
|
a3b8fd8a69b647a614f9856933f05e50a4affadf
|
[
"MIT"
] | 7 |
2021-09-27T13:14:42.000Z
|
2022-03-28T16:24:15.000Z
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from setuptools import find_packages, setup
# Parameters for Facebook's `ptr` (Python Test Runner).
# NOTE(review): ptr discovers this configuration by statically parsing
# setup.py for a literal dict assigned to the name `ptr_params`, so it must
# remain a plain literal — do not factor parts of it into variables.
ptr_params = {
    # Module ptr runs as the entry point, and the unittest suite to execute.
    "entry_point_module": "network_health_service/main",
    "test_suite": "tests.base",
    "test_suite_timeout": 300,
    # Minimum coverage percentages enforced per file (and overall) in CI.
    "required_coverage": {
        "network_health_service/stats/fetch_stats.py": 97,
        "network_health_service/stats/health.py": 100,
        "network_health_service/stats/metrics.py": 100,
        "TOTAL": 75,
    },
    # Linters / formatter / type checker ptr runs alongside the tests.
    "run_flake8": True,
    "run_black": True,
    "run_mypy": True,
}
setup(
    name="network_health_service",
    version="2021.06.17",
    packages=find_packages(exclude=["tests"]),
    python_requires=">=3.7",
    install_requires=["aiohttp", "aiomysql", "alembic>=1.3.3,<2.0", "sqlalchemy"],
    # "ci" extra pulls in the test tooling; "docs" the swagger UI renderer.
    extras_require={
        "ci": ["ptr", "asynctest>=0.13.0,<1.0"],
        "docs": ["aiohttp-swagger>=1.0.9,<2.0"],
    },
    test_suite=ptr_params["test_suite"],
    entry_points={
        "console_scripts": ["network_health_service = network_health_service.main:main"]
    },
)
| 28.891892 | 88 | 0.645463 |
e6379a00da2446f993f974f40e97ed81ce17cc42
| 465 |
py
|
Python
|
adapters/samsung/sensor_door.py
|
michahagg/domoticz-zigbee2mqtt-plugin
|
0d891a0bd96ed26547904ae8402a26e684dc8e35
|
[
"MIT"
] | 1 |
2021-01-17T16:53:44.000Z
|
2021-01-17T16:53:44.000Z
|
adapters/samsung/sensor_door.py
|
schurgan/zigbee2mqtt-plugin
|
41042bc52d34ad503812154a11d7f63aede44c71
|
[
"MIT"
] | null | null | null |
adapters/samsung/sensor_door.py
|
schurgan/zigbee2mqtt-plugin
|
41042bc52d34ad503812154a11d7f63aede44c71
|
[
"MIT"
] | null | null | null |
from adapters.adapter_with_battery import AdapterWithBattery
from devices.sensor.door_contact import DoorContactSensor
from devices.sensor.temperature import TemperatureSensor
class SmartThingsDoorSensor(AdapterWithBattery):
    """Adapter for the Samsung SmartThings door sensor.

    On top of the battery reporting inherited from AdapterWithBattery it
    registers two Domoticz devices: a door-contact switch and a
    temperature sensor.
    """

    def __init__(self, devices):
        super().__init__(devices)
        self.devices.extend([
            DoorContactSensor(devices, 'sensor', 'contact'),
            TemperatureSensor(devices, 'temp', 'temperature', 'temperature'),
        ])
| 42.272727 | 93 | 0.787097 |
6b3b21e54188815f15dcf7577a8537ea175794ac
| 201 |
py
|
Python
|
braindecode/models/__init__.py
|
TonioBall/braindecode
|
d5b8d87d959c96ea8422e21099e1ef4b71b9d05a
|
[
"BSD-3-Clause"
] | null | null | null |
braindecode/models/__init__.py
|
TonioBall/braindecode
|
d5b8d87d959c96ea8422e21099e1ef4b71b9d05a
|
[
"BSD-3-Clause"
] | null | null | null |
braindecode/models/__init__.py
|
TonioBall/braindecode
|
d5b8d87d959c96ea8422e21099e1ef4b71b9d05a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Some predefined network architectures for EEG decoding.
"""
from .deep4 import Deep4Net
from .eegnet import EEGNetv4
from .hybrid import HybridNetModule
from .shallow_fbcsp import ShallowFBCSPNet
| 22.333333 | 55 | 0.81592 |
3683e6722bf0fd8467127c15ad0dfa118912d14e
| 7,406 |
py
|
Python
|
skyportal/handlers/api/taxonomy.py
|
steveschulze/skyportal
|
47e334d71e34e82ff41bd0e32326e4107741e8e6
|
[
"BSD-3-Clause"
] | null | null | null |
skyportal/handlers/api/taxonomy.py
|
steveschulze/skyportal
|
47e334d71e34e82ff41bd0e32326e4107741e8e6
|
[
"BSD-3-Clause"
] | null | null | null |
skyportal/handlers/api/taxonomy.py
|
steveschulze/skyportal
|
47e334d71e34e82ff41bd0e32326e4107741e8e6
|
[
"BSD-3-Clause"
] | null | null | null |
from tdtax import schema, validate
from jsonschema.exceptions import ValidationError as JSONValidationError
from baselayer.app.access import permissions, auth_or_token
from ..base import BaseHandler
from ...models import DBSession, Taxonomy, Group
class TaxonomyHandler(BaseHandler):
    # REST handler for /api/taxonomy. The triple-quoted docstrings below are
    # OpenAPI/YAML specs consumed by the API-docs generator, so they are left
    # untouched; behavioral notes are given as '#' comments instead.

    @auth_or_token
    def get(self, taxonomy_id=None):
        """
        ---
        single:
          description: Retrieve a taxonomy
          tags:
            - taxonomies
          parameters:
            - in: path
              name: taxonomy_id
              required: true
              schema:
                type: integer
          responses:
            200:
              content:
                application/json:
                  schema: SingleTaxonomy
            400:
              content:
                application/json:
                  schema: Error
        multiple:
          description: Get all the taxonomies
          tags:
            - taxonomies
          responses:
            200:
              content:
                application/json:
                  schema: ArrayOfTaxonomys
            400:
              content:
                application/json:
                  schema: Error
        """
        # Single-taxonomy fetch: access control is delegated to
        # Taxonomy.get_taxonomy_usable_by_user, which returns the rows this
        # user may see (empty when inaccessible or nonexistent).
        if taxonomy_id is not None:
            taxonomy = Taxonomy.get_taxonomy_usable_by_user(
                taxonomy_id, self.current_user
            )
            if taxonomy is None or len(taxonomy) == 0:
                return self.error(
                    'Taxonomy does not exist or is not available to user.'
                )
            return self.success(data=taxonomy[0])
        # List fetch: restrict to taxonomies attached to at least one group
        # the requesting user can access.
        query = Taxonomy.query.filter(
            Taxonomy.groups.any(
                Group.id.in_([g.id for g in self.current_user.accessible_groups])
            )
        )
        self.verify_and_commit()
        return self.success(data=query.all())

    @permissions(['Post taxonomy'])
    def post(self):
        """
        ---
        description: Post new taxonomy
        tags:
          - taxonomies
        requestBody:
          content:
            application/json:
              schema:
                type: object
                properties:
                  name:
                    type: string
                    description: |
                      Short string to make this taxonomy memorable
                      to end users.
                  hierarchy:
                    type: object
                    description: |
                      Nested JSON describing the taxonomy
                      which should be validated against
                      a schema before entry
                  group_ids:
                    type: array
                    items:
                      type: integer
                    description: |
                      List of group IDs corresponding to which groups should be
                      able to view comment. Defaults to all of requesting
                      user's groups.
                  version:
                    type: string
                    description: |
                      Semantic version of this taxonomy name
                  provenance:
                    type: string
                    description: |
                      Identifier (e.g., URL or git hash) that
                      uniquely ties this taxonomy back
                      to an origin or place of record
                  isLatest:
                    type: boolean
                    description: |
                      Consider this version of the taxonomy with this
                      name the latest? Defaults to True.
                required:
                  - name
                  - hierarchy
                  - version
        responses:
          200:
            content:
              application/json:
                schema:
                  allOf:
                    - $ref: '#/components/schemas/Success'
                    - type: object
                      properties:
                        data:
                          type: object
                          properties:
                            taxonomy_id:
                              type: integer
                              description: New taxonomy ID
        """
        data = self.get_json()

        # name + version are mandatory and together must be unique.
        name = data.get('name', None)
        if name is None:
            return self.error("A name must be provided for a taxonomy")

        version = data.get('version', None)
        if version is None:
            return self.error("A version string must be provided for a taxonomy")

        existing_matches = (
            Taxonomy.query.filter(Taxonomy.name == name)
            .filter(Taxonomy.version == version)
            .all()
        )
        if len(existing_matches) != 0:
            return self.error(
                "That version/name combination is already "
                "present. If you really want to replace this "
                "then delete the appropriate entry."
            )

        # Ensure a valid taxonomy: validate the submitted hierarchy against
        # the tdtax JSON schema before persisting anything.
        hierarchy = data.get('hierarchy', None)
        if hierarchy is None:
            return self.error("A JSON of the taxonomy must be given")

        try:
            validate(hierarchy, schema)
        except JSONValidationError:
            return self.error("Hierarchy does not validate against the schema.")

        # establish the groups to use: default to the requester's own groups,
        # then drop any requested group IDs the user cannot access.
        user_group_ids = [g.id for g in self.current_user.groups]
        user_accessible_group_ids = [g.id for g in self.current_user.accessible_groups]
        group_ids = data.pop("group_ids", user_group_ids)
        if group_ids == []:
            group_ids = user_group_ids
        group_ids = [gid for gid in group_ids if gid in user_accessible_group_ids]
        if not group_ids:
            return self.error(
                f"Invalid group IDs field ({group_ids}): "
                "You must provide one or more valid group IDs."
            )
        groups = Group.query.filter(Group.id.in_(group_ids)).all()

        provenance = data.get('provenance', None)

        # update others with this name: when this version is flagged latest,
        # clear the isLatest flag on every other version of the same name.
        # TODO: deal with the same name but different groups?
        isLatest = data.get('isLatest', True)
        if isLatest:
            DBSession().query(Taxonomy).filter(Taxonomy.name == name).update(
                {'isLatest': False}
            )

        taxonomy = Taxonomy(
            name=name,
            hierarchy=hierarchy,
            provenance=provenance,
            version=version,
            isLatest=isLatest,
            groups=groups,
        )
        DBSession().add(taxonomy)
        self.verify_and_commit()
        return self.success(data={'taxonomy_id': taxonomy.id})

    @auth_or_token
    def delete(self, taxonomy_id):
        """
        ---
        description: Delete a taxonomy
        tags:
          - taxonomies
        parameters:
          - in: path
            name: taxonomy_id
            required: true
            schema:
              type: integer
        responses:
          200:
            content:
              application/json:
                schema: Success
        """
        # get_if_accessible_by raises (-> error response upstream) when the
        # user lacks delete permission on this row.
        taxonomy = Taxonomy.get_if_accessible_by(
            taxonomy_id, self.current_user, mode='delete', raise_if_none=True
        )
        DBSession().delete(taxonomy)
        self.verify_and_commit()
        return self.success()
| 32.060606 | 87 | 0.491628 |
eef12fb4e7183f5d11d04b5cf7d0b157a732c90e
| 23,310 |
py
|
Python
|
tacred_5shot.py
|
qcwthu/Continual_Fewshot_Relation_Learning
|
9d94a9ddc9de6300deec1d5bd434cda0a7a3f1eb
|
[
"MIT"
] | null | null | null |
tacred_5shot.py
|
qcwthu/Continual_Fewshot_Relation_Learning
|
9d94a9ddc9de6300deec1d5bd434cda0a7a3f1eb
|
[
"MIT"
] | null | null | null |
tacred_5shot.py
|
qcwthu/Continual_Fewshot_Relation_Learning
|
9d94a9ddc9de6300deec1d5bd434cda0a7a3f1eb
|
[
"MIT"
] | null | null | null |
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import sys
import json
import gc
from tqdm import tqdm
from sklearn.cluster import KMeans
from encode import lstm_encoder
from dataprocess_tacred import data_sampler
from model import proto_softmax_layer
from dataprocess_tacred import get_data_loader
from transformers import BertTokenizer,BertModel
from util import set_seed,process_data,getnegfrombatch,select_similar_data_new_tac
import faiss
def eval_model(config, basemodel, test_set, mem_relations):
    """Evaluate ``basemodel`` on ``test_set`` and return the accuracy.

    A prediction counts as correct when the prototype-distance score of the
    gold label beats the scores of all of the instance's candidate negative
    labels. Restores the model to train mode before returning.
    """
    print("One eval")
    print("test data num is:\t", len(test_set))
    basemodel.eval()
    loader = get_data_loader(config, test_set, shuffle=False, batch_size=30)
    total = 0.0
    correct = 0.0
    for step, batch in enumerate(loader):
        (labels, neg_labels, sentences, firstent, firstentindex, secondent,
         secondentindex, headid, tailid, rawtext, lengths, typelabels) = batch
        logits, rep = basemodel(sentences, lengths)
        # Score against the memorized prototypes rather than the raw logits.
        proto_scores = basemodel.get_mem_feature(rep)
        for idx in range(len(logits)):
            total += 1.0
            score = proto_scores[idx]
            golden_score = score[labels[idx]]
            best_negative = -2147483647.0
            for cand in neg_labels[idx]:
                if cand != labels[idx] and score[cand] > best_negative:
                    best_negative = score[cand]
            if golden_score > best_negative:
                correct += 1
    acc = correct / total
    print(acc)
    basemodel.train()
    return acc
def get_memory(config, model, proto_set):
    """Compute one prototype vector per relation.

    ``proto_set`` is a list (indexed by relation id) of instance lists; each
    relation's prototype is the mean of its instances' encoder features.
    Returns a tensor of shape (num_relations, feature_dim).
    """
    flat_instances = []
    offsets = [0]
    for instances in proto_set:
        flat_instances += instances
        offsets.append(offsets[-1] + len(instances))
    loader = get_data_loader(config, flat_instances, False, False)
    feature_chunks = []
    for step, batch in enumerate(loader):
        (labels, neg_labels, sentences, firstent, firstentindex, secondent,
         secondentindex, headid, tailid, rawtext, lengths, typelabels) = batch
        feature_chunks.append(model.get_feature(sentences, lengths))
    all_features = np.concatenate(feature_chunks)
    # Average each relation's contiguous slice of features into a prototype.
    protos = [
        torch.tensor(all_features[offsets[k]:offsets[k + 1], :].mean(0, keepdims=True))
        for k in range(len(proto_set))
    ]
    return torch.cat(protos, 0)
def select_data(mem_set, proto_memory, config, model, divide_train_set, num_sel_data, current_relations, selecttype):
    """Select memory exemplars for every relation in ``current_relations``.

    selecttype == 0: run KMeans over the relation's encoded training
        instances and keep the instance closest to each cluster center.
    selecttype == 1: keep the single instance whose embedding is nearest to
        the relation's mean embedding.

    Side effects: ``mem_set`` and ``proto_memory`` are updated in place, and
    each selected instance's type label (index 11) is set to 3 ("memory").
    When more than one relation is present, negative head/tail entity pools
    are filled round-robin from the next relation's positives.
    Returns the updated ``mem_set``.

    Bug fix vs. the original: the selecttype == 1 branch used to index
    ``thisdataset[j]`` (the last loop index) instead of the computed
    ``minindex``, so the nearest-to-mean instance was never the one stored.
    """
    rela_num = len(current_relations)
    for i in range(0, rela_num):
        thisrel = current_relations[i]
        # Reset this relation's memory slot; if seen before, also drop the
        # exemplar previously appended to its prototype list.
        if thisrel in mem_set.keys():
            mem_set[thisrel] = {'0': [], '1': {'h': [], 't': []}}
            proto_memory[thisrel].pop()
        else:
            mem_set[thisrel] = {'0': [], '1': {'h': [], 't': []}}
        thisdataset = divide_train_set[thisrel]
        data_loader = get_data_loader(config, thisdataset, False, False)
        features = []
        for step, (labels, neg_labels, sentences, firstent, firstentindex, secondent, secondentindex, headid, tailid, rawtext, lengths,
                   typelabels) in enumerate(data_loader):
            feature = model.get_feature(sentences, lengths)
            features.append(feature)
        features = np.concatenate(features)
        num_clusters = min(num_sel_data, len(thisdataset))
        if selecttype == 0:
            # One exemplar per cluster: the instance nearest each centroid.
            # (Renamed the loop variable so it no longer shadows outer `i`.)
            kmeans = KMeans(n_clusters=num_clusters, random_state=0)
            distances = kmeans.fit_transform(features)
            for cluster_idx in range(num_clusters):
                sel_index = np.argmin(distances[:, cluster_idx])
                instance = thisdataset[sel_index]
                instance[11] = 3  # mark as memory sample
                mem_set[thisrel]['0'].append(instance)  # positive sample
                proto_memory[thisrel].append(instance)
        elif selecttype == 1:
            # Single exemplar: the instance nearest the average embedding.
            samplenum = features.shape[0]
            sumvec = features.sum(axis=0) / samplenum
            dists = np.sqrt(np.sum(np.square(features - sumvec), axis=1))
            minindex = int(np.argmin(dists))
            instance = thisdataset[minindex]  # FIX: was thisdataset[j]
            instance[11] = 3  # mark as memory sample
            mem_set[thisrel]['0'].append(instance)
            proto_memory[thisrel].append(instance)
        else:
            print("error select type")
    # Build negative entity pools: each relation borrows the head/tail
    # entities of the NEXT relation's positive samples (round-robin).
    if rela_num > 1:
        allnegres = {}
        for i in range(rela_num):
            thisnegres = {'h': [], 't': []}
            currel = current_relations[i]
            thisrelposnum = len(mem_set[currel]['0'])
            for j in range(thisrelposnum):
                thisnegres['h'].append(mem_set[currel]['0'][j][3])
                thisnegres['t'].append(mem_set[currel]['0'][j][5])
            allnegres[currel] = thisnegres
        for i in range(rela_num):
            togetnegindex = (i + 1) % rela_num
            togetnegrelname = current_relations[togetnegindex]
            mem_set[current_relations[i]]['1']['h'].extend(allnegres[togetnegrelname]['h'])
            mem_set[current_relations[i]]['1']['t'].extend(allnegres[togetnegrelname]['t'])
    return mem_set
# Margin used by the handcrafted hinge terms (loss3/loss5) in the training
# loops below.
tempthre = 0.2
# Relative weights for the individual loss terms combined during training;
# the CE loss on raw logits (factor 1) is set to 0.0 inside those functions.
factorfor2 = 1.0  # CE on prototype-distance logits
factorfor3 = 1.0  # margin between gold score and best runner-up
factorfor4 = 1.0  # MultiMarginLoss on prototype logits
factorfor5 = 0.1  # hard-negative hinge (train_model_with_hard_neg only)
def train_model_with_hard_neg(config, model, mem_set, traindata, epochs, current_proto, ifnegtive=0):
    """Train with hard negatives mined for the memory samples in each batch.

    The training set is the current task data plus all stored memory
    positives. The total loss is a weighted sum (weights are the module-level
    factorfor* constants): loss2 = CE on prototype logits, loss3 = margin
    between gold and runner-up prototype scores, loss4 = MultiMarginLoss,
    loss5 = hinge on (hard-negative score - gold score) for memory samples;
    loss1 (CE on raw logits) is computed but weighted 0.0.
    Returns the trained model.
    """
    print(len(traindata))
    mem_data = []
    if len(mem_set) != 0:
        for key in mem_set.keys():
            mem_data.extend(mem_set[key]['0'])
    print(len(mem_data))
    train_set = traindata + mem_data
    #train_set.extend(mem_data)  ######## possible issue noted by original author: this would add the memory samples twice (translated comment)
    print(len(train_set))
    data_loader = get_data_loader(config, train_set, batch_size=config['batch_size_per_step'])
    model.train()
    criterion = nn.CrossEntropyLoss()
    lossfn = nn.MultiMarginLoss(margin=0.2)
    optimizer = optim.Adam(model.parameters(), config['learning_rate'])
    for epoch_i in range(epochs):
        model.set_memorized_prototypes(current_proto)
        losses1 = []
        losses2 = []
        losses3 = []
        losses4 = []
        losses5 = []
        lossesfactor1 = 0.0
        lossesfactor2 = factorfor2
        lossesfactor3 = factorfor3
        lossesfactor4 = factorfor4
        lossesfactor5 = factorfor5
        for step, (labels, neg_labels, sentences, firstent, firstentindex, secondent, secondentindex, headid, tailid, rawtext, lengths,
                   typelabels) in enumerate(data_loader):
            model.zero_grad()
            labels = labels.to(config['device'])
            typelabels = typelabels.to(config['device'])  #### 0: rel pattern, 1: pos (new train data), 2: neg, 3: memory
            # Count the memory samples in this batch and record their positions.
            numofmem = 0
            numofnewtrain = 0
            allnum = 0
            memindex = []
            for index,onetype in enumerate(typelabels):
                if onetype == 1:
                    numofnewtrain += 1
                if onetype == 3:
                    numofmem += 1
                    memindex.append(index)
                allnum += 1
            getnegfromnum = 1
            allneg = []
            alllen = []
            if numofmem > 0:
                ### select hard negatives for each memory sample by swapping
                ### its entities with those of other items in the batch
                for oneindex in memindex:
                    negres,lenres = getnegfrombatch(oneindex,firstent,firstentindex,secondent,secondentindex,sentences,lengths,getnegfromnum,allnum,labels,neg_labels)
                    for aa in negres:
                        allneg.append(torch.tensor(aa))
                    for aa in lenres:
                        alllen.append(torch.tensor(aa))
                # The negatives are appended AFTER the real batch items so the
                # model output can be split at index `allnum` below.
                sentences.extend(allneg)
                lengths.extend(alllen)
            logits, rep = model(sentences, lengths)
            logits_proto = model.mem_forward(rep)
            # Split outputs back into real items vs. appended negatives.
            logitspos = logits[0:allnum,]
            logits_proto_pos = logits_proto[0:allnum,]
            if numofmem > 0:
                logits_proto_neg = logits_proto[allnum:,]
            logits = logitspos
            logits_proto = logits_proto_pos
            loss1 = criterion(logits, labels)
            loss2 = criterion(logits_proto, labels)
            loss4 = lossfn(logits_proto, labels)
            # loss3: hinge on the gap between the gold prototype score and the
            # best competing score, with margin `tempthre`.
            loss3 = torch.tensor(0.0).to(config['device'])
            for index, logit in enumerate(logits):
                score = logits_proto[index]
                preindex = labels[index]
                maxscore = score[preindex]
                size = score.shape[0]
                secondmax = -100000
                for j in range(size):
                    if j != preindex and score[j] > secondmax:
                        secondmax = score[j]
                if secondmax - maxscore + tempthre > 0.0:
                    loss3 += (secondmax - maxscore + tempthre).to(config['device'])
            loss3 /= logits.shape[0]
            # loss5: hinge pulling each memory sample's gold score above the
            # scores its hard negatives assign to that gold label.
            start = 0
            loss5 = torch.tensor(0.0).to(config['device'])
            allusenum = 0
            for index in memindex:
                onepos = logits_proto[index]
                posindex = labels[index]
                poslabelscore = onepos[posindex]
                negnum = getnegfromnum * 2
                negscore = torch.tensor(0.0).to(config['device'])
                for ii in range(start, start + negnum):
                    oneneg = logits_proto_neg[ii]
                    # NOTE(review): plain '=' keeps only the CURRENT negative's
                    # score each iteration; the commented-out original lines
                    # suggest accumulation (+=) may have been intended —
                    # confirm before changing.
                    negscore = oneneg[posindex]
                    if negscore - poslabelscore + 0.01 > 0.0 and negscore < poslabelscore:
                        loss5 += (negscore - poslabelscore + 0.01)
                        allusenum += 1
                start += negnum
            if len(memindex) == 0:
                loss = loss1 * lossesfactor1 + loss2 * lossesfactor2 + loss3 * lossesfactor3 + loss4 * lossesfactor4
            else:
                loss5 = loss5 / allusenum
                loss = loss1 * lossesfactor1 + loss2 * lossesfactor2 + loss3 * lossesfactor3 + loss4 * lossesfactor4 + loss5 * lossesfactor5   ###with loss5
            loss.backward()
            losses1.append(loss1.item())
            losses2.append(loss2.item())
            losses3.append(loss3.item())
            losses4.append(loss4.item())
            losses5.append(loss5.item())
            torch.nn.utils.clip_grad_norm_(model.parameters(), config['max_grad_norm'])
            optimizer.step()
    return model
def train_simple_model(config, model, mem_set, train_set, epochs, current_proto, ifusemem=False):
    """Plain training pass over ``train_set``.

    When ``ifusemem`` is True the stored memory positives are appended to
    ``train_set`` (in place) first. The loss is a weighted sum of: loss2 =
    CE on prototype logits, loss3 = margin between gold and runner-up
    prototype scores (margin ``tempthre``), loss4 = MultiMarginLoss; loss1
    (CE on raw logits) is computed but weighted 0.0.
    Returns the trained model.
    """
    if ifusemem:
        mem_data = []
        if len(mem_set)!=0:
            for key in mem_set.keys():
                mem_data.extend(mem_set[key]['0'])
        train_set.extend(mem_data)
    data_loader = get_data_loader(config, train_set, batch_size=config['batch_size_per_step'])
    model.train()
    criterion = nn.CrossEntropyLoss()
    lossfn = nn.MultiMarginLoss(margin=0.2)
    optimizer = optim.Adam(model.parameters(), config['learning_rate'])
    for epoch_i in range(epochs):
        # Refresh the prototypes the model scores against each epoch.
        model.set_memorized_prototypes(current_proto)
        losses1 = []
        losses2 = []
        losses3 = []
        losses4 = []
        lossesfactor1 = 0.0
        lossesfactor2 = factorfor2
        lossesfactor3 = factorfor3
        lossesfactor4 = factorfor4
        for step, (labels, neg_labels, sentences, firstent, firstentindex, secondent, secondentindex, headid, tailid, rawtext,
                   lengths, typelabels) in enumerate(tqdm(data_loader)):
            model.zero_grad()
            logits, rep = model(sentences, lengths)
            logits_proto = model.mem_forward(rep)
            labels = labels.to(config['device'])
            loss1 = criterion(logits, labels)
            loss2 = criterion(logits_proto, labels)
            loss4 = lossfn(logits_proto, labels)
            loss3 = torch.tensor(0.0).to(config['device'])
            ### add triple-style loss: hinge on the gap between the gold
            ### prototype score and the best competing score.
            for index, logit in enumerate(logits):
                score = logits_proto[index]
                preindex = labels[index]
                maxscore = score[preindex]
                size = score.shape[0]
                secondmax = -100000
                for j in range(size):
                    if j != preindex and score[j] > secondmax:
                        secondmax = score[j]
                if secondmax - maxscore + tempthre > 0.0:
                    loss3 += (secondmax - maxscore + tempthre).to(config['device'])
            loss3 /= logits.shape[0]
            loss = loss1 * lossesfactor1 + loss2 * lossesfactor2 + loss3 * lossesfactor3 + loss4 * lossesfactor4
            loss.backward()
            losses1.append(loss1.item())
            losses2.append(loss2.item())
            losses3.append(loss3.item())
            losses4.append(loss4.item())
            torch.nn.utils.clip_grad_norm_(model.parameters(), config['max_grad_norm'])
            optimizer.step()
    return model
if __name__ == '__main__':
    # Continual few-shot relation learning on TACRED: trains a base model on
    # the first task, then incrementally on new relation sets, augmenting new
    # tasks with similar distant-supervision sentences retrieved via a
    # BERT-based similarity model and a FAISS index.
    select_thredsold_param = 0.65
    select_num = 1
    f = open("config/config_tacred.json", "r")
    config = json.loads(f.read())
    f.close()
    config['device'] = torch.device('cuda' if torch.cuda.is_available() and config['use_gpu'] else 'cpu')
    config['n_gpu'] = torch.cuda.device_count()
    config['batch_size_per_step'] = int(config['batch_size'] / config["gradient_accumulation_steps"])
    config['neg_sampling'] = False
    root_path = '.'
    # GloVe vocabulary + embeddings for the LSTM encoder.
    word2id = json.load(open(os.path.join(root_path, 'glove/word2id.txt')))
    word2vec = np.load(os.path.join(root_path, 'glove/word2vec.npy'))
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    donum = 1
    # Distant-supervision corpus used as the retrieval pool.
    distantpath = "data/distantdata/"
    file1 = distantpath + "distant.json"
    file2 = distantpath + "exclude_fewrel_distant.json"
    list_data,entpair2scope = process_data(file1,file2)
    topk = 16
    max_sen_length_for_select = 64
    max_sen_lstm_tokenize = 128
    select_thredsold = select_thredsold_param
    print("********* load from ckpt ***********")
    ckptpath = "simmodelckpt"
    print(ckptpath)
    ckpt = torch.load(ckptpath)
    # BERT similarity model used for retrieving similar sentences.
    SimModel = BertModel.from_pretrained('bert-base-uncased',state_dict=ckpt["bert-base"]).to(config["device"])
    # FAISS inner-product index over precomputed unlabeled-sentence embeddings.
    allunlabledata = np.load("allunlabeldata.npy").astype('float32')
    d = 768 * 2
    index = faiss.IndexFlatIP(d)
    print(index.is_trained)
    index.add(allunlabledata)  # add vectors to the index
    print(index.ntotal)
    for m in range(donum):
        print(m)
        # Task-split files for this run.
        config["rel_cluster_label"] = "data/tacred/CFRLdata_10_100_10_5/rel_cluster_label_" + str(m) + ".npy"
        config['training_file'] = "data/tacred/CFRLdata_10_100_10_5/train_" + str(m) + ".txt"
        config['valid_file'] = "data/tacred/CFRLdata_10_100_10_5/valid_" + str(m) + ".txt"
        config['test_file'] = "data/tacred/CFRLdata_10_100_10_5/test_" + str(m) + ".txt"
        encoderforbase = lstm_encoder(token2id=word2id, word2vec=word2vec, word_size=len(word2vec[0]), max_length=128, pos_size=None,
                                      hidden_size=config['hidden_size'], dropout=0, bidirectional=True, num_layers=1, config=config)
        sampler = data_sampler(config, encoderforbase.tokenizer)
        modelforbase = proto_softmax_layer(encoderforbase, num_class=len(sampler.id2rel), id2rel=sampler.id2rel, drop=0, config=config)
        modelforbase = modelforbase.to(config["device"])
        word2vec_back = word2vec.copy()
        sequence_results = []
        result_whole_test = []
        # Six independent runs with different seeds.
        for i in range(6):
            num_class = len(sampler.id2rel)
            print(config['random_seed'] + 10 * i)
            set_seed(config, config['random_seed'] + 10 * i)
            sampler.set_seed(config['random_seed'] + 10 * i)
            mem_set = {}  #### mem_set = {rel_id:{'0':[positive samples],'1':[negative samples]}} (negatives: swap 5 heads / swap 5 tails — translated comment)
            mem_relations = []   ###not include relation of current task
            past_relations = []
            savetest_all_data = None
            saveseen_relations = []
            # One prototype seed (the relation-name pattern) per relation.
            proto_memory = []
            for i in range(len(sampler.id2rel)):
                proto_memory.append([sampler.id2rel_pattern[i]])
            oneseqres = []
            ##################################
            whichdataselecct = 1
            ifnorm = True
            ##################################
            # Iterate over the task sequence produced by the sampler.
            for steps, (training_data, valid_data, test_data, test_all_data, seen_relations, current_relations) in enumerate(sampler):
                print("------------------------")
                print(len(training_data))
                savetest_all_data = test_all_data
                saveseen_relations = seen_relations
                currentnumber = len(current_relations)
                print(currentnumber)
                print(current_relations)
                # Bucket the training data by relation id.
                divide_train_set = {}
                for relation in current_relations:
                    divide_train_set[relation] = []  ##int
                for data in training_data:
                    divide_train_set[data[0]].append(data)
                print(len(divide_train_set))
                #### select most similar sentences for new tasks only,
                #### step == 0 is the base model (translated comment)
                if steps == 0:
                    ##train base model
                    print("train base model,not select most similar")
                else:
                    print("train new model,select most similar")
                    selectdata = select_similar_data_new_tac(training_data, tokenizer, entpair2scope, topk,
                                                             max_sen_length_for_select,list_data, config, SimModel,
                                                             select_thredsold,max_sen_lstm_tokenize,encoderforbase.tokenizer,index,ifnorm,select_num)
                    print(len(selectdata))
                    training_data.extend(selectdata)
                    print(len(training_data))
                # Train, select memory exemplars, then refine with hard negatives.
                current_proto = get_memory(config, modelforbase, proto_memory)
                modelforbase = train_simple_model(config, modelforbase, mem_set, training_data, 1,
                                                  current_proto, False)
                select_data(mem_set, proto_memory, config, modelforbase, divide_train_set,
                            config['rel_memory_size'], current_relations, 0)  ##config['rel_memory_size'] == 1
                for j in range(2):
                    current_proto = get_memory(config, modelforbase, proto_memory)
                    modelforbase = train_model_with_hard_neg(config, modelforbase, mem_set, training_data, 1,
                                                             current_proto, ifnegtive=0)
                current_proto = get_memory(config, modelforbase, proto_memory)
                modelforbase.set_memorized_prototypes(current_proto)
                mem_relations.extend(current_relations)
                # Evaluate on the union of the test splits seen so far.
                currentalltest = []
                for mm in range(len(test_data)):
                    currentalltest.extend(test_data[mm])
                thisstepres = eval_model(config, modelforbase, currentalltest, mem_relations)
                print("step:\t",steps,"\taccuracy:\t",thisstepres)
                oneseqres.append(thisstepres)
            sequence_results.append(np.array(oneseqres))
            # Final whole-test evaluation for this seeded run.
            allres = eval_model(config, modelforbase, savetest_all_data, saveseen_relations)
            result_whole_test.append(allres)
            print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
            print("after one epoch allres:\t",allres)
            print(result_whole_test)
            # re-initialize the models for the next seeded run
            modelforbase = modelforbase.to('cpu')
            del modelforbase
            gc.collect()
            if config['device'] == 'cuda':
                torch.cuda.empty_cache()
            encoderforbase = lstm_encoder(token2id=word2id, word2vec=word2vec_back.copy(), word_size=len(word2vec[0]),max_length=128, pos_size=None,
                                          hidden_size=config['hidden_size'], dropout=0, bidirectional=True, num_layers=1, config=config)
            modelforbase = proto_softmax_layer(encoderforbase, num_class=len(sampler.id2rel), id2rel=sampler.id2rel,
                                               drop=0, config=config)
            modelforbase.to(config["device"])
        # output the final avg result
        print("Final result!")
        print(result_whole_test)
        for one in sequence_results:
            for item in one:
                sys.stdout.write('%.4f, ' % item)
            print('')
        avg_result_all_test = np.average(sequence_results, 0)
        for one in avg_result_all_test:
            sys.stdout.write('%.4f, ' % one)
        print('')
        print("Finish training............................")
        #'''
| 43.488806 | 166 | 0.57825 |
25b5ea514438cb8a32e933f41f7aac69d3d89fc9
| 3,226 |
py
|
Python
|
hello_world/hello_world/settings.py
|
swilliams704/django-hello-world
|
94bd3784cc93d7a49ec50c82cd1384e95c9bede3
|
[
"Apache-2.0"
] | null | null | null |
hello_world/hello_world/settings.py
|
swilliams704/django-hello-world
|
94bd3784cc93d7a49ec50c82cd1384e95c9bede3
|
[
"Apache-2.0"
] | null | null | null |
hello_world/hello_world/settings.py
|
swilliams704/django-hello-world
|
94bd3784cc93d7a49ec50c82cd1384e95c9bede3
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for hello_world project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a2#@_jf_+rs%ulwg*pjv7)4)^(djl%-q&f1d6(0dc=+^07v^8)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hello_world.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hello_world.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'poll_db',
'USER': 'db_user',
'PASSWORD': 'passw0rd',
'HOST': 'hello_world_db',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.603175 | 91 | 0.686299 |
23640bde89679632cdcacac6cfe355008293db58
| 1,880 |
py
|
Python
|
python/sparkts/models/ARGARCH.py
|
shenjian74/spark-timeseries
|
05edf228791a2fe28de28984618fbcb10d1a5fc1
|
[
"Apache-2.0"
] | 1 |
2018-10-23T22:12:07.000Z
|
2018-10-23T22:12:07.000Z
|
python/sparkts/models/ARGARCH.py
|
shenjian74/spark-timeseries
|
05edf228791a2fe28de28984618fbcb10d1a5fc1
|
[
"Apache-2.0"
] | 1 |
2018-10-26T21:48:29.000Z
|
2018-10-26T21:48:29.000Z
|
python/sparkts/models/ARGARCH.py
|
shenjian74/spark-timeseries
|
05edf228791a2fe28de28984618fbcb10d1a5fc1
|
[
"Apache-2.0"
] | null | null | null |
from sparkts.models._model import PyModel
from pyspark.mllib.common import _py2java, _java2py
from pyspark.mllib.linalg import Vectors
"""
"""
def fit_model(ts, sc=None):
    """
    Fits an AR(1) + GARCH(1, 1) model to the given time series.

    Parameters
    ----------
    ts:
        the time series to which we want to fit a AR+GARCH model as a Numpy array
    sc:
        the SparkContext hosting the JVM gateway (required)

    Returns an ARGARCH model
    """
    assert sc != None, "Missing SparkContext"
    # Ship the series to the JVM and fit there via spark-timeseries.
    dense_ts = _py2java(sc, Vectors.dense(ts))
    java_model = sc._jvm.com.cloudera.sparkts.models.ARGARCH.fitModel(dense_ts)
    return ARGARCHModel(jmodel=java_model, sc=sc)
class ARGARCHModel(PyModel):
    """Python wrapper around the JVM AR(1)+GARCH(1,1) model.

    Either wraps an existing fitted JVM model (``jmodel``) or constructs a
    new one from the given coefficients. The fitted coefficients are
    mirrored onto the Python object as ``c``, ``phi``, ``omega``, ``alpha``
    and ``beta``.
    """

    def __init__(self, c=0.0, phi=0.0, omega=0.0, alpha=0.0, beta=0.0, jmodel=None, sc=None):
        assert sc != None, "Missing SparkContext"
        self._ctx = sc
        if jmodel == None:
            self._jmodel = self._ctx._jvm.com.cloudera.sparkts.models.ARGARCHModel(c, phi, omega, alpha, beta)
        else:
            self._jmodel = jmodel
        # Mirror the JVM model's coefficients for convenient Python access.
        self.c = self._jmodel.c()
        self.phi = self._jmodel.phi()
        self.omega = self._jmodel.omega()
        self.alpha = self._jmodel.alpha()
        self.beta = self._jmodel.beta()

    def sample(self, n):
        """
        Samples a random time series of a given length with the properties of the model.

        Parameters
        ----------
        n:
            The length of the time series to sample.

        Returns the sampled time series.
        """
        rg = self._ctx._jvm.org.apache.commons.math3.random.JDKRandomGenerator()
        return _java2py(self._ctx, self._jmodel.sample(n, rg))

    def sample_with_variances(self, n):
        """
        Samples a random time series of a given length, together with the
        per-step conditional variances.

        Parameters
        ----------
        n:
            The length of the time series to sample.

        Returns the sampled time series and its variances.
        """
        # FIX: the original signature omitted `n`, so this method always
        # raised NameError; it now mirrors sample(n).
        rg = self._ctx._jvm.org.apache.commons.math3.random.JDKRandomGenerator()
        return _java2py(self._ctx, self._jmodel.sampleWithVariances(n, rg))
| 31.333333 | 110 | 0.61117 |
f851986d4a5689059de50195f60832dfa5a99180
| 214 |
py
|
Python
|
lesson-04/classwork/cw-04-02.py
|
Evgesha3425/lessons2
|
84f93b83d7ab4d33809cffceaec9a1f22c32856c
|
[
"BSD-2-Clause"
] | 1 |
2021-11-14T13:06:45.000Z
|
2021-11-14T13:06:45.000Z
|
lesson-04/classwork/cw-04-02.py
|
Evgesha3425/lessons
|
84f93b83d7ab4d33809cffceaec9a1f22c32856c
|
[
"BSD-2-Clause"
] | null | null | null |
lesson-04/classwork/cw-04-02.py
|
Evgesha3425/lessons
|
84f93b83d7ab4d33809cffceaec9a1f22c32856c
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Написать программу, которая выведет на экран все числа от 1 до 100 которые кратные n (n вводится с клавиатуры).
"""
n = int(input("Enter number: "))
for x in range(1, 101):
if x % n == 0:
print(x)
| 23.777778 | 111 | 0.630841 |
c2841cb2dec7cf1af81f430bee27a5b150863f43
| 1,007 |
py
|
Python
|
football_analytics_platform/source/_archive/upload_cloud_storage.py
|
JonNixonCodes/football-analytics-platform
|
8be8e7f2a3638f74490462013b0e91db1b2be98d
|
[
"MIT"
] | null | null | null |
football_analytics_platform/source/_archive/upload_cloud_storage.py
|
JonNixonCodes/football-analytics-platform
|
8be8e7f2a3638f74490462013b0e91db1b2be98d
|
[
"MIT"
] | null | null | null |
football_analytics_platform/source/_archive/upload_cloud_storage.py
|
JonNixonCodes/football-analytics-platform
|
8be8e7f2a3638f74490462013b0e91db1b2be98d
|
[
"MIT"
] | null | null | null |
# upload_cloud_storage.py
# %% Import libraries
import os
import re
from google.cloud import storage
# %% Define functions
def set_google_application_credentials(credentials_file_path):
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials_file_path
def extract_blob_path(dest_folder_path, src_file_path):
file_name = re.search(r"/(\w+\.csv)",src_file_path)[1]
dest_blob_path = dest_folder_path + file_name
return dest_blob_path
def upload_cloud_storage(bucket_name, source_file_path, destination_blob_path):
"""Uploads a file to Cloud Storage bucket."""
# The ID of your GCS bucket
# bucket_name = "your-bucket-name"
# The path to your file to upload
# source_file_name = "local/path/to/file"
# The ID of your GCS object
# destination_blob_name = "storage-object-name"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_path)
blob.upload_from_filename(source_file_path)
| 34.724138 | 79 | 0.753724 |
01a5f59cd263bfac70e0e7b83bd7f02a9c3e575a
| 3,578 |
py
|
Python
|
src/docReader.py
|
emmanuelmacharia/document-parser
|
1c5582f27d0fd84a18af7eb80cdc7c75c8d810d7
|
[
"MIT"
] | null | null | null |
src/docReader.py
|
emmanuelmacharia/document-parser
|
1c5582f27d0fd84a18af7eb80cdc7c75c8d810d7
|
[
"MIT"
] | null | null | null |
src/docReader.py
|
emmanuelmacharia/document-parser
|
1c5582f27d0fd84a18af7eb80cdc7c75c8d810d7
|
[
"MIT"
] | null | null | null |
import os
import re
from dateutil.parser import parse
from collections.abc import Iterable
import lxml.html
import csv
import docx
from docx2python import docx2python
def flatten(l):
for el in l:
if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
yield from flatten(el)
else:
yield el
def is_date(string, fuzzy=False):
"""
Return whether the string can be interpreted as a date.
:param string: str, string to check for date
:param fuzzy: bool, ignore unknown tokens in string if True
"""
try:
parse(string, fuzzy=fuzzy)
return True
except ValueError:
return False
def absolute_file_paths(directory):
path = os.path.abspath(directory)
my_files = [entry.path for entry in os.scandir(path) if entry.is_file() and (entry.name.endswith('docx') or entry.name.endswith('DOCX'))]
return my_files
def get_doc(fileName, params):
doc = docx.Document(fileName)
use_docx2python(fileName, params)
paragraphs = []
for para in doc.paragraphs:
if para.text != '' and para.text != ' ':
paragraphs.append(para)
return paragraphs
def use_docx2python(fileName, params):
''' gets our title and the place, if theres any'''
parsed = docx2python(fileName)
heading = parsed.body
body = flatten(heading)
title = ''
place = ''
for i in body:
if '<a href=' in i:
title += i
if title:
title = lxml.html.fromstring(title).text_content()
else:
pass
if '[' in title:
place += re.split('\[', title)[-1]
params["headline"] = title
params["place"] = place[:-1]
def get_word_from_arguments(file_path) -> str:
files = absolute_file_paths(file_path)
params_array = []
for file in files:
parameters = {
'headline': '',
'place': '',
'date':'',
'day': "",
'words': '',
'byline': ''
}
paragraphs = get_doc(file, parameters)
everything_found = False
while not everything_found:
for para in paragraphs:
if is_date(para.text.rsplit(' ', 1)[0]):
parameters['date'] = para.text.rsplit(' ', 1)[0]
parameters['day'] = para.text.split(' ')[-1]
if 'length:' in para.text.lower():
parameters['words'] = re.split(' ', re.split(':', para.text.replace(u'\xa0', u''))[1])[0]
if 'byline:' in para.text.lower():
parameters['byline'] = re.split(':', para.text.replace(u'\xa0', u''))[1]
if is_everything_found(parameters):
break
everything_found = True
params_array.append(parameters)
print(params_array)
generate_output_file(params_array)
def is_everything_found(params: dict) -> bool :
filled = all(value for value in params.values())
if filled:
everything_found = True
return True
return False
def generate_output_file(parameters):
fileName = csv_file_name = parameters[0]['date'].split(' ')[-1] + '.csv'
with open(fileName, 'w', newline='\n') as file:
fieldNames = list(parameters[0].keys())
writer = csv.DictWriter(file, fieldnames=fieldNames)
writer.writeheader()
for param in parameters:
writer.writerow(param)
# resume = get_doc('F:\duplicates\10 Years On - How 9_11 Changed the World [column].DOCX')
get_word_from_arguments(r'F:\2012')
| 27.953125 | 141 | 0.589435 |
f508472c364b682f3e0bcfa8646b334b3ee05c5f
| 9,142 |
py
|
Python
|
ibis/tests/expr/test_signature.py
|
hussainsultan/ibis
|
cc6cfd49ce9ead3e793734b8e56b87040be093fc
|
[
"Apache-2.0"
] | null | null | null |
ibis/tests/expr/test_signature.py
|
hussainsultan/ibis
|
cc6cfd49ce9ead3e793734b8e56b87040be093fc
|
[
"Apache-2.0"
] | null | null | null |
ibis/tests/expr/test_signature.py
|
hussainsultan/ibis
|
cc6cfd49ce9ead3e793734b8e56b87040be093fc
|
[
"Apache-2.0"
] | null | null | null |
from inspect import Signature
import pytest
from toolz import identity
from ibis.expr.signature import (
Annotable,
Argument,
Optional,
Parameter,
Validator,
)
from ibis.tests.util import assert_pickle_roundtrip
class ValidatorFunction(Validator):
def __init__(self, fn):
self.fn = fn
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
class InstanceOf(Validator):
def __init__(self, typ):
self.typ = typ
def __call__(self, arg, **kwargs):
if not isinstance(arg, self.typ):
raise TypeError(self.typ)
return arg
IsAny = InstanceOf(object)
IsBool = InstanceOf(bool)
IsFloat = InstanceOf(float)
IsInt = InstanceOf(int)
IsStr = InstanceOf(str)
class Op(Annotable):
__slots__ = ('_cache', '_hash')
class ValueOp(Op):
arg = InstanceOf(object)
class StringOp(ValueOp):
arg = InstanceOf(str)
class MagicString(StringOp):
foo = Argument(str)
bar = Argument(bool)
baz = Argument(int)
def test_argument_is_deprecated():
msg = r".*Argument.* is deprecated .* v3\.0; use Validator\."
with pytest.warns(FutureWarning, match=msg):
Argument(str)
@pytest.mark.parametrize('validator', [3, 'coerce'])
def test_invalid_validator(validator):
with pytest.raises(TypeError):
Argument(validator)
def test_invalid_arity_validator():
arg = Argument(lambda x, y: x + y)
with pytest.raises(TypeError):
arg('value')
def test_argument_raise_on_missing_value():
validator = Argument(lambda x: x)
expected_msg = "missing 1 required positional argument"
with pytest.raises(TypeError, match=expected_msg):
validator()
expected_msg = "got an unexpected keyword argument 'name'"
with pytest.raises(TypeError, match=expected_msg):
validator(name='mandatory')
@pytest.mark.parametrize(
('default', 'expected'),
[(None, None), (0, 0), ('default', 'default'), (lambda: 3, 3)],
)
def test_optional_argument(default, expected):
validator = Optional(lambda x: x, default=default)
assert validator(None) == expected
@pytest.mark.parametrize(
('validator', 'value', 'expected'),
[
(Optional(identity, default=None), None, None),
(Optional(identity, default=None), 'three', 'three'),
(Optional(identity, default=1), None, 1),
(Optional(identity, default=lambda: 8), 'cat', 'cat'),
(Optional(identity, default=lambda: 8), None, 8),
(Optional(int, default=11), None, 11),
(Optional(int, default=None), None, None),
(Optional(int, default=None), 18, 18),
(Optional(str, default=None), 'caracal', 'caracal'),
],
)
def test_valid_optional(validator, value, expected):
assert validator(value) == expected
@pytest.mark.parametrize(
('arg', 'value', 'expected'),
[
(Optional(IsInt, default=''), None, TypeError),
(Optional(IsInt), 'lynx', TypeError),
],
)
def test_invalid_optional(arg, value, expected):
with pytest.raises(expected):
arg(value)
def test_annotable():
class Between(Annotable):
value = IsInt
lower = Optional(IsInt, default=0)
upper = Optional(IsInt, default=None)
class InBetween(Between):
pass
argnames = ('value', 'lower', 'upper')
signature = Between.__signature__
assert isinstance(signature, Signature)
assert tuple(signature.parameters.keys()) == argnames
assert Between.__slots__ == argnames
obj = Between(10, lower=2)
assert obj.value == 10
assert obj.lower == 2
assert obj.upper is None
assert obj.args == (10, 2, None)
assert obj.argnames == argnames
assert obj.__slots__ == ("value", "lower", "upper")
assert not hasattr(obj, "__dict__")
# test that a child without additional arguments doesn't have __dict__
obj = InBetween(10, lower=2)
assert obj.__slots__ == tuple()
assert not hasattr(obj, "__dict__")
def test_maintain_definition_order():
class Between(Annotable):
value = IsInt
lower = Optional(IsInt, default=0)
upper = Optional(IsInt, default=None)
param_names = list(Between.__signature__.parameters.keys())
assert param_names == ['value', 'lower', 'upper']
def test_signature_inheritance():
class IntBinop(Annotable):
left = IsInt
right = IsInt
class FloatAddRhs(IntBinop):
right = IsFloat
class FloatAddClip(FloatAddRhs):
left = IsFloat
clip_lower = Optional(IsInt, default=0)
clip_upper = Optional(IsInt, default=10)
class IntAddClip(FloatAddClip, IntBinop):
pass
assert IntBinop.__signature__ == Signature(
[
Parameter('left', validator=IsInt),
Parameter('right', validator=IsInt),
]
)
assert FloatAddRhs.__signature__ == Signature(
[
Parameter('left', validator=IsInt),
Parameter('right', validator=IsFloat),
]
)
assert FloatAddClip.__signature__ == Signature(
[
Parameter('left', validator=IsFloat),
Parameter('right', validator=IsFloat),
Parameter('clip_lower', validator=Optional(IsInt, default=0)),
Parameter('clip_upper', validator=Optional(IsInt, default=10)),
]
)
assert IntAddClip.__signature__ == Signature(
[
Parameter('left', validator=IsInt),
Parameter('right', validator=IsInt),
Parameter('clip_lower', validator=Optional(IsInt, default=0)),
Parameter('clip_upper', validator=Optional(IsInt, default=10)),
]
)
def test_positional_argument_reordering():
class Farm(Annotable):
ducks = IsInt
donkeys = IsInt
horses = IsInt
goats = IsInt
chickens = IsInt
class NoHooves(Farm):
horses = Optional(IsInt, default=0)
goats = Optional(IsInt, default=0)
donkeys = Optional(IsInt, default=0)
f1 = Farm(1, 2, 3, 4, 5)
f2 = Farm(1, 2, goats=4, chickens=5, horses=3)
f3 = Farm(1, 0, 0, 0, 100)
assert f1 == f2
assert f1 != f3
g1 = NoHooves(1, 2, donkeys=-1)
assert g1.ducks == 1
assert g1.chickens == 2
assert g1.donkeys == -1
assert g1.horses == 0
assert g1.goats == 0
def test_copy_default():
default = []
class Op(Annotable):
arg = Optional(InstanceOf(list), default=default)
op = Op()
assert op.arg is not default
def test_slots_are_inherited_and_overridable():
class Op(Annotable):
__slots__ = ('_cache',) # first definition
arg = ValidatorFunction(lambda x: x)
class StringOp(Op):
arg = ValidatorFunction(str) # new overridden slot
class StringSplit(StringOp):
sep = ValidatorFunction(str) # new slot
class StringJoin(StringOp):
__slots__ = ('_memoize',) # new slot
sep = ValidatorFunction(str) # new overridden slot
assert Op.__slots__ == ('_cache', 'arg')
assert StringOp.__slots__ == ('arg',)
assert StringSplit.__slots__ == ('sep',)
assert StringJoin.__slots__ == ('_memoize', 'sep')
def test_multiple_inheritance():
# multiple inheritance is allowed only if one of the parents has non-empty
# __slots__ definition, otherwise python will raise lay-out conflict
class Op(Annotable):
__slots__ = ('_hash',)
class ValueOp(Annotable):
arg = InstanceOf(object)
class Reduction(ValueOp):
_reduction = True
class UDF(ValueOp):
func = ValidatorFunction(lambda fn, this: fn)
class UDAF(UDF, Reduction):
arity = IsInt
class A(Annotable):
a = IsInt
class B(Annotable):
b = IsInt
msg = "multiple bases have instance lay-out conflict"
with pytest.raises(TypeError, match=msg):
class AB(A, B):
ab = IsInt
assert UDAF.__slots__ == ('arity',)
strlen = UDAF(arg=2, func=lambda value: len(str(value)), arity=1)
assert strlen.arg == 2
assert strlen.arity == 1
assert strlen._reduction is True
@pytest.mark.parametrize(
"obj",
[
MagicString(arg="something", foo="magic", bar=True, baz=8),
Parameter("test"),
],
)
def test_pickling_support(obj):
assert_pickle_roundtrip(obj)
def test_multiple_inheritance_argument_order():
class ValueOp(Annotable):
arg = IsAny
class VersionedOp(ValueOp):
version = IsInt
class Reduction(Annotable):
_reduction = True
class Sum(VersionedOp, Reduction):
where = Optional(IsBool, default=False)
assert Sum._reduction is True
assert str(Sum.__signature__) == "(arg, version, where=None)"
def test_multiple_inheritance_optional_argument_order():
class ValueOp(Annotable):
pass
class ConditionalOp(Annotable):
where = Optional(IsBool, default=False)
class Between(ValueOp, ConditionalOp):
min = IsInt
max = IsInt
how = Optional(IsStr, default="strict")
assert str(Between.__signature__) == "(min, max, where=None, how=None)"
| 25.824859 | 78 | 0.63531 |
d5187f01708a4f176f5149c2db54abe75dfed1d8
| 3,751 |
py
|
Python
|
example.py
|
jaxxtrend/NodeGraphQt
|
86dca1efac5a576eeda3bb361b330d490a4be89b
|
[
"MIT"
] | null | null | null |
example.py
|
jaxxtrend/NodeGraphQt
|
86dca1efac5a576eeda3bb361b330d490a4be89b
|
[
"MIT"
] | null | null | null |
example.py
|
jaxxtrend/NodeGraphQt
|
86dca1efac5a576eeda3bb361b330d490a4be89b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
from NodeGraphQt import NodeGraph
from NodeGraphQt import BaseNode
from NodeGraphQt import BackdropNode
from NodeGraphQt import NodeTreeWidget
from NodeGraphQt import setup_context_menu
from NodeGraphQt import PropertiesBinWidget
from NodeGraphQt import QtCore
from NodeGraphQt import QtWidgets
# import example nodes from the "example_nodes" package
from example_nodes import basic_nodes, widget_nodes
class MyNode(BaseNode):
"""
example test node.
"""
# set a unique node identifier.
__identifier__ = 'com.chantasticvfx'
# set the initial default node name.
NODE_NAME = 'my node'
def __init__(self):
super(MyNode, self).__init__()
self.set_color(25, 58, 51)
# create input and output port.
self.add_input('in port')
self.add_output('out port')
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
# create node graph.
graph = NodeGraph()
# set up default menu and commands.
setup_context_menu(graph)
# viewer widget used for the node graph.
viewer = graph.viewer()
viewer.resize(1100, 800)
viewer.show()
# show the properties bin when a node is "double clicked" in the graph.
properties_bin = PropertiesBinWidget(node_graph=graph)
properties_bin.setWindowFlags(QtCore.Qt.Tool)
def show_prop_bin(node):
if not properties_bin.isVisible():
properties_bin.show()
graph.node_double_clicked.connect(show_prop_bin)
# show the nodes list when a node is "double clicked" in the graph.
node_tree = NodeTreeWidget(node_graph=graph)
def show_nodes_list(node):
if not node_tree.isVisible():
node_tree.update()
node_tree.show()
graph.node_double_clicked.connect(show_nodes_list)
# registered nodes.
reg_nodes = [
BackdropNode, MyNode,
basic_nodes.FooNode,
basic_nodes.BarNode,
widget_nodes.DropdownMenuNode,
widget_nodes.TextInputNode,
widget_nodes.CheckboxNode
]
for n in reg_nodes:
graph.register_node(n)
my_node = graph.create_node('com.chantasticvfx.MyNode',
name='chantastic!',
color='#0a1e20',
text_color='#feab20',
pos=[310, 10])
foo_node = graph.create_node('com.chantasticvfx.FooNode',
name='node',
pos=[-480, 140])
foo_node.set_disabled(True)
# create example "TextInputNode".
text_node = graph.create_node('com.chantasticvfx.TextInputNode',
name='text node',
pos=[-480, -160])
# create example "TextInputNode".
checkbox_node = graph.create_node('com.chantasticvfx.CheckboxNode',
name='checkbox node',
pos=[-480, -60])
# create node with a combo box menu.
menu_node = graph.create_node('com.chantasticvfx.DropdownMenuNode',
name='menu node',
pos=[280, -200])
# change node icon.
this_path = os.path.dirname(os.path.abspath(__file__))
icon = os.path.join(this_path, 'example_nodes', 'pear.png')
bar_node = graph.create_node('com.chantasticvfx.BarNode')
bar_node.set_icon(icon)
bar_node.set_name('icon node')
bar_node.set_pos(-70, 10)
# connect the nodes
foo_node.set_output(0, bar_node.input(2))
menu_node.set_input(0, bar_node.output(1))
bar_node.set_input(0, text_node.output(0))
app.exec_()
| 30.495935 | 75 | 0.620368 |
17d180e401adccbce8af6ab0fa1806ca467d512d
| 17,967 |
py
|
Python
|
Ridiculous_Pullup/cup_movement1.py
|
hpladds/Crop_Circle_Coffeemaker_V1.0
|
22ab7fb162475106675d052bcb81210dd7fe6c1f
|
[
"MIT"
] | null | null | null |
Ridiculous_Pullup/cup_movement1.py
|
hpladds/Crop_Circle_Coffeemaker_V1.0
|
22ab7fb162475106675d052bcb81210dd7fe6c1f
|
[
"MIT"
] | null | null | null |
Ridiculous_Pullup/cup_movement1.py
|
hpladds/Crop_Circle_Coffeemaker_V1.0
|
22ab7fb162475106675d052bcb81210dd7fe6c1f
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as GPIO
import time
import sys
import math
#from math import cos, sin, radians, sqrt, pow
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
coil_A_1_pin1 = 27 #orange
coil_A_2_pin1 = 22 #yellow
coil_B_1_pin1 = 10 #pink
coil_B_2_pin1 = 9 #blue
coil_A_1_pin3 = 21 #orange
coil_A_2_pin3 = 20 #yellow
coil_B_1_pin3 = 16 #pink
coil_B_2_pin3 = 12 #blue
coil_A_1_pin4 = 7 #orange
coil_A_2_pin4 = 8 #yellow
coil_B_1_pin4 = 25 #pink
coil_B_2_pin4 = 24 #blue
delay = float(2.65)#Can't be smaller than 1 or miss steps. If increase, slows down movements
stepSize = 0.005737 #inches per step-- need to change this if change spool size on steppers
xHome = 4.9125 #inches-- home (center of triangle)
yHome = 2.9375 #inches-- home (center of triangle)
x = xHome
y = yHome
X3 = 5.01 #position of stepper3
Y3 = 8.69 #position of stepper3
X4 = 9.825 #position of stepper4
step1Leftover = 0
step3Leftover = 0
step4Leftover = 0
#if you run file without current_coords.txt in folder (or not accurate)
#comment out from here
F = open("/home/pi/Ridiculous/current_coords.txt","r")
x = float(F.readline())
y = float(F.readline())
step1Leftover = float(F.readline())
step3Leftover = float(F.readline())
step4Leftover = float(F.readline())
F.close()
#to here -- then uncomment for further runs
#distance = float(sys.argv [1]) #inches
#angle = float(sys.argv [2]) #degrees
#delay = float (sys.argv [1])
#steps = sys.argv [2]
GPIO.setup(coil_A_1_pin1, GPIO.OUT)
GPIO.setup(coil_A_2_pin1, GPIO.OUT)
GPIO.setup(coil_B_1_pin1, GPIO.OUT)
GPIO.setup(coil_B_2_pin1, GPIO.OUT)
GPIO.setup(coil_A_1_pin3, GPIO.OUT)
GPIO.setup(coil_A_2_pin3, GPIO.OUT)
GPIO.setup(coil_B_1_pin3, GPIO.OUT)
GPIO.setup(coil_B_2_pin3, GPIO.OUT)
GPIO.setup(coil_A_1_pin4, GPIO.OUT)
GPIO.setup(coil_A_2_pin4, GPIO.OUT)
GPIO.setup(coil_B_1_pin4, GPIO.OUT)
GPIO.setup(coil_B_2_pin4, GPIO.OUT)
#deltaX = distance*math.cos(math.radians(angle))
#deltaY = distance*math.sin(math.radians(angle))
#deltaL1 = deltaX*sqrt(1+pow((deltaY/deltaX),2))
#if (deltaX == 0)
#deltaL1 = deltaX*math.sqrt(1+math.pow((deltaX/deltaY),2))
#The problem here seems to be in the square root and syntax for float vs int.
#print deltaX
#print deltaY
#print deltaL1
def moveStep(delay, step1, step3, step4):
if step1 == 1:
setStep1(1, 0, 0, 0)
else:
if step1 == -1:
setStep1(1, 0, 0, 1)
if step3 == 1:
setStep3(1, 0, 0, 0)
else:
if step3 == -1:
setStep3(1, 0, 0, 1)
if step4 == 1:
setStep4(1, 0, 0, 0)
else:
if step4 == -1:
setStep4(1, 0, 0, 1)
#first step done
time.sleep(delay)
if step1 == 1:
setStep1(1, 1, 0, 0)
else:
if step1 == -1:
setStep1(0, 0, 0, 1)
if step3 == 1:
setStep3(1, 1, 0, 0)
else:
if step3 == -1:
setStep3(0, 0, 0, 1)
if step4 == 1:
setStep4(1, 1, 0, 0)
else:
if step4 == -1:
setStep4(0, 0, 0, 1)
#second step done
time.sleep(delay)
if step1 == 1:
setStep1(0, 1, 0, 0)
else:
if step1 == -1:
setStep1(0, 0, 1, 1)
if step3 == 1:
setStep3(0, 1, 0, 0)
else:
if step3 == -1:
setStep3(0, 0, 1, 1)
if step1 == 1:
setStep1(0, 1, 0, 0)
else:
if step1 == -1:
setStep1(0, 0, 1, 1)
#third step done
time.sleep(delay)
if step1 == 1:
setStep1(0, 1, 1, 0)
else:
if step1 == -1:
setStep1(0, 0, 1, 0)
if step3 == 1:
setStep3(0, 1, 1, 0)
else:
if step3 == -1:
setStep3(0, 0, 1, 0)
if step4 == 1:
setStep4(0, 1, 1, 0)
else:
if step4 == -1:
setStep4(0, 0, 1, 0)
#fourth step done
time.sleep(delay)
if step1 == 1:
setStep1(0, 0, 1, 0)
else:
if step1 == -1:
setStep1(0, 1, 1, 0)
if step3 == 1:
setStep3(0, 0, 1, 0)
else:
if step3 == -1:
setStep3(0, 1, 1, 0)
if step4 == 1:
setStep4(0, 0, 1, 0)
else:
if step4 == -1:
setStep4(0, 1, 1, 0)
#fifth step done
time.sleep(delay)
if step1 == 1:
setStep1(0, 0, 1, 1)
else:
if step1 == -1:
setStep1(0, 1, 0, 0)
if step3 == 1:
setStep3(0, 0, 1, 1)
else:
if step3 == -1:
setStep3(0, 1, 0, 0)
if step4 == 1:
setStep4(0, 0, 1, 1)
else:
if step4 == -1:
setStep4(0, 1, 0, 0)
#sixth step done
time.sleep(delay)
if step1 == 1:
setStep1(0, 0, 0, 1)
else:
if step1 == -1:
setStep1(1, 1, 0, 0)
if step3 == 1:
setStep3(0, 0, 0, 1)
else:
if step3 == -1:
setStep3(1, 1, 0, 0)
if step4 == 1:
setStep4(0, 0, 0, 1)
else:
if step4 == -1:
setStep4(1, 1, 0, 0)
#seventh step done
time.sleep(delay)
if step1 == 1:
setStep1(1, 0, 0, 1)
else:
if step1 == -1:
setStep1(1, 0, 0, 0)
if step3 == 1:
setStep3(1, 0, 0, 1)
else:
if step3 == -1:
setStep3(1, 0, 0, 0)
if step4 == 1:
setStep4(1, 0, 0, 1)
else:
if step4 == -1:
setStep4(1, 0, 0, 0)
#eigth step done
time.sleep(delay)
def setStep1(w1, w2, w3, w4):
GPIO.output(coil_A_1_pin1, w1)
GPIO.output(coil_A_2_pin1, w2)
GPIO.output(coil_B_1_pin1, w3)
GPIO.output(coil_B_2_pin1, w4)
def setStep3(w1, w2, w3, w4):
GPIO.output(coil_A_1_pin3, w1)
GPIO.output(coil_A_2_pin3, w2)
GPIO.output(coil_B_1_pin3, w3)
GPIO.output(coil_B_2_pin3, w4)
def setStep4(w1, w2, w3, w4):
GPIO.output(coil_A_1_pin4, w1)
GPIO.output(coil_A_2_pin4, w2)
GPIO.output(coil_B_1_pin4, w3)
GPIO.output(coil_B_2_pin4, w4)
def moveLine (distance, angle):
#steps = int(distance/stepSize) #need to add the remainder to loffsets for next move
distance = float(distance)
angle = float(angle)
steps = int(abs(distance/stepSize))
deltaX = distance*math.cos(math.radians(angle))
deltaY = distance*math.sin(math.radians(angle))
global x
global y
global step1Leftover
global step3Leftover
global step4Leftover
for i in range(0, steps):
step1 = 0
step3 = 0
step4 = 0
deltaL1 = (x/math.sqrt(math.pow(x,2)+math.pow(y,2)))*(deltaX/steps)+(y/math.sqrt(math.pow(x,2)+math.pow(y,2)))*((deltaY/steps))
deltaL1 = deltaL1+step1Leftover
#print "Delta1: "
#print deltaL1
#print "step1Leftover: "
#print step1Leftover
if deltaL1 >= stepSize:
step1 = 1
step1Leftover = deltaL1-stepSize #deltaL1%stepSize
elif deltaL1 <= -stepSize:
step1 = -1
step1Leftover = deltaL1+stepSize #deltaL1%-stepSize
else:
step1Leftover = deltaL1
#print "1 didn't move"
deltaL3 = -((X3-x)/math.sqrt(math.pow(x,2)-2*X3*x+math.pow(y,2)-2*Y3*y+math.pow(X3,2)+math.pow(Y3,2)))*(deltaX/steps)-((Y3-y)/math.sqrt(math.pow(x,2)-2*X3*x+math.pow(y,2)-2*Y3*y+math.pow(X3,2)+math.pow(Y3,2)))*(deltaY/steps)
#print "Delta3: "
#print deltaL3
deltaL3 = deltaL3+step3Leftover
if deltaL3 >= stepSize:
step3 = 1
step3Leftover = deltaL3-stepSize #deltaL3%stepSize
elif deltaL3 <= -stepSize:
step3 = -1
step3Leftover = deltaL3+stepSize #deltaL3%-stepSize
else:
step3Leftover = deltaL3
#print "3 didn't move"
deltaL4 = -((X4-x)/math.sqrt(math.pow(x,2)-2*X4*x+math.pow(y,2)+math.pow(X4,2)))*(deltaX/steps)+(y/(math.sqrt(math.pow(x,2)-2*X4*x+math.pow(y,2)+math.pow(X4,2))))*(deltaY/steps)
#print "Delta4: "
#print deltaL4
deltaL4 = deltaL4+step4Leftover
if deltaL4 >= stepSize:
step4 = 1
step4Leftover = deltaL4-stepSize #deltaL4%stepSize
#need to add code to update loffsets if deltaL1 < 0 or if there is a remainder
elif deltaL4 <= -stepSize:
step4 = -1
step4Leftover = deltaL4+stepSize #deltaL4%-stepSize
#need to add code to update loffsets if deltaL1 < 0 or if there is a remainder
else:
step4Leftover = deltaL4
#print "4 didn't move"
moveStep(float(delay)/1000.0, step1, step3, step4)
#print "x: "
#print x
#print "y: "
#print y
x = x + deltaX/steps
y = y + deltaY/steps
def moveCircle(degrees):
global x
global y
global step1Leftover
global step3Leftover
global step4Leftover
radius = math.sqrt(math.pow((x-xHome),2)+math.pow((y-yHome),2))
startAngle = 0
if y >= yHome:
startAngle = math.acos((x-xHome)/radius)*180/math.pi #degrees
elif y < yHome:
startAngle = 360 - math.acos((x-xHome)/radius)*180/math.pi #degrees
angle = startAngle
degrees = float(degrees)
steps = int(abs(((degrees/360)*2*math.pi*radius)/stepSize))
for i in range(0, steps):
step1 = 0
step3 = 0
step4 = 0
deltaX = radius*math.cos((angle+degrees/steps)*math.pi/180)-radius*math.cos((angle)*math.pi/180)
deltaY = radius*math.sin((angle+degrees/steps)*math.pi/180)-radius*math.sin((angle)*math.pi/180)
deltaL1 = (x/math.sqrt(math.pow(x,2)+math.pow(y,2)))*(deltaX)+(y/math.sqrt(math.pow(x,2)+math.pow(y,2)))*((deltaY))
deltaL1 = deltaL1+step1Leftover
if deltaL1 >= stepSize:
step1 = 1
step1Leftover = deltaL1-stepSize #deltaL1%stepSize
elif deltaL1 <= -stepSize:
step1 = -1
step1Leftover = deltaL1+stepSize #deltaL1%-stepSize
else:
step1Leftover = deltaL1
#print "1 didn't move"
deltaL3 = -((X3-x)/math.sqrt(math.pow(x,2)-2*X3*x+math.pow(y,2)-2*Y3*y+math.pow(X3,2)+math.pow(Y3,2)))*(deltaX)-((Y3-y)/math.sqrt(math.pow(x,2)-2*X3*x+math.pow(y,2)-2*Y3*y+math.pow(X3,2)+math.pow(Y3,2)))*(deltaY)
deltaL3 = deltaL3+step3Leftover
if deltaL3 >= stepSize:
step3 = 1
step3Leftover = deltaL3-stepSize #deltaL3%stepSize
elif deltaL3 <= -stepSize:
step3 = -1
step3Leftover = deltaL3+stepSize #deltaL3%-stepSize
else:
step3Leftover = deltaL3
#print "3 didn't move"
deltaL4 = -((X4-x)/math.sqrt(math.pow(x,2)-2*X4*x+math.pow(y,2)+math.pow(X4,2)))*(deltaX)+(y/(math.sqrt(math.pow(x,2)-2*X4*x+math.pow(y,2)+math.pow(X4,2))))*(deltaY)
deltaL4 = deltaL4+step4Leftover
if deltaL4 >= stepSize:
step4 = 1
step4Leftover = deltaL4-stepSize #deltaL4%stepSize
#need to add code to update loffsets if deltaL1 < 0 or if there is a remainder
elif deltaL4 <= -stepSize:
step4 = -1
step4Leftover = deltaL4+stepSize #deltaL4%-stepSize
#need to add code to update loffsets if deltaL1 < 0 or if there is a remainder
else:
step4Leftover = deltaL4
#print "4 didn't move"
moveStep(float(delay)/1000.0, step1, step3, step4)
angle = angle + degrees/steps
x = x + deltaX
y = y + deltaY
def moveCircle2(radius2, degrees):
global x
global y
global step1Leftover
global step3Leftover
global step4Leftover
#radius = math.sqrt(math.pow((x-xHome),2)+math.pow((y-yHome),2))
startAngle = 0
radius = math.sqrt(math.pow((x-xHome),2)+math.pow((y-yHome),2))
if y >= yHome:
startAngle = math.acos((x-xHome)/radius)*180/math.pi #degrees
elif y < yHome:
startAngle = 360 - math.acos((x-xHome)/radius)*180/math.pi #degrees
angle = startAngle
degrees = float(degrees)
steps = int(abs(((degrees/360)*2*math.pi*radius2)/stepSize))
for i in range(0, steps):
step1 = 0
step3 = 0
step4 = 0
deltaX = radius2*math.cos((angle+degrees/steps)*math.pi/180)-radius2*math.cos((angle)*math.pi/180)
deltaY = radius2*math.sin((angle+degrees/steps)*math.pi/180)-radius2*math.sin((angle)*math.pi/180)
deltaL1 = (x/math.sqrt(math.pow(x,2)+math.pow(y,2)))*(deltaX)+(y/math.sqrt(math.pow(x,2)+math.pow(y,2)))*((deltaY))
deltaL1 = deltaL1+step1Leftover
if deltaL1 >= stepSize:
step1 = 1
step1Leftover = deltaL1-stepSize #deltaL1%stepSize
elif deltaL1 <= -stepSize:
step1 = -1
step1Leftover = deltaL1+stepSize #deltaL1%-stepSize
else:
step1Leftover = deltaL1
#print "1 didn't move"
deltaL3 = -((X3-x)/math.sqrt(math.pow(x,2)-2*X3*x+math.pow(y,2)-2*Y3*y+math.pow(X3,2)+math.pow(Y3,2)))*(deltaX)-((Y3-y)/math.sqrt(math.pow(x,2)-2*X3*x+math.pow(y,2)-2*Y3*y+math.pow(X3,2)+math.pow(Y3,2)))*(deltaY)
deltaL3 = deltaL3+step3Leftover
if deltaL3 >= stepSize:
step3 = 1
step3Leftover = deltaL3-stepSize #deltaL3%stepSize
elif deltaL3 <= -stepSize:
step3 = -1
step3Leftover = deltaL3+stepSize #deltaL3%-stepSize
else:
step3Leftover = deltaL3
#print "3 didn't move"
deltaL4 = -((X4-x)/math.sqrt(math.pow(x,2)-2*X4*x+math.pow(y,2)+math.pow(X4,2)))*(deltaX)+(y/(math.sqrt(math.pow(x,2)-2*X4*x+math.pow(y,2)+math.pow(X4,2))))*(deltaY)
deltaL4 = deltaL4+step4Leftover
if deltaL4 >= stepSize:
step4 = 1
step4Leftover = deltaL4-stepSize #deltaL4%stepSize
#need to add code to update loffsets if deltaL1 < 0 or if there is a remainder
elif deltaL4 <= -stepSize:
step4 = -1
step4Leftover = deltaL4+stepSize #deltaL4%-stepSize
#need to add code to update loffsets if deltaL1 < 0 or if there is a remainder
else:
step4Leftover = deltaL4
#print "4 didn't move"
moveStep(float(delay)/1000.0, step1, step3, step4)
angle = angle + degrees/steps
x = x + deltaX
y = y + deltaY
def moveCircle3(radius3, degrees, dirCtrAngle):
#This makes direction the direction from current position the center should be 1 radius3 away in the direction dirCtrAngle
global x
global y
global step1Leftover
global step3Leftover
global step4Leftover
#radius = math.sqrt(math.pow((x-xHome),2)+math.pow((y-yHome),2))
startAngle = 0
#radius = math.sqrt(math.pow((x-xHome),2)+math.pow((y-yHome),2))
if 0 <= dirCtrAngle < 180:
startAngle = dirCtrAngle + 180 #degrees
elif 180 <= dirCtrAngle < 360 :
startAngle = dirCtrAngle - 180 #degrees
angle = startAngle
degrees = float(degrees)
steps = int(abs(((degrees/360)*2*math.pi*radius3)/stepSize))
for i in range(0, steps):
step1 = 0
step3 = 0
step4 = 0
deltaX = radius3*math.cos((angle+degrees/steps)*math.pi/180)-radius3*math.cos((angle)*math.pi/180)
deltaY = radius3*math.sin((angle+degrees/steps)*math.pi/180)-radius3*math.sin((angle)*math.pi/180)
deltaL1 = (x/math.sqrt(math.pow(x,2)+math.pow(y,2)))*(deltaX)+(y/math.sqrt(math.pow(x,2)+math.pow(y,2)))*((deltaY))
deltaL1 = deltaL1+step1Leftover
if deltaL1 >= stepSize:
step1 = 1
step1Leftover = deltaL1-stepSize #deltaL1%stepSize
elif deltaL1 <= -stepSize:
step1 = -1
step1Leftover = deltaL1+stepSize #deltaL1%-stepSize
else:
step1Leftover = deltaL1
#print "1 didn't move"
deltaL3 = -((X3-x)/math.sqrt(math.pow(x,2)-2*X3*x+math.pow(y,2)-2*Y3*y+math.pow(X3,2)+math.pow(Y3,2)))*(deltaX)-((Y3-y)/math.sqrt(math.pow(x,2)-2*X3*x+math.pow(y,2)-2*Y3*y+math.pow(X3,2)+math.pow(Y3,2)))*(deltaY)
deltaL3 = deltaL3+step3Leftover
if deltaL3 >= stepSize:
step3 = 1
step3Leftover = deltaL3-stepSize #deltaL3%stepSize
elif deltaL3 <= -stepSize:
step3 = -1
step3Leftover = deltaL3+stepSize #deltaL3%-stepSize
else:
step3Leftover = deltaL3
#print "3 didn't move"
deltaL4 = -((X4-x)/math.sqrt(math.pow(x,2)-2*X4*x+math.pow(y,2)+math.pow(X4,2)))*(deltaX)+(y/(math.sqrt(math.pow(x,2)-2*X4*x+math.pow(y,2)+math.pow(X4,2))))*(deltaY)
deltaL4 = deltaL4+step4Leftover
if deltaL4 >= stepSize:
step4 = 1
step4Leftover = deltaL4-stepSize #deltaL4%stepSize
#need to add code to update loffsets if deltaL1 < 0 or if there is a remainder
elif deltaL4 <= -stepSize:
step4 = -1
step4Leftover = deltaL4+stepSize #deltaL4%-stepSize
#need to add code to update loffsets if deltaL1 < 0 or if there is a remainder
else:
step4Leftover = deltaL4
#print "4 didn't move"
moveStep(float(delay)/1000.0, step1, step3, step4)
angle = angle + degrees/steps
x = x + deltaX
y = y + deltaY
def goHome():
    """Move the pour head straight back to the home position (xHome, yHome).

    Computes the distance and direction from the current position (module
    globals ``x``/``y``) to home and delegates the actual motion to
    ``moveLine``, which updates the position and leftover-step globals
    declared here.
    """
    global x
    global y
    global step1Leftover
    global step3Leftover
    global step4Leftover
    dx = x - xHome
    dy = y - yHome
    radius = math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))
    # Bug fix: the original divided by radius unconditionally, raising
    # ZeroDivisionError when the head is already at home. Skip the move.
    if radius == 0:
        return
    if y >= yHome:
        startAngle = math.acos(dx / radius) * 180 / math.pi  # degrees
    else:
        startAngle = 360 - math.acos(dx / radius) * 180 / math.pi  # degrees
    moveLine(radius, 180 + startAngle)
while True:
    ######################### Start movement code.
    # Each call is moveLine(distance_inches, direction_degrees).
    moveLine(.625, 337.5)   # move pour to the west
    moveLine(.5, 0)         # move pour to the west
    moveLine(.625, 22.5)    # move pour to southwest 22.5 degrees
    moveLine(.625, 45)      # move pour to southwest 45 degrees
    moveLine(.625, 135)     # pour to southeast 45 degrees
    moveLine(.625, 157.5)   # pour to southeast 22.5 degrees
    moveLine(.5, 180)       # pour to east
    moveLine(.625, 202.5)   # pour to northeast 22.5 degrees
    moveLine(.83, 270)      # pour to north; less .25 to accommodate the spout's curve
    moveLine(1.66, 0)       # pour to the west
    moveLine(.88, 90)       # pour to the south
    moveLine(1.66, 180)     # pour to the east
    moveLine(.88, 270)      # pour to the north
    # Second pass of the same pattern (first northward leg is .88 here).
    moveLine(.625, 337.5)
    moveLine(.5, 0)
    moveLine(.625, 22.5)
    moveLine(.625, 45)
    moveLine(.625, 135)
    moveLine(.625, 157.5)
    moveLine(.5, 180)
    moveLine(.625, 202.5)
    moveLine(.88, 270)
    moveLine(1.66, 0)
    moveLine(.88, 90)
    moveLine(1.66, 180)
    moveLine(.88, 270)
    ######################### End movement code.
    # Persist position and leftover-step state, one value per line.
    # Fix: use a context manager so the file is closed even if a write
    # fails (the original left the handle open on an exception).
    with open("/home/pi/Ridiculous/current_coords.txt", "w") as F:
        F.write(str(x))
        F.write("\n")
        F.write(str(y))
        F.write("\n")
        F.write(str(step1Leftover))
        F.write("\n")
        F.write(str(step3Leftover))
        F.write("\n")
        F.write(str(step4Leftover))
    break
| 29.648515 | 228 | 0.631435 |
35d2ebc9b9f78694a33df22f9db436a532dbf1e5
| 9,163 |
py
|
Python
|
pytorchvideo/layers/convolutions.py
|
kevinmtian/pytorchvideo
|
168e16859a6029ef8ebeb476f9163bebb6c6b87d
|
[
"Apache-2.0"
] | 2,391 |
2021-04-13T18:10:18.000Z
|
2022-03-31T15:07:09.000Z
|
pytorchvideo/layers/convolutions.py
|
kevinmtian/pytorchvideo
|
168e16859a6029ef8ebeb476f9163bebb6c6b87d
|
[
"Apache-2.0"
] | 156 |
2021-04-13T18:51:49.000Z
|
2022-03-31T08:05:50.000Z
|
pytorchvideo/layers/convolutions.py
|
kevinmtian/pytorchvideo
|
168e16859a6029ef8ebeb476f9163bebb6c6b87d
|
[
"Apache-2.0"
] | 231 |
2021-04-14T05:04:55.000Z
|
2022-03-22T09:35:46.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import Callable, Optional, Tuple
import torch
import torch.nn as nn
from pytorchvideo.layers.utils import set_attributes
from torch.nn.common_types import _size_3_t
class ConvReduce3D(nn.Module):
    """
    Builds a list of convolutional operators and performs summation on the outputs.

    ::

                            Conv3d, Conv3d, ...,  Conv3d
                                           ↓
                                          Sum
    """

    def __init__(
        self,
        *,
        in_channels: int,
        out_channels: int,
        kernel_size: Tuple[_size_3_t],
        stride: Optional[Tuple[_size_3_t]] = None,
        padding: Optional[Tuple[_size_3_t]] = None,
        padding_mode: Optional[Tuple[str]] = None,
        dilation: Optional[Tuple[_size_3_t]] = None,
        groups: Optional[Tuple[int]] = None,
        bias: Optional[Tuple[bool]] = None,
        reduction_method: str = "sum",
    ) -> None:
        """
        Args:
            in_channels int: number of input channels.
            out_channels int: number of output channels produced by the convolution(s).
            kernel_size tuple(_size_3_t): Tuple of sizes of the convolutional kernels,
                one entry per branch.
            stride tuple(_size_3_t): Tuple of strides of the convolutions.
            padding tuple(_size_3_t): Tuple of paddings added to all three sides of the
                input.
            padding_mode tuple(string): Tuple of padding modes for each conv.
                Options include `zeros`, `reflect`, `replicate` or `circular`.
            dilation tuple(_size_3_t): Tuple of spacings between kernel elements.
            groups tuple(_size_3_t): Tuple of numbers of blocked connections from input
                channels to output channels.
            bias tuple(bool): If `True`, adds a learnable bias to the output.
            reduction_method str: Options include `sum` and `cat`.
        """
        super().__init__()
        assert reduction_method in ("sum", "cat")
        self.reduction_method = reduction_method

        # Optional per-branch settings: each entry is either None (keep the
        # nn.Conv3d default for every branch) or a tuple with one value per
        # branch, where individual values may also be None.
        optional_params = {
            "stride": stride,
            "padding": padding,
            "dilation": dilation,
            "groups": groups,
            "bias": bias,
            "padding_mode": padding_mode,
        }
        conv_list = []
        for ind in range(len(kernel_size)):
            conv_param = {
                "in_channels": in_channels,
                "out_channels": out_channels,
                "kernel_size": kernel_size[ind],
            }
            for key, values in optional_params.items():
                if values is not None and values[ind] is not None:
                    conv_param[key] = values[ind]
            conv_list.append(nn.Conv3d(**conv_param))
        self.convs = nn.ModuleList(conv_list)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Run every branch on the same input, then reduce across branches.
        output = [conv(x) for conv in self.convs]
        if self.reduction_method == "sum":
            return torch.stack(output, dim=0).sum(dim=0, keepdim=False)
        # reduction_method == "cat": concatenate along the channel dimension.
        return torch.cat(output, dim=1)
def create_conv_2plus1d(
    *,
    # Conv configs.
    in_channels: int,
    out_channels: int,
    inner_channels: int = None,
    conv_xy_first: bool = False,
    kernel_size: Tuple[int] = (3, 3, 3),
    stride: Tuple[int] = (2, 2, 2),
    padding: Tuple[int] = (1, 1, 1),
    bias: bool = False,
    dilation: Tuple[int] = (1, 1, 1),
    groups: int = 1,
    # BN configs.
    norm: Callable = nn.BatchNorm3d,
    norm_eps: float = 1e-5,
    norm_momentum: float = 0.1,
    # Activation configs.
    activation: Callable = nn.ReLU,
) -> nn.Module:
    """
    Build a (2+1)d conv layer: a factorized temporal and spatial convolution
    with normalization and activation between the two factors.

    ::

                        Conv_t (or Conv_xy if conv_xy_first = True)
                                           ↓
                                     Normalization
                                           ↓
                                       Activation
                                           ↓
                        Conv_xy (or Conv_t if conv_xy_first = True)

    Normalization options include: BatchNorm3d and None (no normalization).
    Activation options include: ReLU, Softmax, Sigmoid, and None (no activation).

    Args:
        in_channels (int): input channel size of the convolution.
        out_channels (int): output channel size of the convolution.
        inner_channels (int): channel size between the two factors; defaults
            to ``out_channels`` when not given.
        conv_xy_first (bool): If True, spatial convolution comes before temporal conv.
        kernel_size (tuple): convolutional kernel size(s).
        stride (tuple): convolutional stride size(s).
        padding (tuple): convolutional padding size(s).
        bias (bool): convolutional bias. If true, adds a learnable bias to the
            output.
        dilation (tuple): dilation value in convolution layers. value >1 is unsupported.
        groups (int): Number of groups in convolution layers. value >1 is unsupported.
        norm (callable): a callable that constructs normalization layer, options
            include nn.BatchNorm3d, None (not performing normalization).
        norm_eps (float): normalization epsilon.
        norm_momentum (float): normalization momentum.
        activation (callable): a callable that constructs activation layer, options
            include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not performing
            activation).

    Returns:
        (nn.Module): 2plus1d conv layer.
    """
    if inner_channels is None:
        inner_channels = out_channels

    assert (
        groups == 1
    ), "Support for groups is not implemented in R2+1 convolution layer"
    assert (
        max(dilation) == 1 and min(dilation) == 1
    ), "Support for dillaiton is not implemented in R2+1 convolution layer"

    # Channel wiring depends on which factor runs first.
    if conv_xy_first:
        t_in, t_out = inner_channels, out_channels
        xy_in, xy_out = in_channels, inner_channels
    else:
        t_in, t_out = in_channels, inner_channels
        xy_in, xy_out = inner_channels, out_channels

    # 1D temporal factor: convolves only over the time axis.
    temporal_conv = nn.Conv3d(
        in_channels=t_in,
        out_channels=t_out,
        kernel_size=(kernel_size[0], 1, 1),
        stride=(stride[0], 1, 1),
        padding=(padding[0], 0, 0),
        bias=bias,
    )
    # 2D spatial factor: convolves only over height and width.
    spatial_conv = nn.Conv3d(
        in_channels=xy_in,
        out_channels=xy_out,
        kernel_size=(1, kernel_size[1], kernel_size[2]),
        stride=(1, stride[1], stride[2]),
        padding=(0, padding[1], padding[2]),
        bias=bias,
    )
    norm_module = None
    if norm is not None:
        norm_module = norm(
            num_features=inner_channels, eps=norm_eps, momentum=norm_momentum
        )
    activation_module = activation() if activation is not None else None
    return Conv2plus1d(
        conv_t=temporal_conv,
        norm=norm_module,
        activation=activation_module,
        conv_xy=spatial_conv,
        conv_xy_first=conv_xy_first,
    )
class Conv2plus1d(nn.Module):
    """
    Factorized (2+1)d Convolution: a 1D temporal Convolution and a 2D spatial
    Convolution with Normalization and Activation module in between:

    ::

                        Conv_t (or Conv_xy if conv_xy_first = True)
                                           ↓
                                     Normalization
                                           ↓
                                       Activation
                                           ↓
                        Conv_xy (or Conv_t if conv_xy_first = True)

    The 2+1d Convolution is used to build the R(2+1)D network.
    """

    def __init__(
        self,
        *,
        conv_t: nn.Module = None,
        norm: nn.Module = None,
        activation: nn.Module = None,
        conv_xy: nn.Module = None,
        conv_xy_first: bool = False,
    ) -> None:
        """
        Args:
            conv_t (torch.nn.modules): temporal convolution module.
            norm (torch.nn.modules): normalization module.
            activation (torch.nn.modules): activation module.
            conv_xy (torch.nn.modules): spatial convolution module.
            conv_xy_first (bool): If True, spatial convolution comes before
                temporal conv.
        """
        super().__init__()
        set_attributes(self, locals())
        assert self.conv_t is not None
        assert self.conv_xy is not None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Apply the two factors in the configured order, with the optional
        # norm/activation sandwiched between them.
        if self.conv_xy_first:
            first, second = self.conv_xy, self.conv_t
        else:
            first, second = self.conv_t, self.conv_xy
        x = first(x)
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return second(x)
| 38.5 | 89 | 0.588235 |
f07af40f6ded3e2cb53b63c252cbab1e5f4330aa
| 2,221 |
py
|
Python
|
tests/m2m_through_regress/test_multitable.py
|
JBKahn/django
|
32265361279b3316f5bce8efa71f2049409461e3
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 5,079 |
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
tests/m2m_through_regress/test_multitable.py
|
287977288/test
|
142e3626ab3c676574631383ae6b5a4eced5a10e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1,623 |
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
tests/m2m_through_regress/test_multitable.py
|
287977288/test
|
142e3626ab3c676574631383ae6b5a4eced5a10e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2,033 |
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
from __future__ import unicode_literals
from django.test import TestCase
from .models import (
CompetingTeam, Event, Group, IndividualCompetitor, Membership, Person,
)
class MultiTableTests(TestCase):
    """M2M-through regression tests covering multi-table inherited targets."""

    @classmethod
    def setUpTestData(cls):
        cls.alice, cls.bob, cls.chris, cls.dan = [
            Person.objects.create(name=name)
            for name in ('Alice', 'Bob', 'Chris', 'Dan')
        ]
        cls.team_alpha = Group.objects.create(name='Alpha')
        for member in (cls.alice, cls.bob):
            Membership.objects.create(person=member, group=cls.team_alpha)
        cls.event = Event.objects.create(name='Exposition Match')
        for solo in (cls.chris, cls.dan):
            IndividualCompetitor.objects.create(event=cls.event, person=solo)
        CompetingTeam.objects.create(event=cls.event, team=cls.team_alpha)

    def test_m2m_query(self):
        self.assertCountEqual(self.event.teams.all(), [self.team_alpha])

    def test_m2m_reverse_query(self):
        self.assertCountEqual(self.chris.event_set.all(), [self.event])

    def test_m2m_query_proxied(self):
        self.assertCountEqual(
            self.event.special_people.all(), [self.chris, self.dan])

    def test_m2m_reverse_query_proxied(self):
        self.assertCountEqual(self.chris.special_event_set.all(), [self.event])

    def test_m2m_prefetch_proxied(self):
        qs = Event.objects.filter(
            name='Exposition Match').prefetch_related('special_people')
        # One query for events plus one for the prefetched people.
        with self.assertNumQueries(2):
            self.assertCountEqual(qs, [self.event])
            names = sorted([p.name for p in qs[0].special_people.all()])
            self.assertEqual(names, ['Chris', 'Dan'])

    def test_m2m_prefetch_reverse_proxied(self):
        qs = Person.objects.filter(
            name='Dan').prefetch_related('special_event_set')
        with self.assertNumQueries(2):
            self.assertCountEqual(qs, [self.dan])
            self.assertEqual(
                [event.name for event in qs[0].special_event_set.all()],
                ['Exposition Match'])
| 42.711538 | 111 | 0.699235 |
18d7182295b130fd860b9460ee1ac341bc29d1ed
| 999 |
py
|
Python
|
2015/day12/day12.py
|
naitmare01/Adventofcode
|
34f2832fa7a18b76cf9827890632740c6f60679c
|
[
"MIT"
] | null | null | null |
2015/day12/day12.py
|
naitmare01/Adventofcode
|
34f2832fa7a18b76cf9827890632740c6f60679c
|
[
"MIT"
] | null | null | null |
2015/day12/day12.py
|
naitmare01/Adventofcode
|
34f2832fa7a18b76cf9827890632740c6f60679c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import re
import json
def arguments():
    """Parse the command line; the puzzle input path is required via -f/--file."""
    parser = argparse.ArgumentParser(description='Adventofcode.')
    parser.add_argument('-f', '--file', required=True)
    return parser.parse_args()
def calculate(json_data):
    """Recursively sum every int in a parsed JSON structure, skipping any
    dict (at any depth) whose values contain "red" (AoC 2015 day 12, part 2).

    Strings and other non-numeric leaves contribute 0.
    """
    # Bools are excluded explicitly: isinstance(True, int) is True, but the
    # original type() check treated bools as non-numbers — preserve that.
    if isinstance(json_data, bool):
        return 0
    if isinstance(json_data, int):
        return json_data
    if isinstance(json_data, list):
        # Fixed: the original comprehension shadowed `json_data` as its loop
        # variable; use a distinct name for clarity.
        return sum(calculate(item) for item in json_data)
    if not isinstance(json_data, dict):
        return 0
    if 'red' in json_data.values():
        return 0
    return calculate(list(json_data.values()))
def main():
    """Solve both parts: part 1 sums every number in the raw text; part 2
    applies the "red"-exclusion rule via calculate()."""
    args = arguments()
    with open(args.file) as handle:
        raw = handle.read().strip()
    part1 = sum(int(number) for number in re.findall(r'-?\d+', raw))
    print("Part1:", part1)
    print("Part2:", calculate(json.loads(raw)))


if __name__ == '__main__':
    main()
| 22.704545 | 69 | 0.646647 |
b7eda156ebf57693a66daee90fa3693629496290
| 2,321 |
py
|
Python
|
model/intialisation.py
|
xhelenfu/Lung-Nodule-Attributes-CT-Network
|
ce8234920b26a16a8a87ab0f78b53a29152d8aae
|
[
"MIT"
] | null | null | null |
model/intialisation.py
|
xhelenfu/Lung-Nodule-Attributes-CT-Network
|
ce8234920b26a16a8a87ab0f78b53a29152d8aae
|
[
"MIT"
] | null | null | null |
model/intialisation.py
|
xhelenfu/Lung-Nodule-Attributes-CT-Network
|
ce8234920b26a16a8a87ab0f78b53a29152d8aae
|
[
"MIT"
] | null | null | null |
from torch.nn import init
def weights_init_normal(m):
    """Draw conv/linear weights from N(0, 0.02); batch-norm weights from
    N(1, 0.02) with zero bias. Other module types are left untouched."""
    name = m.__class__.__name__
    if 'Conv' in name or 'Linear' in name:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in name:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
    """Xavier-normal init for conv/linear weights; batch-norm weights from
    N(1, 0.02) with zero bias. Other module types are left untouched."""
    name = m.__class__.__name__
    if 'Conv' in name or 'Linear' in name:
        init.xavier_normal_(m.weight.data, gain=1)
    elif 'BatchNorm' in name:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_glorot(m):
    """Xavier-uniform ("Glorot") init for conv/linear weights.

    NOTE(review): unlike the other initializers in this module, batch-norm
    layers are deliberately not touched here — confirm this asymmetry is
    intentional.
    """
    name = m.__class__.__name__
    if 'Conv' in name or 'Linear' in name:
        init.xavier_uniform_(m.weight.data, gain=1)
def weights_init_kaiming(m):
    """Kaiming-normal (fan-in) init for conv/linear weights; batch-norm
    weights from N(1, 0.02) with zero bias."""
    name = m.__class__.__name__
    if 'Conv' in name or 'Linear' in name:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'BatchNorm' in name:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
    """Orthogonal init for conv/linear weights; batch-norm weights from
    N(1, 0.02) with zero bias."""
    name = m.__class__.__name__
    if 'Conv' in name or 'Linear' in name:
        init.orthogonal_(m.weight.data, gain=1)
    elif 'BatchNorm' in name:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='kaiming'):
    """Apply the chosen weight-initialization scheme to every submodule of net.

    Args:
        net: module whose submodules are (re)initialized in place.
        init_type: one of 'normal', 'xavier', 'glorot', 'kaiming',
            'orthogonal'.

    Raises:
        NotImplementedError: if init_type is not a recognized scheme.
    """
    initializers = {
        'normal': weights_init_normal,
        'xavier': weights_init_xavier,
        'glorot': weights_init_glorot,
        'kaiming': weights_init_kaiming,
        'orthogonal': weights_init_orthogonal,
    }
    if init_type not in initializers:
        raise NotImplementedError(
            'initialization method [%s] is not implemented' % init_type)
    net.apply(initializers[init_type])
| 34.132353 | 94 | 0.642826 |
bc76a76d57581bae4865293a2d7688d204ea1bca
| 2,766 |
py
|
Python
|
modelling/blocks/prior_analysis.py
|
hieu1999210/image_compression
|
3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c
|
[
"Apache-2.0"
] | null | null | null |
modelling/blocks/prior_analysis.py
|
hieu1999210/image_compression
|
3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c
|
[
"Apache-2.0"
] | null | null | null |
modelling/blocks/prior_analysis.py
|
hieu1999210/image_compression
|
3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c
|
[
"Apache-2.0"
] | null | null | null |
"""
modified by Hieu Nguyen
adapted from Tensorflow to Pytorch implementation
"""
# Copyright 2020 Hieu Nguyen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch.nn as nn
import math
class HyperpriorAnalysisTransform(nn.Module):
    """
    Hyperprior analysis transform: a stack of strided Conv2d layers with a
    ReLU after every layer except the last (which also has no bias).

    adapted from https://github.com/tensorflow/compression/blob/master/models/bmshj2018.py
    """

    def __init__(self, cfg):
        super().__init__()
        hp_cfg = cfg.MODEL.HYPER_PRIOR
        latent_channels = cfg.MODEL.LATENT_CHANNELS
        width = cfg.MODEL.INTER_CHANNELS

        modules = []
        num_layers = len(hp_cfg.STRIDES)
        for idx, (stride, kernel) in enumerate(zip(hp_cfg.STRIDES, hp_cfg.KERNELS)):
            is_last = idx == num_layers - 1
            conv = nn.Conv2d(
                latent_channels if idx == 0 else width,
                width,
                kernel,
                stride=stride,
                padding=kernel // 2,
                bias=not is_last,
            )
            # Match the TF reference: Xavier-normal weights with gain sqrt(2)
            # and a small constant bias on every layer that has one.
            nn.init.xavier_normal_(conv.weight.data, math.sqrt(2))
            if not is_last:
                nn.init.constant_(conv.bias.data, 0.01)
            modules.append(conv)
            if not is_last:
                modules.append(nn.ReLU())
        self._layers = nn.Sequential(*modules)

    def forward(self, x):
        return self._layers(x)
| 38.416667 | 91 | 0.610268 |
474cbe78a64fbf4ac40474772af02879e8ac1c83
| 226 |
py
|
Python
|
lazy_lxd/lib/lxd/__init__.py
|
vladisnik/lazy-lxd
|
04c8ddb286af5ccab0f7627dec3ea8e12f4408ba
|
[
"MIT"
] | 3 |
2020-08-11T13:50:44.000Z
|
2020-08-17T10:26:56.000Z
|
lazy_lxd/lib/lxd/__init__.py
|
vladisnik/lazy-lxd
|
04c8ddb286af5ccab0f7627dec3ea8e12f4408ba
|
[
"MIT"
] | null | null | null |
lazy_lxd/lib/lxd/__init__.py
|
vladisnik/lazy-lxd
|
04c8ddb286af5ccab0f7627dec3ea8e12f4408ba
|
[
"MIT"
] | null | null | null |
"""
Additional layer between LXD API and client.
Client supplemented by some useful functions.
Such as downloading image, wrappers above container states, etc.
"""
from .client import LXDClient
__all__ = [
'LXDClient'
]
| 18.833333 | 64 | 0.752212 |
cef27e3d47afaa1a5483b62221cf48f4866bf99f
| 38,704 |
py
|
Python
|
albert/run_classifier_sp.py
|
pertschuk/google-research
|
2a412e4c3654abcfccf9118116cbc73c801e6409
|
[
"Apache-2.0"
] | null | null | null |
albert/run_classifier_sp.py
|
pertschuk/google-research
|
2a412e4c3654abcfccf9118116cbc73c801e6409
|
[
"Apache-2.0"
] | null | null | null |
albert/run_classifier_sp.py
|
pertschuk/google-research
|
2a412e4c3654abcfccf9118116cbc73c801e6409
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""BERT finetuning runner with sentence piece tokenization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import time
import six
import tensorflow as tf
from six.moves import zip
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import data as contrib_data
from tensorflow.contrib import tpu as contrib_tpu
from albert import modeling
from albert import optimization
from albert import tokenization
# Command-line configuration. `FLAGS` is the module-global flag registry
# that the rest of this file reads at runtime.
flags = tf.flags

FLAGS = flags.FLAGS

## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir. Should contain the .tsv files (or other data files) "
    "for the task.")

flags.DEFINE_string(
    "albert_config_file", None,
    "The config json file corresponding to the pre-trained ALBERT model. "
    "This specifies the model architecture.")

flags.DEFINE_string("task_name", None, "The name of the task to train.")

flags.DEFINE_string(
    "vocab_file", None,
    "The vocabulary file that the ALBERT model was trained on.")

flags.DEFINE_string("spm_model_file", None,
                    "The model file for sentence piece tokenization.")

flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")

## Other parameters
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained ALBERT model).")

flags.DEFINE_bool(
    "use_pooled_output", True, "Whether to use the CLS token outputs")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer(
    "max_seq_length", 512,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

# Mode selection and batch/optimizer hyperparameters.
flags.DEFINE_bool("do_train", False, "Whether to run training.")

flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")

flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")

flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")

flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")

flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")

flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")

flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")

flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")

flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

# TPU-related settings (relevant only when use_tpu is True).
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
    """A single training/test example for simple sequence classification.

    Plain data holder; attributes mirror the constructor arguments.
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
          guid: Unique id for the example.
          text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
          text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
          label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class PaddingInputExample(object):
    """Fake example so the num input examples is a multiple of the batch size.

    When running eval/predict on the TPU, we need to pad the number of examples
    to be a multiple of the batch size, because the TPU requires a fixed batch
    size. The alternative is to drop the last batch, which is bad because it means
    the entire output data won't be generated.

    We use this class instead of `None` because treating `None` as padding
    batches could cause silent errors.
    """
class InputFeatures(object):
    """A single set of features of data.

    Plain data holder; attributes mirror the constructor arguments.
    """

    def __init__(self,
                 input_ids,
                 input_mask,
                 segment_ids,
                 label_id,
                 is_real_example=True):
        # input_ids/input_mask/segment_ids: per-token feature sequences;
        # presumably token ids, padding mask, and segment (type) ids — confirm
        # against the featurization code that constructs these.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
        # NOTE(review): likely set to False for PaddingInputExample-derived
        # features so they can be filtered from metrics — verify.
        self.is_real_example = is_real_example
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file into a list of row lists."""
        with tf.gfile.Open(input_file, "r") as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class XnliProcessor(DataProcessor):
    """Processor for the XNLI data set."""

    def __init__(self):
        self.language = "zh"

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "multinli",
                            "multinli.train.%s.tsv" % self.language)
        examples = []
        for i, line in enumerate(self._read_tsv(path)):
            if i == 0:
                continue  # header row
            text_a = tokenization.convert_to_unicode(line[0])
            text_b = tokenization.convert_to_unicode(line[1])
            label = tokenization.convert_to_unicode(line[2])
            # The raw corpus sometimes spells the label "contradictory";
            # normalize it to the canonical form.
            if label == tokenization.convert_to_unicode("contradictory"):
                label = tokenization.convert_to_unicode("contradiction")
            examples.append(InputExample(
                guid="train-%d" % i, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_dev_examples(self, data_dir):
        """See base class."""
        examples = []
        rows = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
        for i, line in enumerate(rows):
            if i == 0:
                continue  # header row
            language = tokenization.convert_to_unicode(line[0])
            if language != tokenization.convert_to_unicode(self.language):
                continue  # keep only rows for the configured language
            examples.append(InputExample(
                guid="dev-%d" % i,
                text_a=tokenization.convert_to_unicode(line[6]),
                text_b=tokenization.convert_to_unicode(line[7]),
                label=tokenization.convert_to_unicode(line[1])))
        return examples

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
            "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for row_idx, row in enumerate(lines):
            if row_idx == 0:
                continue  # skip the TSV header
            # The guid comes from column 0: GLUE submissions are keyed on it.
            guid = tokenization.preprocess_text(row[0], lower=FLAGS.do_lower_case)
            text_a = tokenization.preprocess_text(row[8], lower=FLAGS.do_lower_case)
            text_b = tokenization.preprocess_text(row[9], lower=FLAGS.do_lower_case)
            if set_type == "test":
                label = "contradiction"  # placeholder; test labels are hidden
            else:
                label = tokenization.preprocess_text(row[-1])
            examples.append(InputExample(
                guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue  # header row
            if set_type == "test":
                # For the test split the guid is the id column and the label
                # is a placeholder.
                guid = row[0]
                label = "0"
            else:
                guid = "%s-%s" % (set_type, idx)
                label = tokenization.preprocess_text(row[0])
            examples.append(InputExample(
                guid=guid,
                text_a=tokenization.preprocess_text(row[3], lower=FLAGS.do_lower_case),
                text_b=tokenization.preprocess_text(row[4], lower=FLAGS.do_lower_case),
                label=label))
        return examples
class MSMarcoProcessor(DataProcessor):
    """Processor for MS MARCO passage-ranking triples (query, pos, neg)."""

    def _create_example(self, query, doc, label, set_type, i):
        """Build one (query, document) pair as an InputExample."""
        guid = "%s-%s" % (set_type, i)
        text_a = tokenization.preprocess_text(query, lower=FLAGS.do_lower_case)
        text_b = tokenization.preprocess_text(doc, lower=FLAGS.do_lower_case)
        if set_type == "test":
            # guid = line[0]
            label = "0"
        else:
            label = tokenization.preprocess_text(label)
        return InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)

    def get_train_examples(self, data_dir):
        """See base class; streams the hard-coded triples file, emitting a
        positive and a negative example per line."""
        print('Converting to Train to tfrecord...')
        start_time = time.time()
        train_dataset_path = './data/triples.train.small.tsv'
        print('Counting number of examples...')
        # Hard-coded estimate used only for the progress messages.
        num_lines = 1000000  # sum(1 for line in open(train_dataset_path, 'r'))
        print('{} examples found.'.format(num_lines))
        examples = []
        with open(train_dataset_path, 'r') as triples_file:
            for line_idx, raw_line in enumerate(triples_file):
                if line_idx > 2000000:
                    break
                if line_idx % 1000 == 0:
                    elapsed = int(time.time() - start_time)
                    print('Processed training set, line {} of {} in {} sec'.format(
                        line_idx, num_lines, elapsed))
                    hours_remaining = (
                        (num_lines - line_idx) * elapsed / (max(1.0, line_idx) * 3600))
                    print('Estimated hours remaining to write the training set: {}'.format(
                        hours_remaining))
                query, positive_doc, negative_doc = raw_line.rstrip().split('\t')
                # The fractional .5 keeps the negative's guid distinct while
                # staying associated with the same source line.
                examples.append(
                    self._create_example(query, positive_doc, str(1), 'train', line_idx))
                examples.append(
                    self._create_example(query, negative_doc, str(0), 'train', line_idx + 0.5))
        return examples

    def get_dev_examples(self, data_dir):
        """See base class."""
        return []

    def get_test_examples(self, data_dir):
        """See base class."""
        return self.get_dev_examples(data_dir)

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                continue  # header row
            if set_type == "test":
                guid = row[0]
                label = "0"
            else:
                guid = "%s-%s" % (set_type, idx)
                label = tokenization.preprocess_text(row[0])
            examples.append(InputExample(
                guid=guid,
                text_a=tokenization.preprocess_text(row[3], lower=FLAGS.do_lower_case),
                text_b=tokenization.preprocess_text(row[4], lower=FLAGS.do_lower_case),
                label=label))
        return examples
class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
    return self._create_examples(lines, "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
    return self._create_examples(lines, "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "test.tsv"))
    return self._create_examples(lines, "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    is_test = set_type == "test"
    examples = []
    for index, line in enumerate(lines):
      # Only the test set has a header row.
      if is_test and index == 0:
        continue
      if is_test:
        guid = line[0]
        text_a = tokenization.preprocess_text(
            line[1], lower=FLAGS.do_lower_case)
        label = "0"  # placeholder; the test split is unlabeled
      else:
        guid = "%s-%s" % (set_type, index)
        text_a = tokenization.preprocess_text(
            line[3], lower=FLAGS.do_lower_case)
        label = tokenization.preprocess_text(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`.

  Tokenizes (and, for pairs, truncates) the input text, assembles the
  [CLS] ... [SEP] (... [SEP]) token layout with matching segment ids, and
  zero-pads everything to `max_seq_length`.
  """
  if isinstance(example, PaddingInputExample):
    # All-zero features for TPU batch padding; excluded from metrics.
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)

  label_map = {label: i for i, label in enumerate(label_list)}

  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = tokenizer.tokenize(example.text_b) if example.text_b else None

  if tokens_b:
    # Account for [CLS], [SEP], [SEP] with "- 3"; truncates in place.
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for [CLS] and [SEP] with "- 2".
    tokens_a = tokens_a[:max_seq_length - 2]

  # The convention in ALBERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  # The type_ids distinguish the two sequences; the [CLS] vector is used as
  # the sentence representation for classification.
  tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
  segment_ids = [0] * len(tokens)
  if tokens_b:
    tokens += tokens_b + ["[SEP]"]
    segment_ids += [1] * (len(tokens_b) + 1)

  input_ids = tokenizer.convert_tokens_to_ids(tokens)

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  pad_len = max_seq_length - len(input_ids)
  input_ids += [0] * pad_len
  input_mask += [0] * pad_len
  segment_ids += [0] * pad_len

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length

  label_id = label_map[example.label]
  if ex_index < 5:
    # Log the first few examples for manual inspection.
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

  return InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a TFRecord file.

  Each example is featurized via `convert_single_example` and written as a
  serialized `tf.train.Example` with int64 feature lists.
  """

  def create_int_feature(values):
    # Wraps a sequence of python ints into an int64 tf.train.Feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

  # `with` guarantees the writer is flushed/closed even if featurization
  # raises; the helper above is defined once instead of per loop iteration.
  with tf.python_io.TFRecordWriter(output_file) as writer:
    for ex_index, example in enumerate(examples):
      if ex_index % 10000 == 0:
        tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

      feature = convert_single_example(ex_index, example, label_list,
                                       max_seq_length, tokenizer)

      features = collections.OrderedDict()
      features["input_ids"] = create_int_feature(feature.input_ids)
      features["input_mask"] = create_int_feature(feature.input_mask)
      features["segment_ids"] = create_int_feature(feature.segment_ids)
      features["label_ids"] = create_int_feature([feature.label_id])
      features["is_real_example"] = create_int_feature(
          [int(feature.is_real_example)])

      tf_example = tf.train.Example(features=tf.train.Features(feature=features))
      writer.write(tf_example.SerializeToString())
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""
  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)
    # tf.Example only supports tf.int64, but the TPU only supports tf.int32,
    # so cast every int64 tensor down to int32.
    for name in list(example.keys()):
      tensor = example[name]
      if tensor.dtype == tf.int64:
        example[name] = tf.to_int32(tensor)
    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    dataset = tf.data.TFRecordDataset(input_file)
    if is_training:
      dataset = dataset.repeat().shuffle(buffer_size=100)

    dataset = dataset.apply(
        contrib_data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))
    return dataset

  return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(albert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model.

  Builds an ALBERT encoder, reduces it to one vector per example, and adds a
  linear softmax classification head on top.

  Args:
    albert_config: config object passed to `modeling.AlbertModel`.
    is_training: bool; enables dropout on the output layer when True.
    input_ids: int Tensor of token ids (from the feature pipeline).
    input_mask: int Tensor; 1 for real tokens, 0 for padding.
    segment_ids: int Tensor of token type ids.
    labels: int Tensor of per-example label ids.
    num_labels: number of target classes.
    use_one_hot_embeddings: forwarded to `modeling.AlbertModel`.

  Returns:
    Tuple of (loss, per_example_loss, probabilities, predictions).
  """
  model = modeling.AlbertModel(
      config=albert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  # In the demo, we are doing a simple classification task on the entire
  # segment. If you want token-level output, use model.get_sequence_output()
  # instead. A flag selects between the pooled [CLS] output and the mean of
  # the sequence outputs as the per-example representation.
  if FLAGS.use_pooled_output:
    tf.logging.info("using pooled output")
    output_layer = model.get_pooled_output()
  else:
    tf.logging.info("using meaned output")
    output_layer = tf.reduce_mean(model.get_sequence_output(), axis=1)

  hidden_size = output_layer.shape[-1].value

  # Classification head: a single dense layer over the pooled representation.
  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    # Cross-entropy against one-hot labels, averaged over the batch.
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

  return (loss, per_example_loss, probabilities, predictions)
def model_fn_builder(albert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator.

  The closure builds the classification graph via `create_model`, optionally
  warm-starts variables from `init_checkpoint`, and returns a
  `TPUEstimatorSpec` for the train, eval or predict mode it is called with.
  """

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    # Per-example weight: 0.0 for TPU padding examples, 1.0 otherwise.
    # Defaults to all-ones if the feature is absent.
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, probabilities, predictions) = \
        create_model(albert_config, is_training, input_ids, input_mask,
                     segment_ids, label_ids, num_labels, use_one_hot_embeddings)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      # Map checkpoint variables onto this graph's trainable variables.
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        # On TPU, checkpoint restoration must happen inside the Scaffold.
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:
      # Metrics are weighted by is_real_example so padding examples do not
      # contribute to accuracy or mean loss.
      def metric_fn(per_example_loss, label_ids, predictions, is_real_example):
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids,
                       predictions, is_real_example])
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      # PREDICT mode: emit class probabilities and the argmax prediction.
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={
              "probabilities": probabilities,
              "predictions": predictions
          },
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""
  all_input_ids = [feature.input_ids for feature in features]
  all_input_mask = [feature.input_mask for feature in features]
  all_segment_ids = [feature.segment_ids for feature in features]
  all_label_ids = [feature.label_id for feature in features]

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    dataset = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })

    if is_training:
      dataset = dataset.repeat().shuffle(buffer_size=100)

    return dataset.batch(batch_size=batch_size, drop_remainder=drop_remainder)

  return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Convert a set of `InputExample`s to a list of `InputFeatures`."""
  features = []
  for ex_index, example in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    features.append(
        convert_single_example(ex_index, example, label_list, max_seq_length,
                               tokenizer))
  return features
def main(_):
  """Trains, evaluates and/or predicts with an ALBERT classifier.

  Driven entirely by the module-level FLAGS. Raises ValueError when no action
  is requested, the task is unknown, or max_seq_length exceeds what the model
  supports.
  """
  tf.logging.set_verbosity(tf.logging.INFO)

  # Registry of supported tasks; keys must match --task_name (lowercased).
  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
      "msmarco": MSMarcoProcessor
  }

  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)

  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    # (Fixed a mismatched backquote in this message.)
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict` must be True.")

  albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)

  if FLAGS.max_seq_length > albert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the ALBERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, albert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  task_name = FLAGS.task_name.lower()
  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()
  label_list = processor.get_labels()

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case,
      spm_model_file=FLAGS.spm_model_file)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
  run_config = contrib_tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=contrib_tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

  model_fn = model_fn_builder(
      albert_config=albert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = contrib_tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info(" Num examples = %d", len(train_examples))
    tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info(" Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all tf.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())

    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info(" Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)

    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    start = time.time()
    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
    # Examples evaluated per second (includes TPU padding examples).
    tf.logging.info("Average rate: %s" % (len(eval_examples) / (time.time() - start)))

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info(" %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())

    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)

    tf.logging.info("***** Running prediction*****")
    tf.logging.info(" Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)

    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    output_submit_file = os.path.join(FLAGS.output_dir, "submit_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as pred_writer, \
        tf.gfile.GFile(output_submit_file, "w") as sub_writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, (example, prediction)) in \
          enumerate(zip(predict_examples, result)):
        probabilities = prediction["probabilities"]
        # Stop once every real example is written (skips TPU padding rows).
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        pred_writer.write(output_line)
        actual_label = label_list[int(prediction["predictions"])]
        sub_writer.write(
            six.ensure_str(example.guid) + "\t" + actual_label + "\n")
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
  # These flags have no safe defaults; abseil will fail fast if any is
  # missing before tf.app.run() dispatches to main().
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("albert_config_file")
  flags.mark_flag_as_required("output_dir")
  tf.app.run()
| 35.21747 | 92 | 0.686079 |
3cd9efc801dd659d6d3a30ff54a029847ae00680
| 2,552 |
py
|
Python
|
tests/components/whois/test_init.py
|
PiotrMachowski/core
|
b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b
|
[
"Apache-2.0"
] | 1 |
2018-08-01T02:37:08.000Z
|
2018-08-01T02:37:08.000Z
|
tests/components/whois/test_init.py
|
PiotrMachowski/core
|
b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b
|
[
"Apache-2.0"
] | 18 |
2021-11-24T06:26:13.000Z
|
2022-03-31T06:25:15.000Z
|
tests/components/whois/test_init.py
|
PiotrMachowski/core
|
b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b
|
[
"Apache-2.0"
] | 3 |
2021-11-14T13:29:33.000Z
|
2021-12-27T17:05:22.000Z
|
"""Tests for the Whois integration."""
from unittest.mock import MagicMock
import pytest
from whois.exceptions import (
FailedParsingWhoisOutput,
UnknownDateFormat,
UnknownTld,
WhoisCommandFailed,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.whois.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
async def test_load_unload_config_entry(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_whois: MagicMock,
) -> None:
    """Test the Whois configuration entry loading/unloading."""
    # Register the mocked entry and set the integration up.
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    assert mock_config_entry.state is ConfigEntryState.LOADED
    # Setup must perform exactly one whois lookup.
    assert len(mock_whois.mock_calls) == 1

    await hass.config_entries.async_unload(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    # Unloading must clean up all integration data and update entry state.
    assert not hass.data.get(DOMAIN)
    assert mock_config_entry.state is ConfigEntryState.NOT_LOADED
@pytest.mark.parametrize(
    "side_effect",
    [FailedParsingWhoisOutput, UnknownDateFormat, UnknownTld, WhoisCommandFailed],
)
async def test_error_handling(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_whois: MagicMock,
    caplog: pytest.LogCaptureFixture,
    side_effect: Exception,
) -> None:
    """Test the Whois threw an error."""
    mock_config_entry.add_to_hass(hass)
    # Make the whois lookup raise the parametrized library exception.
    mock_whois.side_effect = side_effect
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    # Any lookup failure must put the entry into setup-retry, after a
    # single attempted call.
    assert mock_config_entry.state is ConfigEntryState.SETUP_RETRY
    assert len(mock_whois.mock_calls) == 1
async def test_import_config(
    hass: HomeAssistant,
    mock_whois: MagicMock,
    mock_whois_config_flow: MagicMock,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test the Whois being set up from config via import."""
    # Setting up the deprecated YAML sensor platform should trigger an
    # import flow that results in a config entry.
    assert await async_setup_component(
        hass,
        SENSOR_DOMAIN,
        {SENSOR_DOMAIN: {"platform": DOMAIN, CONF_DOMAIN: "home-assistant.io"}},
    )
    await hass.async_block_till_done()

    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert len(mock_whois.mock_calls) == 1
    # A deprecation warning must be logged for the YAML configuration.
    assert "the Whois platform in YAML is deprecated" in caplog.text
| 31.506173 | 82 | 0.768417 |
7da14efebc2bd2315aee52ddd4428d9e86d4db82
| 17,841 |
py
|
Python
|
tests/test_sampler.py
|
jackraymond/dwave-neal
|
3d9f4816b59e8c59d095f994a57b10e88ac816da
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sampler.py
|
jackraymond/dwave-neal
|
3d9f4816b59e8c59d095f994a57b10e88ac816da
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sampler.py
|
jackraymond/dwave-neal
|
3d9f4816b59e8c59d095f994a57b10e88ac816da
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
import unittest
import numpy as np
import copy
import itertools
import warnings
import dimod
import neal
from neal import Neal
class TestSimulatedAnnealingSampler(unittest.TestCase):
def test_instantiation(self):
sampler = Neal()
dimod.testing.assert_sampler_api(sampler)
def test_one_node_beta_range(self):
h = {'a': -1}
bqm = dimod.BinaryQuadraticModel(h, {}, 0, dimod.SPIN)
response = Neal().sample(bqm)
hot_beta, cold_beta = response.info['beta_range']
# Check beta values
# Note: beta is proportional to 1/temperature, therefore hot_beta < cold_beta
self.assertLess(hot_beta, cold_beta)
self.assertNotEqual(hot_beta, float("inf"), "Starting value of 'beta_range' is infinite")
self.assertNotEqual(cold_beta, float("inf"), "Final value of 'beta_range' is infinite")
def test_one_edge_beta_range(self):
J = {('a', 'b'): 1}
bqm = dimod.BinaryQuadraticModel({}, J, 0, dimod.BINARY)
response = Neal().sample(bqm)
hot_beta, cold_beta = response.info['beta_range']
# Check beta values
# Note: beta is proportional to 1/temperature, therefore hot_beta < cold_beta
self.assertLess(hot_beta, cold_beta)
self.assertNotEqual(hot_beta, float("inf"), "Starting value of 'beta_range' is infinite")
self.assertNotEqual(cold_beta, float("inf"), "Final value of 'beta_range' is infinite")
def test_sample_ising(self):
h = {'a': 0, 'b': -1}
J = {('a', 'b'): -1}
resp = Neal().sample_ising(h, J)
row, col = resp.record.sample.shape
self.assertEqual(col, 2) # should get back two variables
self.assertIs(resp.vartype, dimod.SPIN) # should be ising
def test_sample_qubo(self):
Q = {(0, 1): 1}
resp = Neal().sample_qubo(Q)
row, col = resp.record.sample.shape
self.assertEqual(col, 2) # should get back two variables
self.assertIs(resp.vartype, dimod.BINARY) # should be qubo
def test_basic_response(self):
sampler = Neal()
h = {'a': 0, 'b': -1}
J = {('a', 'b'): -1}
response = sampler.sample_ising(h, J)
self.assertIsInstance(response, dimod.SampleSet, "Sampler returned an unexpected response type")
def test_num_reads(self):
sampler = Neal()
h = {}
J = {('a', 'b'): .5, (0, 'a'): -1, (1, 'b'): 0.0}
for num_reads in (1, 10, 100, 3223, 10352):
response = sampler.sample_ising(h, J, num_reads=num_reads)
row, col = response.record.sample.shape
self.assertEqual(row, num_reads)
self.assertEqual(col, 4)
for bad_num_reads in (0, -1, -100):
with self.assertRaises(ValueError):
sampler.sample_ising(h, J, num_reads=bad_num_reads)
for bad_num_reads in (3.5, float("inf"), "string", [], {}):
with self.assertRaises(TypeError):
sampler.sample_ising(h, J, num_reads=bad_num_reads)
def test_empty_problem(self):
sampler = Neal()
h = {'a': 0, 'b': -1}
J = {('a', 'b'): -1}
eh, eJ = {}, {}
for h in (h, eh):
for J in (J, eJ):
_h = copy.deepcopy(h)
_J = copy.deepcopy(J)
r = sampler.sample_ising(_h, _J)
def test_seed(self):
sampler = Neal()
num_vars = 40
h = {v: -1 for v in range(num_vars)}
J = {(u, v): -1 for u in range(num_vars) for v in range(u, num_vars) if u != v}
num_reads = 1000
# test seed exceptions
for bad_seed in (3.5, float("inf"), "string", [], {}):
self.assertRaises(TypeError, sampler.sample_ising, {}, {}, seed=bad_seed)
for bad_seed in (-1, -100, 2**65):
self.assertRaises(ValueError, sampler.sample_ising, {}, {}, seed=bad_seed)
# no need to do a bunch of sweeps, in fact the less we do the more
# sure we can be that the same seed is returning the same result
all_samples = []
for seed in (1, 25, 2352, 736145, 5682453):
response0 = sampler.sample_ising(h, J, num_reads=num_reads, num_sweeps=10, seed=seed)
response1 = sampler.sample_ising(h, J, num_reads=num_reads, num_sweeps=10, seed=seed)
samples0 = response0.record.sample
samples1 = response1.record.sample
self.assertTrue(np.array_equal(samples0, samples1), "Same seed returned different results")
for previous_sample in all_samples:
self.assertFalse(np.array_equal(samples0, previous_sample), "Different seed returned same results")
all_samples.append(samples0)
def test_disconnected_problem(self):
sampler = Neal()
h = {}
J = {
# K_3
(0, 1): -1,
(1, 2): -1,
(0, 2): -1,
# disonnected K_3
(3, 4): -1,
(4, 5): -1,
(3, 5): -1,
}
resp = sampler.sample_ising(h, J, num_sweeps=1000, num_reads=100)
row, col = resp.record.sample.shape
self.assertEqual(row, 100)
self.assertEqual(col, 6) # should get back two variables
self.assertIs(resp.vartype, dimod.SPIN) # should be ising
def test_geometric_schedule(self):
sampler = Neal()
num_vars = 40
h = {v: -1 for v in range(num_vars)}
J = {(u, v): -1 for u in range(num_vars) for v in range(u, num_vars) if u != v}
num_reads = 10
resp = sampler.sample_ising(h, J, num_reads=num_reads, beta_schedule_type='geometric')
row, col = resp.record.sample.shape
self.assertEqual(row, num_reads)
self.assertEqual(col, num_vars) # should get back two variables
self.assertIs(resp.vartype, dimod.SPIN) # should be ising
with self.assertRaises(ValueError):
sampler.sample_ising(h, J, num_reads=num_reads, beta_schedule_type='asd')
def test_interrupt_error(self):
    """An interrupt_function that raises stops sampling after a single read."""
    sampler = Neal()
    num_vars = 40
    h = {v: -1 for v in range(num_vars)}
    J = {(u, v): -1 for u in range(num_vars) for v in range(u, num_vars) if u != v}

    def interrupt():
        # Raising from the interrupt hook should abort further reads.
        raise NotImplementedError

    resp = sampler.sample_ising(h, J, num_reads=100, interrupt_function=interrupt)

    self.assertEqual(len(resp), 1)
def test_sampleset_initial_states(self):
    """A dimod.SampleSet can seed the anneal via initial_states."""
    bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': 1, 'bc': 1, 'ca': 1})
    seed_state = dimod.SampleSet.from_samples_bqm({'a': 1, 'b': -1, 'c': 1}, bqm)

    response = Neal().sample(bqm, initial_states=seed_state, num_reads=1)

    self.assertEqual(len(response), 1)
    self.assertEqual(response.first.energy, -1)
def test_initial_states_generator(self):
    """Exercise every ``initial_states_generator`` mode ('random', 'tile',
    'none'): extension with random states, truncation to num_reads, tiling,
    and the error cases (tiling nothing, too few states, wrong variables).
    """
    bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})
    init = dimod.SampleSet.from_samples_bqm([{'a': 1, 'b': 1, 'c': 1},
                                            {'a': -1, 'b': -1, 'c': -1}], bqm)
    sampler = Neal()

    # 2 fixed initial state, 8 random
    resp = sampler.sample(bqm, initial_states=init, num_reads=10)
    self.assertEqual(len(resp), 10)

    # 2 fixed initial states, 8 random, explicit
    resp = sampler.sample(bqm, initial_states=init, initial_states_generator='random', num_reads=10)
    self.assertEqual(len(resp), 10)

    # all random
    resp = sampler.sample(bqm, initial_states_generator='random', num_reads=10)
    self.assertEqual(len(resp), 10)

    # all random
    resp = sampler.sample(bqm, num_reads=10)
    self.assertEqual(len(resp), 10)

    # zero-length init states in tuple format, extended by random samples
    zero_init_tuple = (np.empty((0, 3)), ['a', 'b', 'c'])
    with warnings.catch_warnings():
        # zero-length initial states may warn; that is not what is under test
        warnings.simplefilter("ignore")
        resp = sampler.sample(bqm, initial_states=zero_init_tuple, num_reads=10)
    self.assertEqual(len(resp), 10)

    # explicit None for initial_states should use one random init state
    resp = sampler.sample(bqm, initial_states=None)
    self.assertEqual(len(resp), 1)

    # initial_states truncated to num_reads?
    resp = sampler.sample(bqm, initial_states=init, initial_states_generator='none', num_reads=1)
    self.assertEqual(len(resp), 1)

    resp = sampler.sample(bqm, initial_states=init, initial_states_generator='tile', num_reads=1)
    self.assertEqual(len(resp), 1)

    resp = sampler.sample(bqm, initial_states=init, initial_states_generator='random', num_reads=1)
    self.assertEqual(len(resp), 1)

    # 2 fixed initial states, repeated 5 times
    resp = sampler.sample(bqm, initial_states=init, initial_states_generator='tile', num_reads=10)
    self.assertEqual(len(resp), 10)

    # can't tile empty states
    with self.assertRaises(ValueError):
        resp = sampler.sample(bqm, initial_states_generator='tile', num_reads=10)

    # not enough initial states
    with self.assertRaises(ValueError):
        resp = sampler.sample(bqm, initial_states_generator='none', num_reads=3)

    # initial_states incompatible with the bqm
    init = dimod.SampleSet.from_samples({'a': 1, 'b': 1}, vartype='SPIN', energy=0)
    with self.assertRaises(ValueError):
        resp = sampler.sample(bqm, initial_states=init)
def test_soft_num_reads(self):
    """Number of reads adapts to initial_states size, if provided."""
    bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})
    init = dimod.SampleSet.from_samples_bqm([{'a': 1, 'b': 1, 'c': 1},
                                            {'a': -1, 'b': -1, 'c': -1}], bqm)
    sampler = Neal()

    cases = [
        # (sample kwargs, expected number of returned reads)
        (dict(), 1),                                   # default num_reads == 1
        (dict(initial_states_generator="random"), 1),  # generator alone: still 1
        (dict(initial_states=init), 2),                # follows len(initial_states)
        (dict(initial_states=init, num_reads=1), 1),   # explicit truncation
        (dict(initial_states=init, num_reads=3), 3),   # explicit expansion
        (dict(num_reads=4), 4),                        # all generated
    ]
    for kwargs, expected in cases:
        self.assertEqual(len(sampler.sample(bqm, **kwargs)), expected)
class TestDefaultBetaRange(unittest.TestCase):
    """Behavioural checks for ``neal.sampler._default_ising_beta_range``."""

    @staticmethod
    def _beta_ranges(*problems):
        """Return the (hot, cold) beta pair for each (h, J) problem given."""
        return [neal.sampler._default_ising_beta_range(h, J) for h, J in problems]

    def test_empty_problem(self):
        # Values have no impact on behaviour, but should conform to the
        # documented structure: a two-element range of non-negative betas.
        beta_range = neal.sampler._default_ising_beta_range({}, {})
        self.assertTrue(len(beta_range) == 2 and min(beta_range) >= 0)

    def test_single_variable_ising_problem(self):
        (h1, c1), (h2, c2), (h3, c3) = self._beta_ranges(
            ({'a': 0.1}, {}), ({'a': 1}, {}), ({'a': 10}, {}))
        # Stronger biases call for smaller betas, and hot beta < cold beta.
        self.assertTrue(h1 > h2 > h3)
        self.assertTrue(c1 > c2 > c3)
        self.assertTrue(h1 < c1 and h2 < c2 and h3 < c3)

    def test_single_coupling_ising_problem(self):
        (h1, c1), (h2, c2), (h3, c3) = self._beta_ranges(
            ({}, {'ab': 0.1}), ({}, {'ab': 1}), ({}, {'ab': 10}))
        self.assertTrue(h1 > h2 > h3)
        self.assertTrue(c1 > c2 > c3)
        self.assertTrue(h1 < c1 and h2 < c2 and h3 < c3)

    def test_bias_coupling_ranges(self):
        (h1, c1), (h2, c2), (h3, c3) = self._beta_ranges(
            ({'a': 1}, {'ab': 1}), ({'a': 10}, {'ab': 1}), ({'a': 10}, {'ab': 10}))
        self.assertTrue(h1 > h2 > h3)
        # Cold beta tracks the coupling scale only, hence c1 == c2.
        self.assertTrue(c1 == c2 > c3)
        self.assertTrue(h1 < c1 and h2 < c2 and h3 < c3)

    def test_default_beta_range(self):
        bqm = dimod.BinaryQuadraticModel.from_ising({'a': 1}, {'bc': 1})
        self.assertEqual(neal.default_beta_range(bqm),
                         neal.default_beta_range(bqm.binary))
class TestHeuristicResponse(unittest.TestCase):
    """End-to-end checks that the sampler reaches near-optimal energies on
    realistic problems, compared against known-good reference values."""

    def test_job_shop_scheduling_with_linear(self):
        # Set up a job shop scheduling BQM
        #
        # Provide hardcode version of the bqm of "jobs"
        #   jobs = {'b': [(1,1), (3,1)],
        #           'o': [(2,2), (4,1)],
        #           'g': [(1,2)]}
        #
        #   There are three jobs: 'b', 'o', 'g'
        #   Each tuple represents a task that runs on a particular machine for a given amount of
        #   time. I.e. (machine_id, duration_on_machine)
        #
        # Variables below are labelled as '<job_name>_<task_index>,<task_start_time>'.
        linear = {'b_0,0': -2.0,
                  'b_0,1': -2.0,
                  'b_0,2': -2.0,
                  'b_0,3': -2.0,
                  'b_1,0': 0.125,
                  'b_1,1': -1.5,
                  'b_1,2': 0.0,
                  'g_0,0': -1.875,
                  'g_0,1': -1.5,
                  'g_0,2': 0.0,
                  'o_0,0': -2.0,
                  'o_0,1': -2.0,
                  'o_0,2': -2.0,
                  'o_1,0': 0.03125,
                  'o_1,1': -1.875,
                  'o_1,2': -1.5,
                  'o_1,3': 0.0}

        quadratic = {('b_0,0', 'g_0,0'): 4,
                     ('b_0,1', 'b_0,0'): 4.0,
                     ('b_0,1', 'g_0,0'): 2,
                     ('b_0,2', 'b_0,0'): 4.0,
                     ('b_0,2', 'b_0,1'): 4.0,
                     ('b_0,2', 'b_1,2'): 2,
                     ('b_0,2', 'g_0,1'): 2,
                     ('b_0,2', 'g_0,2'): 4,
                     ('b_0,3', 'b_0,0'): 4.0,
                     ('b_0,3', 'b_0,1'): 4.0,
                     ('b_0,3', 'b_0,2'): 4.0,
                     ('b_0,3', 'b_1,2'): 2,
                     ('b_0,3', 'g_0,2'): 2,
                     ('b_1,1', 'b_0,1'): 2,
                     ('b_1,1', 'b_0,2'): 2,
                     ('b_1,1', 'b_0,3'): 2,
                     ('b_1,1', 'b_1,2'): 4.0,
                     ('g_0,1', 'b_0,1'): 4,
                     ('g_0,1', 'g_0,0'): 4.0,
                     ('g_0,2', 'g_0,0'): 4.0,
                     ('g_0,2', 'g_0,1'): 4.0,
                     ('o_0,0', 'o_1,1'): 2,
                     ('o_0,1', 'o_0,0'): 4.0,
                     ('o_0,1', 'o_1,1'): 2,
                     ('o_0,1', 'o_1,2'): 2,
                     ('o_0,2', 'o_0,0'): 4.0,
                     ('o_0,2', 'o_0,1'): 4.0,
                     ('o_0,2', 'o_1,1'): 2,
                     ('o_1,2', 'o_0,2'): 2,
                     ('o_1,2', 'o_1,1'): 4.0,
                     ('o_1,3', 'o_0,2'): 2,
                     ('o_1,3', 'o_1,1'): 4.0,
                     ('o_1,3', 'o_1,2'): 4.0}

        jss_bqm = dimod.BinaryQuadraticModel(linear, quadratic, 9.0, dimod.BINARY)

        # Optimal energy
        optimal_solution = {'b_0,0': 1, 'b_0,1': 0, 'b_0,2': 0, 'b_0,3': 0,
                            'b_1,0': 0, 'b_1,1': 1, 'b_1,2': 0,
                            'g_0,0': 0, 'g_0,1': 1, 'g_0,2': 0,
                            'o_0,0': 1, 'o_0,1': 0, 'o_0,2': 0,
                            'o_1,0': 0, 'o_1,1': 0, 'o_1,2': 1, 'o_1,3': 0}
        optimal_energy = jss_bqm.energy(optimal_solution)  # Evaluates to 0.5

        # Get heuristic solution
        sampler = Neal()
        response = sampler.sample(jss_bqm, beta_schedule_type="linear", num_reads=10)
        # response.data() yields lowest-energy first; only the best read matters here
        _, response_energy, _ = next(response.data())

        # Compare energies
        threshold = 0.1  # Arbitrary threshold
        self.assertLess(response_energy, optimal_energy + threshold)

    def test_cubic_lattice_with_geometric(self):
        # Set up all lattice edges in a cube. Each edge is labelled by a 3-D coordinate system
        def get_cubic_lattice_edges(N):
            for x, y, z in itertools.product(range(N), repeat=3):
                u = x, y, z
                # Periodic boundaries: the +1 neighbour wraps with modulo N.
                yield u, ((x+1)%N, y, z)
                yield u, (x, (y+1)%N, z)
                yield u, (x, y, (z+1)%N)

        # Add a J-bias to each edge (fixed RNG seed keeps the problem stable)
        np_rand = np.random.RandomState(128)
        J = {e: np_rand.choice((-1, 1)) for e in get_cubic_lattice_edges(12)}

        # Solve ising problem
        sampler = Neal()
        response = sampler.sample_ising({}, J, beta_schedule_type="geometric", num_reads=10)
        _, response_energy, _ = next(response.data())

        # Note: lowest energy found was -3088 with a different benchmarking tool
        threshold = -3000
        self.assertLess(response_energy, threshold, ("response_energy, {}, exceeds "
                                                     "threshold").format(response_energy))
# Run the full test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 39.646667 | 115 | 0.554846 |
b0a85784c923734a97189a0909e0a167abafd929
| 10,616 |
py
|
Python
|
src/cognito_auth_server/server.py
|
mathewmoon/cognito-auth-server
|
6deb59de32e479fdc6a04e27390ef4cb70421723
|
[
"Apache-2.0"
] | null | null | null |
src/cognito_auth_server/server.py
|
mathewmoon/cognito-auth-server
|
6deb59de32e479fdc6a04e27390ef4cb70421723
|
[
"Apache-2.0"
] | null | null | null |
src/cognito_auth_server/server.py
|
mathewmoon/cognito-auth-server
|
6deb59de32e479fdc6a04e27390ef4cb70421723
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3.8
from os import (
path,
R_OK,
W_OK,
access,
chmod,
chown,
remove,
stat,
environ,
chdir
)
from socketserver import (
TCPServer as TCPSocketServer,
UnixStreamServer,
BaseRequestHandler
)
from pwd import getpwnam
from stat import S_ISSOCK
from logging import getLogger
from json import dumps
from textwrap import dedent
import socketserver
from socket import timeout
import http.server
from time import time, sleep
from threading import Thread
import signal
from botocore.exceptions import ClientError
from cognitoinator import Session
from cognitoinator.providers import TokenFetcher
# Reuse botocore's logger for this module; verbosity is configurable via the
# ERROR_LEVEL environment variable (defaults to INFO).
logger = getLogger("botocore")
logger.setLevel(environ.get("ERROR_LEVEL", "INFO"))
class SocketRequestMixin:
    """Shared request helpers for the socket-based credential handlers.

    Relies on the handler having set ``self.client`` and ``self.sock_addr``
    and on the server having attached ``self.session`` / ``self.config``.
    """

    def authed_response(self, secret: str) -> str:
        """Read the client-supplied secret and return credentials or an error."""
        # The secret must arrive immediately or not at all; a silent client is
        # treated exactly like one that sent the wrong secret.
        self.request.settimeout(.5)
        timed_out = False
        try:
            received = self.request.recv(1024).decode().strip()
        except timeout:
            logger.warning(f"Timeout receiving credentials from {self.client} on {self.sock_addr}")
            timed_out = True
        if timed_out or received != secret:
            res = {"error": "not authorized"}
            logger.warning(f"Access denied (bad credentials) for {self.client} on {self.sock_addr}")
        else:
            logger.info(f"SUCCESS for: {self.client} on {self.sock_addr}")
            # Pick the payload based on the concrete handler class.
            if self.__class__ is TokenRequestHandler:
                res = self.session.tokens
            elif self.__class__ is IAMRequestHandler:
                res = self.session.auth_client.profile_credentials
        return res

    def handle_error(self, e):
        """Log an unexpected exception with connection context; return a
        generic error payload so internals are not leaked to the client."""
        msg = dedent(f"""
            {e}
            Client: {self.client}
            Socket: {self.sock_addr}
        """)
        logger.exception(msg)
        return {"error": "Internal Error"}
class IAMRequestHandler(BaseRequestHandler, SocketRequestMixin):
    """Socket handler that serves IAM profile credentials as JSON."""

    def handle(self):
        """Answer one connection with the profile credentials (or an error)."""
        self.sock_addr = self.request.getsockname()
        self.client = self.request.getpeername() or "UNKNOWN CLIENT"
        try:
            secret = self.config["global"].get("auth_secret")
            if secret:
                # A shared secret is configured: require it before responding.
                res = self.authed_response(secret)
            else:
                res = self.session.auth_client.profile_credentials
                logger.info(f"SUCCESS for: {self.client} on {self.sock_addr}")
        except Exception as e:
            res = self.handle_error(e)
        self.request.sendall(dumps(res).encode())
class TokenRequestHandler(BaseRequestHandler, SocketRequestMixin):
    """Socket handler that serves the session's Cognito tokens as JSON."""

    def handle(self):
        """Answer one connection with the session tokens (or an error)."""
        self.sock_addr = self.request.getsockname()
        self.client = self.request.getpeername() or "UNKNOWN CLIENT"
        try:
            # BUG FIX: the auth secret lives under the "global" section of the
            # config (IAMRequestHandler and HttpRequestHandler both read it
            # there, and CognitoServer attaches the full config dict). Reading
            # it from the top level meant token requests skipped the secret
            # check whenever one was configured.
            if secret := self.config["global"].get("auth_secret"):
                res = self.authed_response(secret)
            else:
                res = self.session.tokens
        except Exception as e:
            res = self.handle_error(e)
        self.request.sendall(dumps(res).encode())
class HttpRequestHandler(http.server.SimpleHTTPRequestHandler):
    """HTTP handler serving either IAM credentials or Cognito tokens as JSON.

    ``config``, ``session`` and ``credential_type`` are attached to this class
    by ``CognitoServer.get_session`` before the server starts.
    """

    def do_GET(self):
        """Serve the credentials, optionally guarded by an API-key header."""
        try:
            if secret := self.config["global"].get("auth_secret"):
                if not self.auth(secret):
                    return
            if self.credential_type == "iam":
                res = dumps(self.session.auth_client.profile_credentials).encode()
            else:
                res = dumps(self.session.tokens).encode()
        except Exception as e:
            logger.exception(e)
            self.send_response(500)
            self.end_headers()  # BUG FIX: terminate headers before the body
            self.wfile.write("500 Internal Error".encode())
            return
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        # BUG FIX: ``self.end_headers`` was referenced but never called, so
        # the header section was never terminated before the body was written.
        self.end_headers()
        self.wfile.write(res)

    def auth(self, secret):
        """Check the Cognito-Api-Key header against the configured secret.

        Sends a 401 response itself on mismatch; returns True when authorized.
        """
        authed = False
        if self.headers.get("Cognito-Api-Key") != secret:
            self.send_response(401)
            self.end_headers()  # BUG FIX: terminate headers before the body
            self.wfile.write("401 Not Authorized".encode())
        else:
            authed = True
        return authed
class CognitoServer():
    """Base server: owns the Cognito session and keeps credentials fresh.

    Promotes every key of the "server" and "global" config sections onto the
    instance as attributes (e.g. host, port, socket_path, credential_type).
    """
    # Shared session (a cognitoinator Session, or a TokenFetcher when the
    # server serves raw tokens) — populated by get_session().
    session: Session = None
    # STS client used only to trigger credential refreshes.
    STS: object = None

    def __init__(self, config: dict):
        self.config = config
        self.credential_type = self.config["global"]["credential_type"]
        # Flatten the server/global config sections onto the instance.
        for k, v in self.config["server"].items():
            setattr(self, k, v)
        for k, v in self.config["global"].items():
            setattr(self, k, v)

    def get_session(self) -> Session:
        """Create the session, wire it and the config into the request
        handler class, and start the background refresher thread.

        Returns:
            The created Session (or TokenFetcher acting as one).
        """
        if isinstance(self, WebServer):
            # We set this because the WebServer uses a single request handler and
            # needs to differentiate whether to look up tokens or iam credentials
            # from its session attribute
            setattr(HttpRequestHandler, "credential_type", self.credential_type)
            iam_handler = HttpRequestHandler
            token_handler = HttpRequestHandler
        else:
            iam_handler = IAMRequestHandler
            token_handler = TokenRequestHandler
        # Handlers read the config as a class attribute at request time.
        setattr(iam_handler, "config", self.config)
        setattr(token_handler, "config", self.config)
        if self.credential_type == "tokens":
            self.session = TokenFetcher(
                config=self.config["cognito"],
                server=True
            )
            # This allows us to access the session inside of our request handler
            # and actually get the credentials that we want to return
            setattr(token_handler, "session", self.session)
            self.request_handler = token_handler
        else:
            # We are doing IAM
            self.session = Session(cognito_config=self.config["cognito"])
            setattr(iam_handler, "session", self.session)
            self.request_handler = iam_handler
        # NOTE(review): the refresher runs for both credential types here —
        # confirm TokenFetcher exposes .client()/auth_client as Session does.
        self.STS = self.session.client("sts")
        self.threaded_credential_refresher()
        return self.session

    def refresh_expired_credentials(self):
        """
        Force credentials to refresh if expired by calling an AWS endpoint.
        The credentials plugin will automatically check the credentials and refresh if needed
        """
        while True:
            try:
                # get_caller_identity() is perfect because it requires no IAM permissions
                self.STS.get_caller_identity()
            except (Exception, ClientError):
                # Any failure is treated as "expired": log in again explicitly.
                start_time = time()
                logger.info("Credentials have expired. Refreshing credentials")
                self.session.auth_client.cognito_login()
                logger.info(
                    f"refreshed credentials in {time() - start_time} seconds.")
            sleep(5)

    def threaded_credential_refresher(self):
        """
        Starts a separate thread that will call STS.get_caller_identity() in a loop
        so that our credentials will automatically get refreshed by the session's auth handler
        """
        logger.info("Started credential refresher thread.")
        t = Thread(target=self.refresh_expired_credentials)
        # Daemon thread: it must not keep the process alive at shutdown.
        t.daemon = True
        t.start()
class SocketServer(CognitoServer):
    """Serves credentials over a unix domain socket."""

    def __init__(self, config: dict):
        super().__init__(config)
        self.server = None
        self.set_signal_handlers()
        # ``permissions`` arrives as a string such as "700"; turn it into the
        # numeric mode chmod expects.
        self.permissions = int(f"0o{self.permissions}", 8)
        self.get_session()

    def set_signal_handlers(self):
        """Install shutdown() for common termination signals so the socket
        file is removed on exit."""
        signal.signal(signal.SIGINT, self.shutdown)
        signal.signal(signal.SIGURG, self.shutdown)
        signal.signal(signal.SIGTERM, self.shutdown)
        signal.signal(signal.SIGTSTP, self.shutdown)

    def get_socket_path(self) -> str:
        """Determines if self.socket_path is read/writeable"""
        socket_path = self.socket_path
        socket_dir = path.dirname(socket_path)
        # The directory (not the socket itself) must exist and be R/W.
        if not (
            path.isdir(socket_dir)
            and access(socket_dir, R_OK)
            and access(socket_dir, W_OK)
        ):
            raise OSError(f"Cannot access path to socket {path.dirname(socket_path)}. Make sure path exists and the current user has R/W access.")
        return socket_path

    def set_socket_permissions(self, socket_file: str):
        """Sets permissions on socket file"""
        if isinstance(self.user, str):
            # Resolve a username to its numeric uid for chown.
            self.user = getpwnam(self.user).pw_uid
        try:
            chown(socket_file, int(self.user), int(self.group))
            chmod(socket_file, self.permissions)
        except Exception as e:
            raise OSError("Could not set permissions/ownership on socket.") from e

    def shutdown(self, _, __):
        # Signal-handler signature: (signum, frame) are intentionally unused.
        try:
            if self.server is not None:
                self.server.server_close()
            if self.socket_path:
                # Remove the socket file so the next start can bind cleanly.
                remove(self.socket_path)
        except Exception as e:
            logger.exception(e)
        exit()

    def start(self):
        """Starts the socket server"""
        socket_path = self.get_socket_path()
        self.socket_path = socket_path
        socket_dir = path.dirname(socket_path)
        # Work relative to the socket's directory and bind by basename.
        chdir(socket_dir)
        socket_file = path.basename(socket_path)
        try:
            # Remove a stale socket left behind by a previous run.
            if S_ISSOCK(stat(socket_file).st_mode):
                try:
                    remove(socket_file)
                except OSError as e:
                    raise Exception(f"Socket file {socket_file} already exists and an exception was raised when trying to remove it.") from e
        except FileNotFoundError:
            pass
        with UnixStreamServer(socket_file, self.request_handler) as SERVER:
            self.server = SERVER
            SERVER.server_activate()
            # Tighten ownership/mode before accepting any connections.
            self.set_socket_permissions(socket_file)
            SERVER.serve_forever()
class TCPServer(CognitoServer):
    """Serves credentials over a plain TCP socket."""

    def __init__(self, config: dict, request_handler=None):
        # NOTE(review): ``request_handler`` is accepted but never used; the
        # handler comes from get_session(). Kept for interface compatibility.
        super().__init__(config)
        self.get_session()

    def start(self):
        """Bind to (host, port) and serve requests until interrupted."""
        address = (self.host, self.port)
        with TCPSocketServer(address, self.request_handler) as server:
            server.serve_forever()
class WebServer(CognitoServer):
    """Serves credentials over HTTP (via HttpRequestHandler)."""

    def __init__(self, config: dict):
        super().__init__(config)
        self.get_session()

    def start(self):
        """Bind to (host, port) and serve HTTP requests forever."""
        with TCPSocketServer((self.host, self.port), self.request_handler) as httpd:
            httpd.serve_forever()
| 32.968944 | 146 | 0.611059 |
62ff22711b889f7cfc62084815f6ebbc36b5ced6
| 2,743 |
py
|
Python
|
dvc/objects/db/reference.py
|
lucasalavapena/dvc
|
230eb7087df7f063ded7422af7ae45bd04eb794a
|
[
"Apache-2.0"
] | null | null | null |
dvc/objects/db/reference.py
|
lucasalavapena/dvc
|
230eb7087df7f063ded7422af7ae45bd04eb794a
|
[
"Apache-2.0"
] | 87 |
2021-04-27T08:17:31.000Z
|
2022-03-30T12:12:40.000Z
|
dvc/objects/db/reference.py
|
lucasalavapena/dvc
|
230eb7087df7f063ded7422af7ae45bd04eb794a
|
[
"Apache-2.0"
] | null | null | null |
import io
import logging
import os
from typing import TYPE_CHECKING, Dict
from dvc.scheme import Schemes
from ..errors import ObjectFormatError
from ..reference import ReferenceHashFile
from .base import ObjectDB
if TYPE_CHECKING:
from dvc.fs.base import BaseFileSystem
from dvc.hash_info import HashInfo
from dvc.types import AnyPath
logger = logging.getLogger(__name__)
class ReferenceObjectDB(ObjectDB):
    """Reference ODB.

    File objects are stored as ReferenceHashFiles which reference paths outside
    of the staging ODB fs. Tree objects are stored natively.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Filesystems referenced by stored ReferenceHashFiles, keyed by their
        # config tuple, so each remote fs is instantiated only once.
        self._fs_cache: Dict[tuple, "BaseFileSystem"] = {}
        # Already-deserialized reference files, keyed by hash.
        self._obj_cache: Dict["HashInfo", "ReferenceHashFile"] = {}

    def get(self, hash_info: "HashInfo"):
        """Return the object for ``hash_info``.

        Trees fall through to the base class; files are deserialized as
        ReferenceHashFiles, served from ``_obj_cache`` when possible.

        Raises:
            FileNotFoundError: the stored reference file cannot be read.
            ObjectFormatError: the stored reference is invalid (the corrupt
                file is removed before re-raising).
        """
        if hash_info.isdir:
            return super().get(hash_info)
        try:
            return self._obj_cache[hash_info]
        except KeyError:
            pass
        fs_path = self.hash_to_path(hash_info.value)
        try:
            with self.fs.open(fs_path, "rb") as fobj:
                ref_file = ReferenceHashFile.from_bytes(
                    fobj.read(), fs_cache=self._fs_cache
                )
        except OSError:
            # Normalize any read failure to "object not present".
            raise FileNotFoundError
        try:
            # check_hash=False: only the reference structure is validated here.
            ref_file.check(self, check_hash=False)
        except ObjectFormatError:
            self.fs.remove(fs_path)
            raise
        self._obj_cache[hash_info] = ref_file
        return ref_file

    def _add_file(
        self,
        from_fs: "BaseFileSystem",
        from_info: "AnyPath",
        to_info: "AnyPath",
        hash_info: "HashInfo",
        hardlink: bool = False,
    ):
        """Store ``from_info`` at ``to_info``: trees are copied natively,
        files are written as serialized ReferenceHashFiles that point back at
        the original location."""
        self.makedirs(self.fs.path.parent(to_info))
        if hash_info.isdir:
            return super()._add_file(
                from_fs,
                from_info,
                to_info,
                hash_info,
                hardlink=hardlink,
            )
        ref_file = ReferenceHashFile(from_info, from_fs, hash_info)
        self._obj_cache[hash_info] = ref_file
        ref_fobj = io.BytesIO(ref_file.to_bytes())
        ref_fobj.seek(0)
        try:
            self.fs.upload(ref_fobj, to_info)
        except OSError as exc:
            # On Windows a FileExistsError can surface as the __context__ of
            # another OSError; both mean the object is already stored.
            if isinstance(exc, FileExistsError) or (
                os.name == "nt"
                and exc.__context__
                and isinstance(exc.__context__, FileExistsError)
            ):
                logger.debug("'%s' file already exists, skipping", to_info)
            else:
                raise
        if from_fs.scheme != Schemes.LOCAL:
            # Remember non-local filesystems so later get() calls can reuse
            # them when deserializing references.
            self._fs_cache[ReferenceHashFile.config_tuple(from_fs)] = from_fs
| 30.820225 | 79 | 0.595334 |
3b2402c9a039380327273f22d5eb4bcecdbd0ad0
| 5,873 |
py
|
Python
|
evennia/contrib/tutorial_examples/red_button.py
|
pakhnu/my-world
|
405983dca81e70fc64d58d6a60126ffa5e8ada8c
|
[
"BSD-3-Clause"
] | 9 |
2017-07-10T04:27:31.000Z
|
2020-07-31T08:54:08.000Z
|
evennia/contrib/tutorial_examples/red_button.py
|
pakhnu/my-world
|
405983dca81e70fc64d58d6a60126ffa5e8ada8c
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/contrib/tutorial_examples/red_button.py
|
pakhnu/my-world
|
405983dca81e70fc64d58d6a60126ffa5e8ada8c
|
[
"BSD-3-Clause"
] | 4 |
2017-09-11T02:26:21.000Z
|
2021-12-31T05:20:34.000Z
|
"""
This is a more advanced example object. It combines functions from
script.examples as well as commands.examples to make an interactive
button typeclass.
Create this button with
@create/drop examples.red_button.RedButton
Note that you must drop the button before you can see its messages!
"""
import random
from evennia import DefaultObject
from evennia.contrib.tutorial_examples import red_button_scripts as scriptexamples
from evennia.contrib.tutorial_examples import cmdset_red_button as cmdsetexamples
#
# Definition of the object itself
#
class RedButton(DefaultObject):
    """
    This class describes an evil red button.  It will use the script
    definition in contrib/examples/red_button_scripts to blink at regular
    intervals.  It also uses a series of script and commands to handle
    pushing the button and causing effects when doing so.

    The following attributes can be set on the button:
     desc_lid_open - description when lid is open
     desc_lid_closed - description when lid is closed
     desc_lamp_broken - description when lamp is broken
    """

    def at_object_creation(self):
        """
        This function is called when object is created. Use this
        instead of e.g. __init__.
        """
        # store desc (default, you can change this at creation time)
        desc = "This is a large red button, inviting yet evil-looking. "
        desc += "A closed glass lid protects it."
        self.db.desc = desc

        # We have to define all the variables the scripts
        # are checking/using *before* adding the scripts or
        # they might be deactivated before even starting!
        self.db.lid_open = False
        self.db.lamp_works = True
        self.db.lid_locked = False

        self.cmdset.add_default(cmdsetexamples.DefaultCmdSet, permanent=True)

        # since the cmdsets relevant to the button are added 'on the fly',
        # we need to setup custom scripts to do this for us (also, these scripts
        # check so they are valid (i.e. the lid is actually still closed)).
        # The AddClosedCmdSet script makes sure to add the Closed-cmdset.
        self.scripts.add(scriptexamples.ClosedLidState)
        # the script EventBlinkButton makes the button blink regularly.
        self.scripts.add(scriptexamples.BlinkButtonEvent)

    # state-changing methods

    def open_lid(self):
        """
        Opens the glass lid and start the timer so it will soon close
        again.
        """
        if self.db.lid_open:
            return
        desc = self.db.desc_lid_open
        if not desc:
            # fall back to a default description if none was configured
            desc = "This is a large red button, inviting yet evil-looking. "
            desc += "Its glass cover is open and the button exposed."
        self.db.desc = desc
        self.db.lid_open = True

        # with the lid open, we validate scripts; this will clean out
        # scripts that depend on the lid to be closed.
        self.scripts.validate()
        # now add new scripts that define the open-lid state
        self.scripts.add(scriptexamples.OpenLidState)
        # we also add a scripted event that will close the lid after a while.
        # (this one cleans itself after being called once)
        self.scripts.add(scriptexamples.CloseLidEvent)

    def close_lid(self):
        """
        Close the glass lid. This validates all scripts on the button,
        which means that scripts only being valid when the lid is open
        will go away automatically.
        """
        if not self.db.lid_open:
            return
        desc = self.db.desc_lid_closed
        if not desc:
            # fall back to a default description if none was configured
            desc = "This is a large red button, inviting yet evil-looking. "
            desc += "Its glass cover is closed, protecting it."
        self.db.desc = desc
        self.db.lid_open = False

        # clean out scripts depending on lid to be open
        self.scripts.validate()
        # add scripts related to the closed state
        self.scripts.add(scriptexamples.ClosedLidState)

    def break_lamp(self, feedback=True):
        """
        Breaks the lamp in the button, stopping it from blinking.

        Args:
            feedback (bool): Show a message about breaking the lamp.

        """
        self.db.lamp_works = False
        desc = self.db.desc_lamp_broken
        if not desc:
            self.db.desc += "\nThe big red button has stopped blinking for the time being."
        else:
            self.db.desc = desc
        if feedback and self.location:
            self.location.msg_contents("The lamp flickers, the button going dark.")
        # validation removes any scripts that require a working lamp
        self.scripts.validate()

    def press_button(self, pobject):
        """
        Someone was foolish enough to press the button!

        Args:
            pobject (Object): The person pressing the button

        """
        # deactivate the button so it won't flash/close lid etc.
        self.scripts.add(scriptexamples.DeactivateButtonEvent)
        # blind the person pressing the button. Note that this
        # script is set on the *character* pressing the button!
        pobject.scripts.add(scriptexamples.BlindedState)

    # script-related methods

    def blink(self):
        """
        The script system will regularly call this
        function to make the button blink. Now and then
        it won't blink at all though, to add some randomness
        to how often the message is echoed.
        """
        loc = self.location
        if loc:
            rand = random.random()
            # 20% chance for each message; 40% chance of no blink at all
            if rand < 0.2:
                string = "The red button flashes briefly."
            elif rand < 0.4:
                string = "The red button blinks invitingly."
            elif rand < 0.6:
                string = "The red button flashes. You know you wanna push it!"
            else:
                # no blink
                return
            loc.msg_contents(string)
| 35.167665 | 91 | 0.641069 |
3c11e9b36e1b5da3d594787007028a180a13e8c8
| 5,129 |
py
|
Python
|
listener/normal/error/forms.py
|
andymckay/arecibo
|
eb6787ea0a276047ef5add2df67a4dd051e5c961
|
[
"Apache-2.0"
] | 6 |
2016-01-26T04:47:52.000Z
|
2022-01-24T19:55:04.000Z
|
listener/normal/error/forms.py
|
andymckay/arecibo
|
eb6787ea0a276047ef5add2df67a4dd051e5c961
|
[
"Apache-2.0"
] | 6 |
2017-02-12T05:11:25.000Z
|
2017-02-12T05:12:15.000Z
|
listener/normal/error/forms.py
|
andymckay/arecibo
|
eb6787ea0a276047ef5add2df67a4dd051e5c961
|
[
"Apache-2.0"
] | 2 |
2015-12-09T22:37:58.000Z
|
2021-09-09T17:04:33.000Z
|
from datetime import datetime, timedelta
from functools import reduce
import operator

from django import forms
from django.db.models import Q

from app.forms import Form, ModelForm
from app.utils import memoize, safe_int
from projects.models import ProjectURL
from error.models import Error, Group
# Filter dropdown choices; an empty-string value means "no filter" / "All".
read_choices = (("", "All"), ("True", 'Read only'), ("False", 'Unread only'))

# Priorities run 1 through 10, prefixed with the "All" wildcard entry.
priority_choices = [ (r, r) for r in range(1, 11)]
priority_choices.insert(0, ("", "All"))

# HTTP status codes offered by the status filter dropdown (most common first).
status_choices = ['500', '404', '100', '101', '102', '200', '201', '202', '203',
    '204', '205', '206', '207', '226', '300', '301', '302', '303', '304',
    '305', '307', '400', '401', '402', '403', '405', '406', '407',
    '408', '409', '410', '411', '412', '413', '414', '415', '416', '417',
    '422', '423', '424', '426', '501', '502', '503', '504', '505',
    '507', '510']
status_choices = [ (r, r) for r in status_choices ]
status_choices.insert(0, ("", "All"))
class Filter(Form):
    """Base class for the search filters.

    Subclasses declare form fields; ``as_query`` ANDs the cleaned values into
    a Django queryset filter. A ``handle_<field>`` method on the subclass
    overrides the default ``Q(field=value)`` lookup for that field.
    """
    inequality = ""

    def as_query(self, object):
        """Build a queryset for ``object`` from the cleaned form data.

        Falsy values are skipped (an empty field means "no filter"); with no
        active filters the unfiltered queryset is returned.
        """
        args = []
        for k, v in self.cleaned_data.items():
            if not v:
                continue
            lookup = getattr(self, "handle_%s" % k, None)
            if lookup:
                args.append(lookup(v))
            else:
                args.append(Q(**{k: v}))
        if args:
            # BUG FIX: ``reduce`` is no longer a builtin on Python 3; it is
            # now imported from functools at the top of the module.
            return object.objects.filter(reduce(operator.and_, args))
        return object.objects.all()

    def clean(self):
        """Drop falsy values so empty filter fields are ignored downstream."""
        data = {}
        for k, v in self.cleaned_data.items():
            if not v:
                continue
            data[k] = v
        return data
@memoize(prefix='get-project-urls', time=120)
def get_project_urls():
    """Choice list of [pk, url] for every project URL, led by a blank entry.

    Memoized for two minutes to avoid a DB hit on every form render.
    """
    choices = [('', '')]
    choices.extend([record.pk, record.url] for record in ProjectURL.objects.all())
    return choices
class GroupForm(Filter):
    """Filter form for error Groups, filtered by project URL."""
    project_url = forms.ChoiceField(choices=[],
        widget=forms.Select, required=False)

    def __init__(self, *args, **kw):
        super(GroupForm, self).__init__(*args, **kw)
        # Choices are bound per-instance (and memoized) so fresh project URLs
        # appear without a process restart.
        self.fields['project_url'].choices = get_project_urls()

    def handle_project_url(self, value):
        return Q(project_url=value)

    def as_query(self):
        """Return the Group queryset matching the submitted filters."""
        return super(GroupForm, self).as_query(Group)
@memoize(prefix='get-domains', time=120)
def get_domains():
    """Sorted (url, url) choices for every distinct project URL, led by a
    blank entry. Memoized for two minutes."""
    urls = ProjectURL.objects.order_by('url').values_list('url', flat=True).distinct()
    return [('', '')] + sorted((u, u) for u in urls)
# Relative date-range presets for the "period" filter field; the names map to
# ErrorForm.handle_period branches.
period_choices = (['', ''],
    ['last_24', 'Last 24 hours'],
    ['today', 'Today'],
    ['yesterday', 'Yesterday'])
class GroupEditForm(ModelForm):
    """Model form exposing a Group's name, count and timestamp for editing."""
    class Meta:
        model = Group
        fields = ['name', 'count', 'timestamp']
class ErrorForm(Filter):
    """Filter form for individual Errors (priority, status, read state,
    date range, path, IP, domain, uid, and group)."""
    priority = forms.ChoiceField(choices=priority_choices,
        widget=forms.Select, required=False)
    status = forms.ChoiceField(choices=status_choices,
        widget=forms.Select, required=False)
    read = forms.ChoiceField(choices=read_choices,
        widget=forms.Select, required=False)
    start = forms.DateField(required=False, label="Start date",
        widget=forms.DateInput(attrs={"class":"date",}))
    period = forms.ChoiceField(choices=period_choices,
        widget=forms.Select, required=False)
    end = forms.DateField(required=False, label="End date",
        widget=forms.DateInput(attrs={"class":"date",}))
    query = forms.CharField(required=False, label="Path")
    ip = forms.CharField(required=False, label="IP")
    domain = forms.ChoiceField(choices=[],
        widget=forms.Select, required=False)
    uid = forms.CharField(required=False)
    group = forms.ModelChoiceField(queryset=Group.objects.none(),
        widget=forms.Select, required=False)

    def __init__(self, *args, **kw):
        super(ErrorForm, self).__init__(*args, **kw)
        # Querysets/choices are bound at instantiation time so new groups and
        # domains show up without a process restart.
        self.fields['group'].queryset = Group.objects.all()
        self.fields['domain'].choices = get_domains()

    def clean(self):
        # Drop falsy values so empty filter fields are ignored.
        data = {}
        for k, v in self.cleaned_data.items():
            if not v: continue
            data[k] = v
        return data

    def handle_period(self, period):
        """Translate a named period choice into a timestamp-range Q object."""
        if period == 'last_24':
            return Q(timestamp__gte=datetime.now() - timedelta(hours=24))
        elif period == 'today':
            return Q(timestamp__gte=datetime.today().date())
        elif period == 'yesterday':
            # NOTE(review): the upper bound is datetime.today() (i.e. "now"),
            # not the start of today, so this range includes today so far too.
            return Q(timestamp__gte=datetime.today().date() - timedelta(days=1),
                     timestamp__lt=datetime.today())
        else:
            # Unreachable through the form (choices are constrained); guards
            # against programmatic misuse.
            raise NotImplementedError

    def handle_read(self, value):
        # Map the string choice onto a boolean lookup.
        return Q(read={"False":False, "True":True}.get(value, None))

    def handle_start(self, value):
        return Q(timestamp__gte=value)

    def handle_end(self, value):
        return Q(timestamp__lte=value)

    def handle_priority(self, value):
        # Match priorities less than or equal to the selected value.
        return Q(priority__lte=value)

    def as_query(self):
        return super(ErrorForm, self).as_query(Error)
c587ab6f39b6e60c14d8890c44786db6bc12a15f
| 43,647 |
py
|
Python
|
17tensorflow/6_bert/fine_tune_squad.py
|
cheerfulwang/python-tutorial
|
d0f7348e1da4ff954e3add66e1aae55d599283ee
|
[
"Apache-2.0"
] | 2 |
2021-01-04T10:44:44.000Z
|
2022-02-13T07:53:41.000Z
|
17tensorflow/6_bert/fine_tune_squad.py
|
zm79287/python-tutorial
|
d0f7348e1da4ff954e3add66e1aae55d599283ee
|
[
"Apache-2.0"
] | null | null | null |
17tensorflow/6_bert/fine_tune_squad.py
|
zm79287/python-tutorial
|
d0f7348e1da4ff954e3add66e1aae55d599283ee
|
[
"Apache-2.0"
] | 2 |
2020-11-23T08:58:51.000Z
|
2022-02-13T07:53:42.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('../')
import argparse
import collections
import logging
import json
import math
import os
import random
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import whitespace_tokenize, BasicTokenizer, BertTokenizer
from pytorch_pretrained_bert.modeling import BertForQuestionAnswering
from pytorch_pretrained_bert.optimization import BertAdam
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class SquadExample(object):
    """A single training/test example for simple sequence classification.

    Attributes:
        qas_id: unique question id from the SQuAD json.
        question_text: raw question string.
        doc_tokens: whitespace-tokenized context paragraph.
        orig_answer_text: original answer string (training only).
        start_position: answer start index into doc_tokens (training only).
        end_position: answer end index into doc_tokens (training only).
    """

    def __init__(self,
                 qas_id,
                 question_text,
                 doc_tokens,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None):
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = ""
        s += "qas_id: %s" % (self.qas_id)
        s += ", question_text: %s" % (self.question_text)
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        # BUGFIX: the original guarded BOTH position fields with
        # `if self.start_position:`, so end_position was never printed on its
        # own and a legitimate position of 0 was silently dropped. Use explicit
        # None checks and test each field independently.
        if self.start_position is not None:
            s += ", start_position: %d" % (self.start_position)
        if self.end_position is not None:
            s += ", end_position: %d" % (self.end_position)
        return s
class InputFeatures(object):
    """Feature tensors for one document span of one SQuAD example.

    Holds the packed [CLS] query [SEP] span [SEP] token ids plus the
    bookkeeping maps needed to project predictions back onto the original
    document tokens.
    """

    def __init__(self,
                 unique_id,
                 example_index,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 token_is_max_context,
                 input_ids,
                 input_mask,
                 segment_ids,
                 start_position=None,
                 end_position=None):
        # Stash every constructor argument on the instance verbatim.
        vars(self).update(
            unique_id=unique_id,
            example_index=example_index,
            doc_span_index=doc_span_index,
            tokens=tokens,
            token_to_orig_map=token_to_orig_map,
            token_is_max_context=token_is_max_context,
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            start_position=start_position,
            end_position=end_position)
def read_squad_examples(input_file, is_training):
    """Read a SQuAD json file into a list of SquadExample.

    Args:
        input_file: path to a SQuAD-format json file.
        is_training: when True every question must carry exactly one answer
            and the answer's character offsets are converted to
            whitespace-token offsets; examples whose answer text cannot be
            recovered from the document are skipped with a warning.

    Returns:
        A list of SquadExample (answer fields are None when not training).
    """
    with open(input_file, "r") as reader:
        input_data = json.load(reader)["data"]
    def is_whitespace(c):
        # 0x202F is NARROW NO-BREAK SPACE; treat it as whitespace too.
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False
    examples = []
    for entry in input_data:
        for paragraph in entry["paragraphs"]:
            paragraph_text = paragraph["context"]
            doc_tokens = []
            char_to_word_offset = []
            prev_is_whitespace = True
            # Whitespace-tokenize the context while recording, for every
            # character, the index of the token it falls inside.
            for c in paragraph_text:
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                    else:
                        doc_tokens[-1] += c
                    prev_is_whitespace = False
                char_to_word_offset.append(len(doc_tokens) - 1)
            for qa in paragraph["qas"]:
                qas_id = qa["id"]
                question_text = qa["question"]
                start_position = None
                end_position = None
                orig_answer_text = None
                if is_training:
                    if len(qa["answers"]) != 1:
                        raise ValueError(
                            "For training, each question should have exactly 1 answer.")
                    answer = qa["answers"][0]
                    orig_answer_text = answer["text"]
                    answer_offset = answer["answer_start"]
                    answer_length = len(orig_answer_text)
                    # Map character offsets to whitespace-token offsets.
                    start_position = char_to_word_offset[answer_offset]
                    end_position = char_to_word_offset[answer_offset + answer_length - 1]
                    # Only add answers where the text can be exactly recovered from the
                    # document. If this CAN'T happen it's likely due to weird Unicode
                    # stuff so we will just skip the example.
                    #
                    # Note that this means for training mode, every example is NOT
                    # guaranteed to be preserved.
                    actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
                    cleaned_answer_text = " ".join(
                        whitespace_tokenize(orig_answer_text))
                    if actual_text.find(cleaned_answer_text) == -1:
                        logger.warning("Could not find answer: '%s' vs. '%s'",
                                       actual_text, cleaned_answer_text)
                        continue
                example = SquadExample(
                    qas_id=qas_id,
                    question_text=question_text,
                    doc_tokens=doc_tokens,
                    orig_answer_text=orig_answer_text,
                    start_position=start_position,
                    end_position=end_position)
                examples.append(example)
    return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training):
    """Loads a data file into a list of `InputBatch`s.

    Each SquadExample may yield several InputFeatures: the WordPiece-tokenized
    document is split into overlapping spans of at most
    max_seq_length - len(query) - 3 tokens (sliding window with stride
    doc_stride), and each span is packed as [CLS] query [SEP] span [SEP],
    then zero-padded to max_seq_length.
    """
    unique_id = 1000000000
    features = []
    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]
        # Build the mapping between whitespace tokens and WordPiece sub-tokens
        # in both directions, plus the flat sub-token sequence.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        tok_start_position = None
        tok_end_position = None
        if is_training:
            # Project the whitespace-token answer span onto sub-token indices,
            # then tighten it to the best sub-token match of the answer text.
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
                example.orig_answer_text)
        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            # Segment 0: [CLS] + query + [SEP].
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)
            # Segment 1: document span + [SEP], remembering for each position
            # which original token it came from and whether this span gives
            # the token its maximal context.
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                      split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            start_position = None
            end_position = None
            if is_training:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                if (example.start_position < doc_start or
                        example.end_position < doc_start or
                        example.start_position > doc_end or example.end_position > doc_end):
                    continue
                # Shift sub-token answer indices into the packed sequence
                # (+2 skips [CLS] and the query's trailing [SEP]).
                doc_offset = len(query_tokens) + 2
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset
            if example_index < 20:
                logger.info("*** Example ***")
                logger.info("unique_id: %s" % (unique_id))
                logger.info("example_index: %s" % (example_index))
                logger.info("doc_span_index: %s" % (doc_span_index))
                logger.info("tokens: %s" % " ".join(tokens))
                logger.info("token_to_orig_map: %s" % " ".join([
                    "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
                logger.info("token_is_max_context: %s" % " ".join([
                    "%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
                ]))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info(
                    "input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logger.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                if is_training:
                    answer_text = " ".join(tokens[start_position:(end_position + 1)])
                    logger.info("start_position: %d" % (start_position))
                    logger.info("end_position: %d" % (end_position))
                    logger.info(
                        "answer: %s" % (answer_text))
            features.append(
                InputFeatures(
                    unique_id=unique_id,
                    example_index=example_index,
                    doc_span_index=doc_span_index,
                    tokens=tokens,
                    token_to_orig_map=token_to_orig_map,
                    token_is_max_context=token_is_max_context,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    start_position=start_position,
                    end_position=end_position))
            unique_id += 1
    return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
# Per-feature model output: the start/end logit vectors for one input span,
# keyed back to its InputFeatures via unique_id.
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
                      max_answer_length, do_lower_case, output_prediction_file,
                      output_nbest_file, verbose_logging):
    """Write final predictions to the json file.

    Groups per-span logits (all_results) back onto their source examples,
    enumerates the top n_best_size start/end logit combinations per example,
    filters out invalid spans, maps the survivors back to original text via
    get_final_text, and writes the best answer per question to
    output_prediction_file and the scored n-best list to output_nbest_file.
    """
    logger.info("Writing predictions to: %s" % (output_prediction_file))
    logger.info("Writing nbest to: %s" % (output_nbest_file))
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction",
        ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))
        # Rank candidate spans by the sum of their start and end logits.
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True)
        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
            # Deduplicate: keep only the highest-scoring span per final text.
            if final_text in seen_predictions:
                continue
            seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(
                _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        all_predictions[example.qas_id] = nbest_json[0]["text"]
        all_nbest_json[example.qas_id] = nbest_json
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")
    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: normalized, de-WordPieced predicted span.
        orig_text: span of the original document covering the prediction.
        do_lower_case: lower-casing used by the model's tokenizer.
        verbose_logging: log each fallback to orig_text.

    Returns:
        The substring of orig_text aligned to pred_text, or orig_text itself
        whenever the alignment heuristic fails.
    """
    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heruistic between
    # `pred_text` and `orig_text` to get a character-to-charcter alignment. This
    # can fail in certain cases in which case we just return `orig_text`.
    def _strip_spaces(text):
        # Returns (text without spaces, map from stripped index -> original index).
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)
    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = " ".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            logger.info(
                "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
                        orig_ns_text, tok_ns_text)
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):
    """ Utility function for optimize_on_cpu and 16-bits training.
        Copy the parameters optimized on CPU/RAM back to the model on GPU.
        Both arguments are sequences of (name, parameter) pairs and must be
        in the same order; a name mismatch raises ValueError.
    """
    for opti_entry, model_entry in zip(named_params_optimizer, named_params_model):
        name_opti, param_opti = opti_entry
        name_model, param_model = model_entry
        if name_opti != name_model:
            logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
            raise ValueError
        # In-place copy so the model tensor keeps its device and identity.
        param_model.data.copy_(param_opti.data)
def set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False):
    """ Utility function for optimize_on_cpu and 16-bits training.
        Copy the gradient of the GPU parameters to the CPU/RAMM copy of the model.

        Both arguments are sequences of (name, parameter) pairs in matching
        order; a name mismatch raises ValueError. When test_nan is True the
        return value reports whether any copied gradient contained NaN.
    """
    is_nan = False
    for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):
        if name_opti != name_model:
            logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
            raise ValueError
        if param_model.grad is not None:
            if test_nan and torch.isnan(param_model.grad).sum() > 0:
                is_nan = True
            # Lazily allocate the CPU-side gradient buffer on first use.
            if param_opti.grad is None:
                param_opti.grad = torch.nn.Parameter(param_opti.data.new().resize_(*param_opti.data.size()))
            param_opti.grad.data.copy_(param_model.grad.data)
        else:
            # Mirror "no gradient" onto the optimizer copy as well.
            param_opti.grad = None
    return is_nan
def main():
    """Command-line entry point: fine-tune and/or evaluate BERT on SQuAD.

    Parses arguments, optionally trains BertForQuestionAnswering on
    --train_file, and optionally writes predictions.json /
    nbest_predictions.json for --predict_file into --output_dir.
    """
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--train_file", default=None, type=str, help="SQuAD json for training. E.g., train-v1.1.json")
    parser.add_argument("--bert_model_dir", default=None, type=str)
    parser.add_argument("--bert_model_vocab", default=None, type=str)
    parser.add_argument("--predict_file", default=None, type=str,
                        help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
    parser.add_argument("--max_seq_length", default=384, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--doc_stride", default=128, type=int,
                        help="When splitting up a long document into chunks, how much stride to take between chunks.")
    parser.add_argument("--max_query_length", default=64, type=int,
                        help="The maximum number of tokens for the question. Questions longer than this will "
                             "be truncated to this length.")
    parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.")
    parser.add_argument("--do_predict", default=False, action='store_true', help="Whether to run eval on the dev set.")
    # BUGFIX: args.do_lower_case is consumed by write_predictions() below but
    # was never declared, so every --do_predict run crashed with
    # AttributeError before this flag existed.
    parser.add_argument("--do_lower_case", default=False, action='store_true',
                        help="Whether to lower case the input text. Should match the tokenizer's casing.")
    parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
    parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.")
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% "
                             "of training.")
    parser.add_argument("--n_best_size", default=20, type=int,
                        help="The total number of n-best predictions to generate in the nbest_predictions.json "
                             "output file.")
    parser.add_argument("--max_answer_length", default=30, type=int,
                        help="The maximum length of an answer that can be generated. This is needed because the start "
                             "and end predictions are not conditioned on one another.")
    parser.add_argument("--verbose_logging", default=False, action='store_true',
                        help="If true, all of the warnings related to data processing will be printed. "
                             "A number of warnings are expected for a normal SQuAD evaluation.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--optimize_on_cpu',
                        default=False,
                        action='store_true',
                        help="Whether to perform optimization and keep the optimizer averages on CPU")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=128,
                        help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
    args = parser.parse_args()

    # Device / distributed setup.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
        if args.fp16:
            logger.info("16-bits training currently not supported in distributed training")
            args.fp16 = False  # (see https://github.com/pytorch/pytorch/pull/13496)
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits trainiing: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    # Argument validation.
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_predict:
        raise ValueError("At least one of `do_train` or `do_predict` must be True.")
    if args.do_train:
        if not args.train_file:
            raise ValueError(
                "If `do_train` is True, then `train_file` must be specified.")
    if args.do_predict:
        if not args.predict_file:
            raise ValueError(
                "If `do_predict` is True, then `predict_file` must be specified.")
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        # BUGFIX: include the offending path in the message (it was missing).
        raise ValueError("Output directory (%s) already exists and is not empty." % args.output_dir)
    os.makedirs(args.output_dir, exist_ok=True)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model_vocab)
    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = read_squad_examples(
            input_file=args.train_file, is_training=True)
        num_train_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    model = BertForQuestionAnswering.from_pretrained(args.bert_model_dir)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer: fp16/CPU modes keep a float32 CPU copy of the params.
    if args.fp16:
        param_optimizer = [(n, param.clone().detach().to('cpu').float().requires_grad_())
                           for n, param in model.named_parameters()]
    elif args.optimize_on_cpu:
        param_optimizer = [(n, param.clone().detach().to('cpu').requires_grad_())
                           for n, param in model.named_parameters()]
    else:
        param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}
    ]
    # BUGFIX: in predict-only runs num_train_steps is None, which breaks
    # BertAdam's warmup schedule; -1 disables the schedule instead.
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=num_train_steps if num_train_steps is not None else -1)

    global_step = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            examples=train_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=True)
        logger.info("***** Running training *****")
        logger.info("  Num orig examples = %d", len(train_examples))
        logger.info("  Num split examples = %d", len(train_features))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                   all_start_positions, all_end_positions)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                if n_gpu == 1:
                    batch = tuple(t.to(device) for t in batch)  # multi-gpu does scattering it-self
                input_ids, input_mask, segment_ids, start_positions, end_positions = batch
                loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.fp16 and args.loss_scale != 1.0:
                    # rescale loss for fp16 training
                    # see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
                    loss = loss * args.loss_scale
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                loss.backward()
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16 or args.optimize_on_cpu:
                        if args.fp16 and args.loss_scale != 1.0:
                            # scale down gradients for fp16 training
                            for param in model.parameters():
                                if param.grad is not None:
                                    param.grad.data = param.grad.data / args.loss_scale
                        is_nan = set_optimizer_params_grad(param_optimizer, model.named_parameters(), test_nan=True)
                        if is_nan:
                            logger.info("FP16 TRAINING: Nan in gradients, reducing loss scaling")
                            args.loss_scale = args.loss_scale / 2
                            model.zero_grad()
                            continue
                        optimizer.step()
                        copy_optimizer_params_to_model(model.named_parameters(), param_optimizer)
                    else:
                        optimizer.step()
                    model.zero_grad()
                    global_step += 1

    if args.do_predict:
        eval_examples = read_squad_examples(
            input_file=args.predict_file, is_training=False)
        eval_features = convert_examples_to_features(
            examples=eval_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=False)
        logger.info("***** Running predictions *****")
        logger.info("  Num orig examples = %d", len(eval_examples))
        logger.info("  Num split examples = %d", len(eval_features))
        logger.info("  Batch size = %d", args.predict_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
        if args.local_rank == -1:
            eval_sampler = SequentialSampler(eval_data)
        else:
            eval_sampler = DistributedSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
        model.eval()
        all_results = []
        logger.info("Start evaluating")
        for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
            if len(all_results) % 1000 == 0:
                logger.info("Processing example: %d" % (len(all_results)))
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            with torch.no_grad():
                batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
            for i, example_index in enumerate(example_indices):
                start_logits = batch_start_logits[i].detach().cpu().tolist()
                end_logits = batch_end_logits[i].detach().cpu().tolist()
                eval_feature = eval_features[example_index.item()]
                unique_id = int(eval_feature.unique_id)
                all_results.append(RawResult(unique_id=unique_id,
                                             start_logits=start_logits,
                                             end_logits=end_logits))
        output_prediction_file = os.path.join(args.output_dir, "predictions.json")
        output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
        write_predictions(eval_examples, eval_features, all_results,
                          args.n_best_size, args.max_answer_length,
                          args.do_lower_case, output_prediction_file,
                          output_nbest_file, args.verbose_logging)
# Script entry point: run fine-tuning / prediction from the command line.
if __name__ == "__main__":
    main()
| 44.904321 | 119 | 0.60916 |
f0501b3f3eb9c926e5df5bab1da6086170777fde
| 14,774 |
py
|
Python
|
tensorflow/contrib/training/python/training/sampling_ops_test.py
|
atfkaka/tensorflow
|
5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a
|
[
"Apache-2.0"
] | 101 |
2016-12-03T11:40:52.000Z
|
2017-12-23T02:02:03.000Z
|
tensorflow/contrib/training/python/training/sampling_ops_test.py
|
atfkaka/tensorflow
|
5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a
|
[
"Apache-2.0"
] | 9 |
2016-12-14T03:27:46.000Z
|
2017-09-13T02:29:07.000Z
|
tensorflow/contrib/training/python/training/sampling_ops_test.py
|
atfkaka/tensorflow
|
5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a
|
[
"Apache-2.0"
] | 47 |
2016-12-04T12:37:24.000Z
|
2018-01-14T18:13:07.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.platform import tf_logging as logging
class StratifiedSampleTest(tf.test.TestCase):
  """Tests for tf.contrib.training.stratified_sample.

  Covers graph-construction validation, runtime (in-session) assertion
  checks, and the statistical behavior of rejection-based resampling.
  """

  def testGraphBuildAssertionFailures(self):
    """Malformed inputs must be rejected at graph-construction time."""
    val = [tf.zeros([1, 3]), tf.ones([1, 5])]
    label = tf.constant([1], shape=[1])  # must have batch dimension
    probs = [.2] * 5
    init_probs = [.1, .3, .1, .3, .2]
    batch_size = 16

    # Label must have only batch dimension if enqueue_many is True.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, tf.zeros([]), probs, batch_size, init_probs, enqueue_many=True)
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, tf.zeros([1, 1]), probs, batch_size, init_probs,
          enqueue_many=True)

    # Label must not be one-hot.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, tf.constant([0, 1, 0, 0, 0]), probs, batch_size, init_probs)

    # Data must be list, not singleton tensor.
    with self.assertRaises(TypeError):
      tf.contrib.training.stratified_sample(
          tf.zeros([1, 3]), label, probs, batch_size, init_probs)

    # Data must have batch dimension if enqueue_many is True.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, tf.constant(1), probs, batch_size, init_probs, enqueue_many=True)

    # Batch dimensions on data and labels should be equal.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          [tf.zeros([2, 1])], label, probs, batch_size, init_probs,
          enqueue_many=True)

    # Probabilities must be numpy array, python list, or tensor.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, label, 1, batch_size, init_probs)

    # Probabilities shape must be fully defined.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, label, tf.placeholder(
              tf.float32, shape=[None]), batch_size, init_probs)

    # In the rejection sampling case, make sure that probability lengths are
    # the same.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)

    # In the rejection sampling case, make sure that zero initial probability
    # classes also have zero target probability.
    with self.assertRaises(ValueError):
      tf.contrib.training.stratified_sample(
          val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])

  def testRuntimeAssertionFailures(self):
    """Illegal labels/probabilities must fail when fed through a session."""
    valid_probs = [.2] * 5
    valid_labels = [1, 2, 3]
    vals = [tf.zeros([3, 1])]

    illegal_labels = [
        [0, -1, 1],  # classes must be nonnegative
        [5, 1, 1],  # classes must be less than number of classes
        [2, 3],  # data and label batch size must be the same
    ]

    illegal_probs = [
        [.1] * 5,  # probabilities must sum to one
        [-.5, .5, .5, .4, .1],  # probabilities must be non-negative
    ]

    # Set up graph with illegal label vector.
    label_ph = tf.placeholder(tf.int32, shape=[None])
    probs_ph = tf.placeholder(tf.float32, shape=[5])  # shape must be defined
    val_tf, lbl_tf, prob_tf = sampling_ops._verify_input(  # pylint: disable=protected-access
        vals, label_ph, [probs_ph])

    for illegal_label in illegal_labels:
      # Run session that should fail.
      with self.test_session() as sess:
        with self.assertRaises(tf.errors.InvalidArgumentError):
          sess.run([val_tf, lbl_tf],
                   feed_dict={label_ph: illegal_label,
                              probs_ph: valid_probs})

    for illegal_prob in illegal_probs:
      # Run session that should fail.
      with self.test_session() as sess:
        with self.assertRaises(tf.errors.InvalidArgumentError):
          sess.run([prob_tf],
                   feed_dict={label_ph: valid_labels,
                              probs_ph: illegal_prob})

  def testCanBeCalledMultipleTimes(self):
    """Building two samplers in the same graph must not collide (name scopes,
    summaries, queue runners)."""
    batch_size = 20
    val_input_batch = [tf.zeros([2, 3, 4])]
    lbl_input_batch = tf.ones([], dtype=tf.int32)
    probs = np.array([0, 1, 0, 0, 0])
    batches = tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    batches += tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    summary_op = tf.merge_summary(tf.get_collection(tf.GraphKeys.SUMMARIES))

    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      sess.run(batches + (summary_op,))

      coord.request_stop()
      coord.join(threads)

  def testRejectionBatchingBehavior(self):
    """Sampler must accept enqueue_many input whose batch size differs from
    the output batch size."""
    batch_size = 20
    input_batch_size = 11
    val_input_batch = [tf.zeros([input_batch_size, 2, 3, 4])]
    # Randomly emit an all-1s or all-3s label batch so both class queues fill.
    lbl_input_batch = tf.cond(
        tf.greater(.5, tf.random_uniform([])),
        lambda: tf.ones([input_batch_size], dtype=tf.int32) * 1,
        lambda: tf.ones([input_batch_size], dtype=tf.int32) * 3)
    probs = np.array([0, .2, 0, .8, 0])
    data_batch, labels = tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size,
        init_probs=[0, .3, 0, .7, 0], enqueue_many=True)
    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      sess.run([data_batch, labels])

      coord.request_stop()
      coord.join(threads)

  def testBatchDimensionNotRequired(self):
    """_verify_input must handle fully-undefined input shapes."""
    classes = 5
    # Probs must be a tensor, since we pass it directly to _verify_input.
    probs = tf.constant([1.0 / classes] * classes)

    # Make sure that these vals/labels pairs don't throw any runtime exceptions.
    legal_input_pairs = [
        (np.zeros([2, 3]), [x % classes for x in range(2)]),  # batch dim 2
        (np.zeros([4, 15]), [x % classes for x in range(4)]),  # batch dim 4
        (np.zeros([10, 1]), [x % classes for x in range(10)]),  # batch dim 10
    ]

    # Set up graph with placeholders.
    vals_ph = tf.placeholder(tf.float32)  # completely undefined shape
    labels_ph = tf.placeholder(tf.int32)  # completely undefined shape
    val_tf, labels_tf, _ = sampling_ops._verify_input(  # pylint: disable=protected-access
        [vals_ph], labels_ph, [probs])

    # Run graph to make sure there are no shape-related runtime errors.
    for vals, labels in legal_input_pairs:
      with self.test_session() as sess:
        sess.run([val_tf, labels_tf],
                 feed_dict={vals_ph: vals,
                            labels_ph: labels})

  def testRejectionDataListInput(self):
    """A list of data tensors must come back as a list of the same length."""
    batch_size = 20
    val_input_batch = [tf.zeros([2, 3, 4]), tf.ones([2, 4]), tf.ones(2) * 3]
    lbl_input_batch = tf.ones([], dtype=tf.int32)
    probs = np.array([0, 1, 0, 0, 0])
    val_list, lbls = tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size,
        init_probs=[0, 1, 0, 0, 0])

    # Check output shapes.
    self.assertTrue(isinstance(val_list, list))
    self.assertEqual(len(val_list), len(val_input_batch))
    self.assertTrue(isinstance(lbls, tf.Tensor))

    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      out = sess.run(val_list + [lbls])

      coord.request_stop()
      coord.join(threads)

    # Check output shapes.
    self.assertEqual(len(out), len(val_input_batch) + 1)

  def normalBehaviorHelper(self, sampler):
    """Draw 20 batches from `sampler` and check data/label consistency and
    that the empirical label mean matches the target distribution."""
    # Set up graph.
    tf.set_random_seed(1234)
    lbl1 = 0
    lbl2 = 3
    # This cond allows the necessary class queues to be populated.
    label = tf.cond(
        tf.greater(.5, tf.random_uniform([])), lambda: tf.constant(lbl1),
        lambda: tf.constant(lbl2))
    # Data is a deterministic function of the label, so it can be verified.
    val = [np.array([1, 4]) * label]
    probs = np.array([.8, 0, 0, .2, 0])
    batch_size = 16

    data_batch, labels = sampler(val, label, probs, batch_size)

    # Run session and keep track of how frequently the labels and values appear.
    data_l = []
    label_l = []
    with self.test_session() as sess:
      # Need to initialize variables that keep running total of classes seen.
      tf.global_variables_initializer().run()

      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      for _ in range(20):
        [data], lbls = sess.run([data_batch, labels])
        data_l.append(data)
        label_l.append(lbls)

      coord.request_stop()
      coord.join(threads)

    # First check that the data matches the labels.
    for lbl, data in zip(label_l, data_l):
      for i in range(batch_size):
        self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))

    # Check that the labels are approximately correct.
    expected_label = probs[0] * lbl1 + probs[3] * lbl2
    lbl_list = range(len(probs))
    lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
    lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l))  # CLT
    actual_lbl = np.mean(label_l)
    # Tolerance is 3 standard deviations of the mean. According to the central
    # limit theorem, this should cover 99.7% of cases. Note that since the seed
    # is fixed, for a given implementation, this test will pass or fail 100% of
    # the time. This use of assertNear is to cover cases where someone changes
    # an implementation detail, which would cause the random behavior to differ.
    self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)

  def testRejectionNormalBehavior(self):
    """Statistical behavior with an explicitly supplied init_probs."""
    initial_p = [.7, 0, 0, .3, 0]

    def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
      return tf.contrib.training.stratified_sample(
          val,
          lbls,
          probs,
          batch,
          init_probs=initial_p,
          enqueue_many=enqueue_many)

    self.normalBehaviorHelper(curried_sampler)

  def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
    """Statistical behavior when init_probs is estimated online (None)."""

    def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
      return tf.contrib.training.stratified_sample(
          val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)

    self.normalBehaviorHelper(curried_sampler)
class RejectionSampleTest(tf.test.TestCase):
  """Tests for tf.contrib.training.rejection_sample."""

  def testGraphConstructionFailures(self):
    """Shape errors must surface at graph-construction time."""
    accept_prob_fn = lambda _: tf.constant(1.0)
    batch_size = 32
    # Data must have batch dimension if `enqueue_many` is `True`.
    with self.assertRaises(ValueError):
      tf.contrib.training.rejection_sample(
          [tf.zeros([])], accept_prob_fn, batch_size, enqueue_many=True)

    # Batch dimensions should be equal if `enqueue_many` is `True`.
    with self.assertRaises(ValueError):
      tf.contrib.training.rejection_sample(
          [tf.zeros([5, 1]), tf.zeros([4, 1])], accept_prob_fn, batch_size,
          enqueue_many=True)

  def testRuntimeFailures(self):
    """With runtime_checks=True, probabilities outside [0, 1] must raise
    InvalidArgumentError when the graph is run."""
    prob_ph = tf.placeholder(tf.float32, [])
    accept_prob_fn = lambda _: prob_ph
    batch_size = 32

    # Set up graph.
    tf.set_random_seed(1234)
    tf.contrib.training.rejection_sample(
        [tf.zeros([])], accept_prob_fn, batch_size, runtime_checks=True,
        name='rejection_sample')
    # The op's assertion-wrapped probability tensor is fetched by name.
    prob_tensor = tf.get_default_graph().get_tensor_by_name(
        'rejection_sample/prob_with_checks:0')

    # Run session that should fail.
    with self.test_session() as sess:
      for illegal_prob in [-0.1, 1.1]:
        with self.assertRaises(tf.errors.InvalidArgumentError):
          sess.run(prob_tensor, feed_dict={prob_ph: illegal_prob})

  def testNormalBehavior(self):
    """Inputs with acceptance probability 0 must never be sampled."""
    # Randomly emits 1.0 or 2.0; accept_prob_fn accepts only 2.0 (prob 1.0).
    tensor_list = [tf.cond(
        tf.greater(.5, tf.random_uniform([])),
        lambda: tf.constant(1.0),
        lambda: tf.constant(2.0))]
    accept_prob_fn = lambda x: x[0] - 1.0
    batch_size = 10

    # Set up graph.
    sample = tf.contrib.training.rejection_sample(
        tensor_list, accept_prob_fn, batch_size)

    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      for _ in range(5):
        sample_np = sess.run(sample)[0]
        self.assertListEqual([2.0] * batch_size, list(sample_np))

      coord.request_stop()
      coord.join(threads)
class ConditionalBatchTest(tf.test.TestCase):
  """Tests for the private helper sampling_ops._conditional_batch."""

  def testConditionallyEnqueueAndBatch(self):
    """Only tensors with keep_input == True may appear in the batch."""
    tf.set_random_seed(1234)
    # Randomly emits 1.0 or 2.0; only 2.0 satisfies keep_input.
    tensor = tf.cond(
        tf.greater(.5, tf.random_uniform([])),
        lambda: tf.constant(1.0),
        lambda: tf.constant(2.0))
    keep_input = tf.equal(tensor, 2.0)
    batch_size = 4

    # Set up the test graph.
    [batch] = sampling_ops._conditional_batch([tensor], keep_input, batch_size)  # pylint: disable=protected-access

    # Check conditional operation.
    with self.test_session():
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      batch_np = batch.eval()

      coord.request_stop()
      coord.join(threads)

    # Check that all elements in batch come from tensors with acceptance prob
    # 1, so that none come from acceptance prob 0.
    self.assertListEqual(list(batch_np), [2.0] * batch_size)

  def testConditionallyEnqueueAndBatchTypes(self):
    """Return type must not depend on the number of input tensors."""
    tensor = tf.constant(1.0)
    keep_input = tf.constant(True)
    batch_size = 4

    # Check that output types are the same for 1 and 2-length input lists.
    output1 = sampling_ops._conditional_batch([tensor], keep_input, batch_size)  # pylint: disable=protected-access
    output2 = sampling_ops._conditional_batch(  # pylint: disable=protected-access
        [tensor, tensor], keep_input, batch_size)
    self.assertEqual(type(output1), type(output2))
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 37.497462 | 115 | 0.668472 |
33b8e9bbe31032d4f4b5a440fae08676797f7bf5
| 11,483 |
py
|
Python
|
carbon_from_biovolume_grabber.py
|
snifflesnrumjum/IFCB_analysis
|
46ce192eecbea1d61a865b963c47d3daef351d44
|
[
"MIT"
] | null | null | null |
carbon_from_biovolume_grabber.py
|
snifflesnrumjum/IFCB_analysis
|
46ce192eecbea1d61a865b963c47d3daef351d44
|
[
"MIT"
] | null | null | null |
carbon_from_biovolume_grabber.py
|
snifflesnrumjum/IFCB_analysis
|
46ce192eecbea1d61a865b963c47d3daef351d44
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 17 2015
This script will grab the biovolume feature data from extracted feature files
for all images in an automated class file.
Can bin data by category or leave each image separate.
@author: Darren Henrichs
"""
# script to extract the biovolume estimates from IFCB V2 feature files
# and sum them per category for class files
# this will read a directory of class files and search the feature path for
# those files, pulling the biovolume from them
# 06/13/2017 DWH
# this script is a modified version of the biovolume grabber script
# this script will take the biovolume value for each cell, convert it to
# units of carbon following formulas from Menden_Deuer and Lessard 2000
# then sum the total carbon per category
from scipy.io import loadmat
import os
import pandas as pd
import numpy as np
__author__ = 'dhenrichs'
# path to the feature files
#feature_path = '/home/transcriptome/synology/IFCB/extracted_features/2017/'
#feature_path = '/home/transcriptome/synology/IFCB/extracted_features_surfside/2017/'
feature_path = '/home/transcriptome/synology/IFCB/cruise_data/processed_data/extracted_features/'
#feature_path = '/data2/processed_data_surfside/extracted_features/2017/'
# path to the class files for binning the biovolumes into categories
#class_path = '/data4/manual_classify_results/temp_alyssa_manual/' #processed_data/class_files/2014/'
#class_path = '/data4/Cruise_data/HRR_cruise/manual_corrections_cruise_data_31Jan2019/'
class_path = '/home/transcriptome/synology/IFCB/manual_corrections_cruise_data/'
#class_path = '/home/transcriptome/synology/IFCB/class_files/CNN_class_files/2017/'
#class_path = '/home/transcriptome/synology/IFCB/class_files_surfside/CNN_class_files/2017/'
# path to where the outfiles with biovolume will be located
#outpath = '/data4/test_biovolume/'
outpath = '/home/transcriptome/synology/IFCB/cruise_data/processed_data/carbon_biovolume/'
#outpath = '/home/transcriptome/synology/IFCB/class_files_surfside/CNN_class_files/carbon_biovolume/'
# limit files to one particular month/day IMPORTANT: if you don't want to limit the date, just put None
date_limiter = 'D2017' # a string (e.g. 'D20170404') or None (you literally have to type None)
#are you using automated class files or manually corrected mat files?
automated_or_manual = 'manual' #can be 'automated' or 'manual'
def grab_biovolume(in_feature, in_class, automated):
'''this function is designed to return the total sum of carbon per category.
this will NOT return the carbon for each image'''
feature_data = load_feature_file(in_feature)
if automated == 'automated':
feature_data['Biovolume'] = convert_biovolume_pixels_to_microns(feature_data['Biovolume'])
category_list, class_data = load_class_file_automated(in_class)
if 'unclassified' not in category_list:
category_list.append('unclassified')
outdata = pd.DataFrame([0]*len(category_list), index=category_list, columns=['Biovolume']).T
for image_cat, feat_size in zip(class_data, feature_data['Biovolume']):
carbon_value = calculate_carbon_from_biovolume(feat_size, image_cat)
outdata[image_cat] += carbon_value
return outdata.T
elif automated == 'manual':
category_list, class_data, roinums = load_class_file_manual(in_class)
converted_data = pd.DataFrame(index=roinums)
converted_data['Category'] = class_data
converted_data = converted_data.dropna()
b = list(map(lambda x: category_list[int(x-1)], converted_data['Category']))
converted_data['Category'] = b
outdata = pd.DataFrame([0.]*len(category_list), index=category_list, columns=['Biovolume'])
converted_data['Biovolume'] = convert_biovolume_pixels_to_microns(feature_data['Biovolume'])
skipped_imgs = 0
for image_cat, feat_size in zip(class_data, converted_data['Biovolume']):
try:
#if not np.isnan(image_cat):
carbon_value = calculate_carbon_from_biovolume(feat_size, category_list[int(image_cat)])
outdata.T[category_list[int(image_cat)-1]] += carbon_value
#print "after", outdata.T[category_list[int(image_cat)-1]]
#print 'CARBON:',carbon_value
#print 'FEAT_SIZE:', feat_size
except:
#print 'Error occurred, skipping image:', image_cat
skipped_imgs += 1
#raise
print 'skipped_images:', skipped_imgs,
return outdata
else:
return None
def convert_biovolume_pixels_to_microns(in_value):
    '''Convert an IFCB biovolume from pixel units to cubic microns.

    The scale factor (microns per pixel) was calibrated from 6um beads on
    IFCB130; the incoming value is assumed to be a biovolume, so the
    conversion is applied squared.
    '''
    microns_per_pixel = 0.2
    return in_value * (microns_per_pixel ** 2)
def calculate_carbon_from_biovolume(invalue, category):
    """Convert a biovolume (cubic microns) to cellular carbon.

    Uses the power-law fits of Table 4 in Menden-Deuer and Lessard (2000):
    one pair of fits for diatoms, another for other protist plankton, each
    split at 3000 cubic microns.

    inputs:
        invalue (float) = biovolume in cubic microns
        category (str)  = category the image was assigned to
    returns:
        carbon_value (float) = carbon computed from the fitted formulas
    """
    diatom_categories = frozenset([
        'Asterionellopsis', 'Centric', 'Ch_simplex', 'Chaetoceros',
        'Corethron', 'Cylindrotheca', 'Cymatosira', 'DactFragCeratul',
        'Ditlyum', 'Eucampia', 'Eucampiacornuta', 'Guinardia', 'Hemiaulus',
        'Leptocylindrus', 'Licmophora', 'Melosira', 'Odontella',
        'Pleurosigma', 'Pseudonitzschia', 'Rhizosolenia', 'Skeletonema',
        'Thalassionema', 'Thalassiosira', 'centric10', 'pennate',
    ])
    # Pick the (log10 scale, exponent) pair for carbon = 10**a * volume**b.
    if category in diatom_categories:
        coeffs = (-0.933, 0.881) if invalue > 3000. else (-0.541, 0.811)
    else:
        coeffs = (-0.583, 0.860) if invalue < 3000. else (-0.665, 0.939)
    log_scale, power = coeffs
    return (10 ** log_scale) * (invalue ** power)
def load_class_file_automated(in_class):
    """Load the automated classifier results and list of class names.

    The .mat file is resolved against the module-level `class_path`.

    Returns:
        category_list = list of category names
        class_data = list classifications for each roi image
    """
    f = loadmat(class_path + in_class)
    # NOTE(review): the nesting of the MATLAB cell arrays differs between the
    # legacy automated classifier and the CNN classifier; the active lines
    # below are the CNN variants, the commented lines are the legacy ones.
    class_data = f['TBclass_above_threshold'] #use this line for automated classifier results; can be 'TBclass_above_optthresh' if available
    #class_data = [category[0][0] for category in class_data] #un-nest the MATLAB stuff #use this line for automated classifier results
    class_data = [category[0] for category in class_data[0]] #un-nest the MATLAB stuff #use this line for CNN automated classifier results
    category_list = f['class2useTB']
    category_list = [category[0] for category in category_list[0]] #use this line for CNN automated classifier
    #category_list = [category[0][0] for category in category_list] #un-nest the MATLAB stuff
    return category_list, class_data
def load_class_file_manual(in_class):
    """Load a manually corrected class .mat file.

    Returns (category_list, class_data, roinums) where class_data holds
    1-based MATLAB class numbers (manual label preferred over automated)
    and roinums the ROI number of each retained image.
    """
    #the structure of the mat file variable with the classes is slightly different in manual files
    #classlist is a table of shape (num_rois x 3) with the columns being: roinum, manual category, automated category
    f = loadmat(class_path + in_class)
    roinums = None
    class_data_manual = f['classlist']
    class_data = f['classlist'][:,2]
    roinums = f['classlist'][:,0]
    # Prefer the manual classification (column 1) over the automated one
    # (column 2) whenever a manual label exists (is not NaN).
    for index, value in enumerate(class_data):
        if not np.isnan(class_data_manual[index, 1]):
            class_data[index] = class_data_manual[index,1]
    # Drop ROIs that end up with no classification at all.
    roinums = [roinums[x] for x,y in enumerate(class_data) if not np.isnan(y)]
    class_data = [x for x in class_data if not np.isnan(x)]
    category_list = f['class2use_manual']
    #print category_list
    # The category cell array is nested differently in different files; fall
    # back to tolerating empty cells.
    try:
        category_list = [category[0] for category in category_list[0]] #this works with some of the files
    except:
        category_list = [category[0] if len(category) > 0 else '' for category in category_list[0]] #this works with the others
    #class_data = [category_list[int(x-1)] for x in class_data]
    #print class_data[0]
    #print class_data_manual[0]
    return category_list, class_data, roinums
def load_feature_file(in_feature):
    """Read an extracted-feature CSV (resolved against the module-level
    `feature_path`) into a DataFrame, using the first column as the index."""
    return pd.read_csv(feature_path + in_feature, index_col=0)
if __name__ == '__main__':
    # grab the list of files from each directory
    list_of_feature_files = os.listdir(feature_path)
    list_of_class_files = os.listdir(class_path)
    print "Feature files: {}".format(len(list_of_feature_files))
    print "Class files : {}".format(len(list_of_class_files))
    # start working through the class files individually
    for class_index, indiv_file in enumerate(list_of_class_files):
        if indiv_file[-3:] == 'mat':
            # Optionally restrict processing to files whose name starts with
            # the `date_limiter` prefix (e.g. 'D2017').
            if not date_limiter or date_limiter == indiv_file[:len(date_limiter)]:
                print "Processing {}...".format(indiv_file),
                features_found = True
                # try:
                if 1:
                    # Linear search: the first 21 characters of both filenames
                    # carry the sample timestamp/instrument ID.
                    feature_index = 0
                    while list_of_feature_files[feature_index][:21] != indiv_file[:21]:
                        feature_index += 1
                        if feature_index >= len(list_of_feature_files)-1:
                            #raise ValueError("The feature file was not found") #this will error out and stop the program
                            print "feature file not found."
                            features_found = False
                            print list_of_feature_files[feature_index][:21], indiv_file[:21]
                            # NOTE(review): this `continue` resumes the while
                            # loop rather than moving to the next class file;
                            # if no match exists the index can run past the
                            # end of the list and raise IndexError — confirm
                            # intended behavior.
                            continue
                    if features_found:
                        # pop() removes the matched feature file so it cannot
                        # be matched again by a later class file.
                        temp_biovolumes = grab_biovolume(list_of_feature_files.pop(feature_index), list_of_class_files[class_index], automated_or_manual)
                        temp_biovolumes.to_csv(outpath + indiv_file[:-3] + 'csv')
                        print "done!"
                #except:
                #    print "something went wrong."
                #break
                #while list_of_feature_files[feature_index][:21] != indiv_file[:21]:
                #    feature_index += 1
                #    if feature_index >= len(list_of_feature_files)-1:
                #        #raise ValueError("The feature file was not found") #this will error out and stop the program
                #        print "feature file not found."; print list_of_feature_files[feature_index][:21], indiv_file[:21]
                #        features_found = False
                #if features_found:
                #    temp_biovolumes = grab_biovolume(list_of_feature_files.pop(feature_index), list_of_class_files[class_index], automated_or_manual)
                #    temp_biovolumes.to_csv(outpath + indiv_file[:-3] + 'csv')
                #    print "done!"
            else:
                continue
| 50.364035 | 154 | 0.67404 |
f6fc1952b9998ff3ec460c7113711405116c4669
| 10,043 |
py
|
Python
|
src/RL/ComplexityKnobs.py
|
Voice-First-AI/generative-music-watson
|
e666f64602baab2e35a66c0a5c4389b1bd5666c9
|
[
"Apache-2.0"
] | null | null | null |
src/RL/ComplexityKnobs.py
|
Voice-First-AI/generative-music-watson
|
e666f64602baab2e35a66c0a5c4389b1bd5666c9
|
[
"Apache-2.0"
] | null | null | null |
src/RL/ComplexityKnobs.py
|
Voice-First-AI/generative-music-watson
|
e666f64602baab2e35a66c0a5c4389b1bd5666c9
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
# Per-layer action vocabularies for the RL agent: integer action id -> name.
# 'bass2' and 'bass3' carry separate 'simple'/'complex' variants; they are
# currently identical because the CHROMATICS action is commented out.
Actions = {
    'bass1': {
        0: 'PLAY_CHORD_PROGRESSION_TONE_SML_JUMP',
        1: 'PLAY_CHORD_PROGRESSION_TONE_MID_JUMP',
        2: 'PLAY_CHORD_PROGRESSION_TONE_BIG_JUMP',
        3: 'PLAY_CHORD_PROGRESSION_TONE_FOR_PRIMARY_KEY',
    },
    'bass2': {
        'simple': {
            0: 'PLAY_HOME_NOTE',
            1: 'PLAY_FIFTH_NOTE',
            2: 'PLAY_OCTAVE_UP',
        },
        'complex': {
            0: 'PLAY_HOME_NOTE',
            1: 'PLAY_FIFTH_NOTE',
            2: 'PLAY_OCTAVE_UP',
            #3: 'CHROMATICS',
        },
    },
    'bass3': {
        'simple': {
            0: 'PLAY_HOME_NOTE',
            1: 'PLAY_FIFTH_NOTE',
            2: 'PLAY_OCTAVE_UP',
        },
        'complex': {
            0: 'PLAY_HOME_NOTE',
            1: 'PLAY_FIFTH_NOTE',
            2: 'PLAY_OCTAVE_UP',
            #3: 'CHROMATICS',
        },
    },
    'melody5': {
        0: 'PLAY_HOME_NOTE',
        1: 'PLAY_CHORD_TONE',
        2: 'PLAY_PASSING_TONE_UP',
        3: 'PLAY_PASSING_TONE_DOWN',
        4: 'PLAY_NEIGHBOR_TONE_UP',
        5: 'PLAY_NEIGHBOR_TONE_DOWN',
        6: 'PLAY_CHORD_TO_CHORD_UP',
        7: 'PLAY_CHORD_TO_CHORD_DOWN',
    },
}  # End Actions

# Exact inverse of Actions (action name -> integer id), same nesting.
ReverseActions = {
    'bass1': {
        'PLAY_CHORD_PROGRESSION_TONE_SML_JUMP': 0,
        'PLAY_CHORD_PROGRESSION_TONE_MID_JUMP': 1,
        'PLAY_CHORD_PROGRESSION_TONE_BIG_JUMP': 2,
        'PLAY_CHORD_PROGRESSION_TONE_FOR_PRIMARY_KEY': 3,
    },
    'bass2': {
        'simple': {
            'PLAY_HOME_NOTE': 0,
            'PLAY_FIFTH_NOTE': 1,
            'PLAY_OCTAVE_UP': 2,
        },
        'complex': {
            'PLAY_HOME_NOTE': 0,
            'PLAY_FIFTH_NOTE': 1,
            'PLAY_OCTAVE_UP': 2,
            #'CHROMATICS': 3,
        },
    },
    'bass3': {
        'simple': {
            'PLAY_HOME_NOTE': 0,
            'PLAY_FIFTH_NOTE': 1,
            'PLAY_OCTAVE_UP': 2,
        },
        'complex': {
            'PLAY_HOME_NOTE': 0,
            'PLAY_FIFTH_NOTE': 1,
            'PLAY_OCTAVE_UP': 2,
            #'CHROMATICS': 3,
        },
    },
    'melody5': {
        'PLAY_HOME_NOTE': 0,
        'PLAY_CHORD_TONE': 1,
        'PLAY_PASSING_TONE_UP': 2,
        'PLAY_PASSING_TONE_DOWN': 3,
        'PLAY_NEIGHBOR_TONE_UP': 4,
        'PLAY_NEIGHBOR_TONE_DOWN': 5,
        'PLAY_CHORD_TO_CHORD_UP': 6,
        'PLAY_CHORD_TO_CHORD_DOWN': 7,
    },
}  # End ReverseActions


def setKnobs(layer, complexity, layerParams):
    """Select the action set and generation 'knobs' for a layer/complexity pair.

    Parameters:
        layer (str): one of 'bass1', 'bass2', 'bass3', 'melody5'. Any other
            value raises (the function never assigns actions for it).
        complexity (str): 'super_simple', 'simple', 'semi_complex' or
            'complex'. For 'bass1' an unrecognized complexity yields
            knobs=None; for 'bass3' anything not ending in 'simple' is
            treated as complex.
        layerParams (dict): extra per-layer parameters; only 'bass1' reads it
            ('minPenalty' and 'maxPenalty' — maxPenalty is currently unused
            but still required).

    Returns:
        (knobs, actions, reverseActions): knobs is a dict of tuning values
        (or None, see above); actions/reverseActions are the matching entries
        of the module-level Actions/ReverseActions tables.
    """
    knobs = None
    if layer == 'bass1':
        actions = Actions[layer]
        reverseActions = ReverseActions[layer]
        minPenalty = layerParams['minPenalty']
        maxPenalty = layerParams['maxPenalty']  # read for interface compatibility; not used below
        # Penalty scale: after the adder that makes all penalties positive,
        # a HIGHER number means a SMALLER penalty and a smaller chord
        # progression jump (see the RL reward shaping for the derivation).
        if complexity == 'super_simple':
            knobs = {
                'syncopation': 'none',  # high, low, medium
                'cprogJumpLow': minPenalty - 5,
                'cprogJumpHigh': minPenalty,
                'CProgComplexity': ['obvious'],
            }
        elif complexity == 'simple':
            knobs = {
                'syncopation': 'none',  # high, low, medium
                'cprogJumpLow': minPenalty - 10,
                'cprogJumpHigh': minPenalty,
                'CProgComplexity': ['obvious'],
            }
        elif complexity == 'semi_complex':
            knobs = {
                'syncopation': 'none',  # high, low, medium
                'cprogJumpLow': minPenalty - 12,
                'cprogJumpHigh': minPenalty - 6,
                'CProgComplexity': ['obvious', 'unusual'],
            }
        elif complexity == 'complex':
            knobs = {
                'syncopation': 'none',  # high, low, medium
                'cprogJumpLow': minPenalty - 24,
                'cprogJumpHigh': minPenalty - 12,
                'CProgComplexity': ['unusual', 'obscure'],
            }
        # end if layer == bass1
    elif layer == 'bass2':
        if complexity == 'super_simple':
            actions = Actions[layer]['simple']
            reverseActions = ReverseActions[layer]['simple']
            knobs = {
                'chordToneThresholdHigh': 0.1,
                'nonChordToneThresholdHigh': 0.05,
                'chordToneThresholdLow': 0.95,
                'nonChordToneThresholdLow': 0.00,
            }
        elif complexity == 'simple':
            actions = Actions[layer]['simple']
            reverseActions = ReverseActions[layer]['simple']
            knobs = {
                'chordToneThresholdHigh': 0.1,
                'nonChordToneThresholdHigh': 0.10,
                'chordToneThresholdLow': 0.90,
                'nonChordToneThresholdLow': 0.00,
            }
        elif complexity == 'semi_complex':
            actions = Actions[layer]['complex']
            # BUGFIX: a stray second assignment used to overwrite this with
            # the 'simple' reverse map, contradicting the 'complex' actions
            # chosen above (value-identical today only because the two maps
            # currently coincide).
            reverseActions = ReverseActions[layer]['complex']
            knobs = {
                'chordToneThresholdHigh': 0.90,
                'nonChordToneThresholdHigh': 0.20,
                'chordToneThresholdLow': 0.80,
                'nonChordToneThresholdLow': 0.10,
            }
        elif complexity == 'complex':
            actions = Actions[layer]['complex']
            reverseActions = ReverseActions[layer]['complex']
            knobs = {
                'chordToneThresholdHigh': 0.80,
                'nonChordToneThresholdHigh': 0.40,
                'chordToneThresholdLow': 0.60,
                'nonChordToneThresholdLow': 0.10,
            }
        # end if layer == bass2
    elif layer == 'bass3':
        # 'super_simple' and 'simple' both end with 'simple'.
        if complexity.endswith('simple'):
            actions = Actions[layer]['simple']
            reverseActions = ReverseActions[layer]['simple']
            knobs = {}
        else:
            actions = Actions[layer]['complex']
            reverseActions = ReverseActions[layer]['complex']
            knobs = {}
        # end if layer == bass3
    elif layer == 'melody5':
        actions = Actions[layer]
        reverseActions = ReverseActions[layer]
        if complexity == 'super_simple':
            knobs = {
                'chordToneThresholdHigh': 0.1,
                'nonChordToneThresholdHigh': 0.10,
                'chordToneThresholdLow': 0.90,
                'nonChordToneThresholdLow': 0.00,
                'gestureMvmt': 8,  # 'verySmall': stay within 8 halfsteps of base home note
            }
        elif complexity == 'simple':
            knobs = {
                'chordToneThresholdHigh': 0.1,
                'nonChordToneThresholdHigh': 0.10,
                'chordToneThresholdLow': 0.90,
                'nonChordToneThresholdLow': 0.00,
                'gestureMvmt': 12,  # 'small': stay within 12 halfsteps of base home note
            }
        elif complexity == 'semi_complex':
            knobs = {
                'chordToneThresholdHigh': 0.60,
                'nonChordToneThresholdHigh': 0.60,
                'chordToneThresholdLow': 0.45,
                'nonChordToneThresholdLow': 0.45,
                'gestureMvmt': 18,  # 'medium': stay within 18 halfsteps of base home note
            }
        elif complexity == 'complex':
            # NOTE(review): thresholds here duplicate the 'simple' settings
            # (only gestureMvmt differs) — looks intentional but confirm.
            knobs = {
                'chordToneThresholdHigh': 0.1,
                'nonChordToneThresholdHigh': 0.10,
                'chordToneThresholdLow': 0.90,
                'nonChordToneThresholdLow': 0.00,
                'gestureMvmt': 24,  # stay within 24 halfsteps of base home note
            }
        # end if layer == melody5
    return knobs, actions, reverseActions
| 34.276451 | 199 | 0.466195 |
04a2294e7327e3dc88c1ddd33a9f8bd270950964
| 407 |
py
|
Python
|
tests/seahub/views/init/test_repo_download_dir.py
|
samuelduann/seahub
|
90ce99e8aa27e3e127aedff2eb2f2ee75228b857
|
[
"Apache-2.0"
] | 420 |
2015-01-03T11:34:46.000Z
|
2022-03-10T07:15:41.000Z
|
tests/seahub/views/init/test_repo_download_dir.py
|
samuelduann/seahub
|
90ce99e8aa27e3e127aedff2eb2f2ee75228b857
|
[
"Apache-2.0"
] | 735 |
2015-01-04T21:22:51.000Z
|
2022-03-31T09:26:07.000Z
|
tests/seahub/views/init/test_repo_download_dir.py
|
samuelduann/seahub
|
90ce99e8aa27e3e127aedff2eb2f2ee75228b857
|
[
"Apache-2.0"
] | 379 |
2015-01-05T17:08:03.000Z
|
2022-03-06T00:11:50.000Z
|
from django.urls import reverse
from seahub.test_utils import BaseTestCase
class RepoDownloadDirTest(BaseTestCase):
    """Smoke test for the `repo_download_dir` view."""

    def setUp(self):
        # The view requires an authenticated session.
        self.login_as(self.user)

    def test_can_render(self):
        # Requesting a folder download should redirect (302) to a URL
        # containing port 8082 — presumably the file server; confirm against
        # the deployment's FILE_SERVER_ROOT setting.
        resp = self.client.get(reverse('repo_download_dir', args=[self.repo.id]) + '?p=' + self.folder)
        self.assertEqual(302, resp.status_code)
        assert '8082' in resp.headers['location']
| 29.071429 | 103 | 0.700246 |
983bd49a117763b699e45c32c51a0544c73ed0e8
| 13,040 |
py
|
Python
|
www/src/Lib/test/test_email/test_policy.py
|
raspberrypieman/brython
|
2cc23d1da6acda604d4a56b4c9d464eb7e374eda
|
[
"BSD-3-Clause"
] | 5,926 |
2015-01-01T07:45:08.000Z
|
2022-03-31T12:34:38.000Z
|
www/src/Lib/test/test_email/test_policy.py
|
raspberrypieman/brython
|
2cc23d1da6acda604d4a56b4c9d464eb7e374eda
|
[
"BSD-3-Clause"
] | 1,728 |
2015-01-01T01:09:12.000Z
|
2022-03-30T23:25:22.000Z
|
check-python33-manual/samples/standard_library_337/Lib/test/test_email/test_policy.py
|
DaveKaretnyk/parsing-utils2
|
40085bbd399fa605f2f2a4708d385a64ffc907de
|
[
"MIT"
] | 574 |
2015-01-02T01:36:10.000Z
|
2022-03-26T10:18:48.000Z
|
import io
import types
import textwrap
import unittest
import email.policy
import email.parser
import email.generator
from email import headerregistry
def make_defaults(base_defaults, differences):
    """Return a new dict equal to *base_defaults* overlaid with *differences*.

    Keys present in *differences* win; neither input mapping is mutated.
    """
    merged = dict(base_defaults)
    merged.update(differences)
    return merged
class PolicyAPITests(unittest.TestCase):
    """Verify the documented defaults, immutability, clone/add semantics,
    defect handling, and header-factory behaviour of email.policy objects."""
    longMessage = True
    # Base default values.
    compat32_defaults = {
        'max_line_length': 78,
        'linesep': '\n',
        'cte_type': '8bit',
        'raise_on_defect': False,
        }
    # These default values are the ones set on email.policy.default.
    # If any of these defaults change, the docs must be updated.
    policy_defaults = compat32_defaults.copy()
    policy_defaults.update({
        'raise_on_defect': False,
        'header_factory': email.policy.EmailPolicy.header_factory,
        'refold_source': 'long',
        })
    # For each policy under test, we give here what we expect the defaults to
    # be for that policy.  The second argument to make defaults is the
    # difference between the base defaults and that for the particular policy.
    new_policy = email.policy.EmailPolicy()
    policies = {
        email.policy.compat32: make_defaults(compat32_defaults, {}),
        email.policy.default: make_defaults(policy_defaults, {}),
        email.policy.SMTP: make_defaults(policy_defaults,
                                         {'linesep': '\r\n'}),
        email.policy.HTTP: make_defaults(policy_defaults,
                                         {'linesep': '\r\n',
                                          'max_line_length': None}),
        email.policy.strict: make_defaults(policy_defaults,
                                           {'raise_on_defect': True}),
        new_policy: make_defaults(policy_defaults, {}),
        }
    # Creating a new policy creates a new header factory.  There is a test
    # later that proves this.
    policies[new_policy]['header_factory'] = new_policy.header_factory
    def test_defaults(self):
        # Every attribute listed above must match the live policy object.
        for policy, expected in self.policies.items():
            for attr, value in expected.items():
                self.assertEqual(getattr(policy, attr), value,
                                 ("change {} docs/docstrings if defaults have "
                                  "changed").format(policy))
    def test_all_attributes_covered(self):
        # Any new public, non-method attribute must be added to the tables above.
        for policy, expected in self.policies.items():
            for attr in dir(policy):
                if (attr.startswith('_') or
                    isinstance(getattr(email.policy.EmailPolicy, attr),
                               types.FunctionType)):
                    continue
                else:
                    self.assertIn(attr, expected,
                                  "{} is not fully tested".format(attr))
    def test_abc(self):
        # Policy is abstract; instantiating it must name all abstract hooks.
        with self.assertRaises(TypeError) as cm:
            email.policy.Policy()
        msg = str(cm.exception)
        abstract_methods = ('fold',
                            'fold_binary',
                            'header_fetch_parse',
                            'header_source_parse',
                            'header_store_parse')
        for method in abstract_methods:
            self.assertIn(method, msg)
    def test_policy_is_immutable(self):
        for policy, defaults in self.policies.items():
            for attr in defaults:
                with self.assertRaisesRegex(AttributeError, attr+".*read-only"):
                    setattr(policy, attr, None)
            with self.assertRaisesRegex(AttributeError, 'no attribute.*foo'):
                policy.foo = None
    def test_set_policy_attrs_when_cloned(self):
        # None of the attributes has a default value of None, so we set them
        # all to None in the clone call and check that it worked.
        for policyclass, defaults in self.policies.items():
            testattrdict = {attr: None for attr in defaults}
            policy = policyclass.clone(**testattrdict)
            for attr in defaults:
                self.assertIsNone(getattr(policy, attr))
    def test_reject_non_policy_keyword_when_called(self):
        for policyclass in self.policies:
            with self.assertRaises(TypeError):
                policyclass(this_keyword_should_not_be_valid=None)
            with self.assertRaises(TypeError):
                policyclass(newtline=None)
    def test_policy_addition(self):
        # Policy addition merges settings; the right-hand operand wins.
        expected = self.policy_defaults.copy()
        p1 = email.policy.default.clone(max_line_length=100)
        p2 = email.policy.default.clone(max_line_length=50)
        added = p1 + p2
        expected.update(max_line_length=50)
        for attr, value in expected.items():
            self.assertEqual(getattr(added, attr), value)
        added = p2 + p1
        expected.update(max_line_length=100)
        for attr, value in expected.items():
            self.assertEqual(getattr(added, attr), value)
        added = added + email.policy.default
        for attr, value in expected.items():
            self.assertEqual(getattr(added, attr), value)
    def test_register_defect(self):
        class Dummy:
            def __init__(self):
                self.defects = []
        obj = Dummy()
        defect = object()
        policy = email.policy.EmailPolicy()
        policy.register_defect(obj, defect)
        self.assertEqual(obj.defects, [defect])
        defect2 = object()
        policy.register_defect(obj, defect2)
        self.assertEqual(obj.defects, [defect, defect2])
    # Minimal stand-ins used by the handle_defect tests below.
    class MyObj:
        def __init__(self):
            self.defects = []
    class MyDefect(Exception):
        pass
    def test_handle_defect_raises_on_strict(self):
        foo = self.MyObj()
        defect = self.MyDefect("the telly is broken")
        with self.assertRaisesRegex(self.MyDefect, "the telly is broken"):
            email.policy.strict.handle_defect(foo, defect)
    def test_handle_defect_registers_defect(self):
        foo = self.MyObj()
        defect1 = self.MyDefect("one")
        email.policy.default.handle_defect(foo, defect1)
        self.assertEqual(foo.defects, [defect1])
        defect2 = self.MyDefect("two")
        email.policy.default.handle_defect(foo, defect2)
        self.assertEqual(foo.defects, [defect1, defect2])
    # Policy subclass that collects defects on the policy itself rather
    # than on the object passed in.
    class MyPolicy(email.policy.EmailPolicy):
        defects = None
        def __init__(self, *args, **kw):
            super().__init__(*args, defects=[], **kw)
        def register_defect(self, obj, defect):
            self.defects.append(defect)
    def test_overridden_register_defect_still_raises(self):
        foo = self.MyObj()
        defect = self.MyDefect("the telly is broken")
        with self.assertRaisesRegex(self.MyDefect, "the telly is broken"):
            self.MyPolicy(raise_on_defect=True).handle_defect(foo, defect)
    def test_overriden_register_defect_works(self):
        foo = self.MyObj()
        defect1 = self.MyDefect("one")
        my_policy = self.MyPolicy()
        my_policy.handle_defect(foo, defect1)
        self.assertEqual(my_policy.defects, [defect1])
        self.assertEqual(foo.defects, [])
        defect2 = self.MyDefect("two")
        my_policy.handle_defect(foo, defect2)
        self.assertEqual(my_policy.defects, [defect1, defect2])
        self.assertEqual(foo.defects, [])
    def test_default_header_factory(self):
        h = email.policy.default.header_factory('Test', 'test')
        self.assertEqual(h.name, 'Test')
        self.assertIsInstance(h, headerregistry.UnstructuredHeader)
        self.assertIsInstance(h, headerregistry.BaseHeader)
    class Foo:
        parse = headerregistry.UnstructuredHeader.parse
    def test_each_Policy_gets_unique_factory(self):
        policy1 = email.policy.EmailPolicy()
        policy2 = email.policy.EmailPolicy()
        policy1.header_factory.map_to_type('foo', self.Foo)
        h = policy1.header_factory('foo', 'test')
        self.assertIsInstance(h, self.Foo)
        self.assertNotIsInstance(h, headerregistry.UnstructuredHeader)
        h = policy2.header_factory('foo', 'test')
        self.assertNotIsInstance(h, self.Foo)
        self.assertIsInstance(h, headerregistry.UnstructuredHeader)
    def test_clone_copies_factory(self):
        # Cloning shares the factory (it is copied by reference, not rebuilt).
        policy1 = email.policy.EmailPolicy()
        policy2 = policy1.clone()
        policy1.header_factory.map_to_type('foo', self.Foo)
        h = policy1.header_factory('foo', 'test')
        self.assertIsInstance(h, self.Foo)
        h = policy2.header_factory('foo', 'test')
        self.assertIsInstance(h, self.Foo)
    def test_new_factory_overrides_default(self):
        mypolicy = email.policy.EmailPolicy()
        myfactory = mypolicy.header_factory
        newpolicy = mypolicy + email.policy.strict
        self.assertEqual(newpolicy.header_factory, myfactory)
        newpolicy = email.policy.strict + mypolicy
        self.assertEqual(newpolicy.header_factory, myfactory)
    def test_adding_default_policies_preserves_default_factory(self):
        newpolicy = email.policy.default + email.policy.strict
        self.assertEqual(newpolicy.header_factory,
                         email.policy.EmailPolicy.header_factory)
        self.assertEqual(newpolicy.__dict__, {'raise_on_defect': True})
    # XXX: Need subclassing tests.
    # For adding subclassed objects, make sure the usual rules apply (subclass
    # wins), but that the order still works (right overrides left).
class TestPolicyPropagation(unittest.TestCase):
    """Check that a policy handed to any parse entry point reaches feedparser
    and is then propagated to the resulting Message objects and generators."""
    # The abstract methods are used by the parser but not by the wrapper
    # functions that call it, so if the exception gets raised we know that the
    # policy was actually propagated all the way to feedparser.
    class MyPolicy(email.policy.Policy):
        def badmethod(self, *args, **kw):
            raise Exception("test")
        # NOTE(review): this binds `header_fetch_parser` (extra 'r'); the
        # abstract hook named in PolicyAPITests.test_abc is
        # `header_fetch_parse`, which therefore stays abstract here.  The
        # tests pass the *class* (not an instance), so this goes unnoticed --
        # confirm whether the typo is intentional.
        fold = fold_binary = header_fetch_parser = badmethod
        header_source_parse = header_store_parse = badmethod
    def test_message_from_string(self):
        with self.assertRaisesRegex(Exception, "^test$"):
            email.message_from_string("Subject: test\n\n",
                                      policy=self.MyPolicy)
    def test_message_from_bytes(self):
        with self.assertRaisesRegex(Exception, "^test$"):
            email.message_from_bytes(b"Subject: test\n\n",
                                     policy=self.MyPolicy)
    def test_message_from_file(self):
        f = io.StringIO('Subject: test\n\n')
        with self.assertRaisesRegex(Exception, "^test$"):
            email.message_from_file(f, policy=self.MyPolicy)
    def test_message_from_binary_file(self):
        f = io.BytesIO(b'Subject: test\n\n')
        with self.assertRaisesRegex(Exception, "^test$"):
            email.message_from_binary_file(f, policy=self.MyPolicy)
    # These are redundant, but we need them for black-box completeness.
    def test_parser(self):
        p = email.parser.Parser(policy=self.MyPolicy)
        with self.assertRaisesRegex(Exception, "^test$"):
            p.parsestr('Subject: test\n\n')
    def test_bytes_parser(self):
        p = email.parser.BytesParser(policy=self.MyPolicy)
        with self.assertRaisesRegex(Exception, "^test$"):
            p.parsebytes(b'Subject: test\n\n')
    # Now that we've established that all the parse methods get the
    # policy in to feedparser, we can use message_from_string for
    # the rest of the propagation tests.
    def _make_msg(self, source='Subject: test\n\n', policy=None):
        # Stores the policy on self so the tests can compare identity.
        self.policy = email.policy.default.clone() if policy is None else policy
        return email.message_from_string(source, policy=self.policy)
    def test_parser_propagates_policy_to_message(self):
        msg = self._make_msg()
        self.assertIs(msg.policy, self.policy)
    def test_parser_propagates_policy_to_sub_messages(self):
        # NOTE(review): the embedded MIME message below has no blank separator
        # lines between headers and bodies (and a ',' before boundary=) --
        # this looks like it was mangled in transit; verify against upstream.
        msg = self._make_msg(textwrap.dedent("""\
            Subject: mime test
            MIME-Version: 1.0
            Content-Type: multipart/mixed, boundary="XXX"
            --XXX
            Content-Type: text/plain
            test
            --XXX
            Content-Type: text/plain
            test2
            --XXX--
            """))
        for part in msg.walk():
            self.assertIs(part.policy, self.policy)
    def test_message_policy_propagates_to_generator(self):
        msg = self._make_msg("Subject: test\nTo: foo\n\n",
                             policy=email.policy.default.clone(linesep='X'))
        s = io.StringIO()
        g = email.generator.Generator(s)
        g.flatten(msg)
        self.assertEqual(s.getvalue(), "Subject: testXTo: fooXX")
    def test_message_policy_used_by_as_string(self):
        msg = self._make_msg("Subject: test\nTo: foo\n\n",
                             policy=email.policy.default.clone(linesep='X'))
        self.assertEqual(msg.as_string(), "Subject: testXTo: fooXX")
if __name__ == '__main__':
    # Run this module's test cases when executed directly.
    unittest.main()
| 40.371517 | 80 | 0.625767 |
39bda80ab2fb5c028dbbcbb14c6913ffe7ebe634
| 147 |
py
|
Python
|
toolchain/riscv/MSYS/python/Lib/test/test_importlib/source/__init__.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | 207 |
2018-10-01T08:53:01.000Z
|
2022-03-14T12:15:54.000Z
|
toolchain/riscv/MSYS/python/Lib/test/test_importlib/source/__init__.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | 8 |
2019-06-29T14:18:51.000Z
|
2022-02-19T07:30:27.000Z
|
toolchain/riscv/MSYS/python/Lib/test/test_importlib/source/__init__.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | 76 |
2020-03-16T01:47:46.000Z
|
2022-03-21T16:37:07.000Z
|
import os
from test.support import load_package_tests
def load_tests(*args):
    """unittest load_tests hook: delegate discovery for this package."""
    package_dir = os.path.dirname(__file__)
    return load_package_tests(package_dir, *args)
| 24.5 | 64 | 0.768707 |
7930da233ffbf0403b81a7d7713e521996513812
| 5,728 |
py
|
Python
|
ProcessOscSignal.py
|
Creative-AI-Research-Group/pysideScore
|
f7499b12bbe123135901fc4ca52a0fccf6107f38
|
[
"MIT"
] | null | null | null |
ProcessOscSignal.py
|
Creative-AI-Research-Group/pysideScore
|
f7499b12bbe123135901fc4ca52a0fccf6107f38
|
[
"MIT"
] | null | null | null |
ProcessOscSignal.py
|
Creative-AI-Research-Group/pysideScore
|
f7499b12bbe123135901fc4ca52a0fccf6107f38
|
[
"MIT"
] | null | null | null |
import glob
import random
from operator import itemgetter
from PySide2.QtGui import QImage, QPainter
MAX_SIZE = 500
MAX_LIFESPAN = 250


class ProcessOscSignal:
    """Turn incoming OSC control signals into a queue of short-lived visuals.

    Each queued visual is a plain dict describing what to draw (type, colour,
    size, position, ...) plus a ``lifespan`` tick counter and a ``direction``
    code used to drift the visual across the canvas on every update tick.
    """

    # Unit steps for the eight compass directions (0 = up, clockwise).
    # Directions 8 and 9 deliberately do not move; >= 10 drifts randomly.
    _STEPS = {
        0: (0, -1),
        1: (1, -1),
        2: (1, 0),
        3: (1, 1),
        4: (0, 1),
        5: (-1, 1),
        6: (-1, 0),
        7: (-1, -1),
    }

    def __init__(self):
        self.queue = []
        self.visual_types = ("line",
                             "ellipse",
                             "rect",
                             "image")
        # NOTE(review): assumes at least one PNG exists under ../images,
        # otherwise process_osc_signal's randint(0, -1) raises -- confirm
        # the deployment layout.
        self.external_images = [QImage(image_to_load) for image_to_load in glob.glob("../images/*.png")]
        self.image_composition_modes = (QPainter.CompositionMode_HardLight,
                                        QPainter.CompositionMode_Difference,
                                        QPainter.CompositionMode_ColorBurn,
                                        QPainter.CompositionMode_ColorDodge,
                                        QPainter.CompositionMode_Multiply,
                                        QPainter.CompositionMode_SoftLight)

    def add_to_queue(self, osc_signal_dict):
        """Queue a new visual when either axis leaves its dead zone."""
        axisa = osc_signal_dict["axisa"]
        axisb = osc_signal_dict["axisb"]
        # BUG FIX: the axisb test used to read `axisb < 0.2 or axisb > 0.2`,
        # which is true for every value except exactly 0.2 and so bypassed the
        # dead zone entirely; it now mirrors the axisa test.
        outside_dead_zone = (axisa < -0.2 or axisa > 0.2 or
                             axisb < -0.2 or axisb > 0.2)
        if outside_dead_zone and len(self.queue) < 10:  # cap the queue at 10
            self.process_osc_signal(osc_signal_dict)

    def process_osc_signal(self, osc_signal_dict):
        """Build a randomised visual dict from the OSC values and enqueue it."""
        axisa, axisb, mlx, mly, kinx, kinz, width, height = itemgetter(
            "axisa", "axisb", "mlx", "mly",
            "kinx", "kinz", "width", "height")(osc_signal_dict)
        final_visual = dict(type=random.choice(self.visual_types),
                            lifespan=self.lifespan(axisa, axisb, mlx, mly, kinx, kinz),
                            color={"r": random.randint(0, 255),
                                   "g": random.randint(0, 255),
                                   "b": random.randint(0, 255),
                                   "a": random.randint(0, 255)},
                            image=random.randint(0, len(self.external_images) - 1),
                            image_transparency=random.random(),
                            image_composition_mode=random.choice(self.image_composition_modes),
                            pen=random.randint(1, MAX_SIZE),
                            size=random.randint(1, MAX_SIZE),
                            position={"x": random.randint(0, width),
                                      "y": random.randint(0, height)},
                            direction=random.randint(0, 11))
        self.queue.append(final_visual)

    def lifespan(self, a, b, c, d, e, f):
        """Derive a non-negative tick count (at most MAX_LIFESPAN) from six signal values."""
        lifespan = a + b + c + d + e + f
        if lifespan < 0:
            lifespan *= -1
        # Repeatedly shrink by a random factor until within range.
        while lifespan > MAX_LIFESPAN:
            lifespan /= random.randint(2, 10)
        return int(lifespan)

    def update_queue(self):
        """Age every queued visual one tick, dropping dead ones and moving the rest.

        BUG FIX: the previous implementation deleted items from self.queue
        while enumerating it, which skipped the element following every
        removal; the queue is now rebuilt from the survivors instead.
        """
        survivors = []
        for visual in self.queue:
            visual["lifespan"] -= 1
            if not visual["lifespan"]:
                continue  # expired -- drop it
            self._advance(visual)
            survivors.append(visual)
        self.queue = survivors

    def _advance(self, visual):
        """Move a visual one pixel according to its direction code."""
        position = visual["position"]
        direction = visual["direction"]
        if direction in self._STEPS:
            dx, dy = self._STEPS[direction]
        elif direction >= 10:
            # Random one-pixel drift in one of the four cardinal directions.
            dx, dy = random.choice(((1, 0), (-1, 0), (0, 1), (0, -1)))
        else:
            dx, dy = 0, 0  # directions 8 and 9 stand still (as before)
        position["x"] += dx
        position["y"] += dy
| 51.142857 | 104 | 0.417947 |
62eaa83559d8af2742da2b7fbdd3d7c44de1474c
| 1,698 |
py
|
Python
|
CTA1Option1 Dataset Exploration.py
|
mwchalumeau/github-slideshow
|
5273cabc7b5ca5837fee35733b6e85e6d67e8131
|
[
"MIT"
] | null | null | null |
CTA1Option1 Dataset Exploration.py
|
mwchalumeau/github-slideshow
|
5273cabc7b5ca5837fee35733b6e85e6d67e8131
|
[
"MIT"
] | null | null | null |
CTA1Option1 Dataset Exploration.py
|
mwchalumeau/github-slideshow
|
5273cabc7b5ca5837fee35733b6e85e6d67e8131
|
[
"MIT"
] | null | null | null |
#############################################################
# Program name - Data Exploration
# input  - NONE
# output - Some exploration statistics printed to stdout
#############################################################
import pandas as pd

# Build the demo DataFrame of people records.
df = pd.DataFrame({
    'name': ['matt', 'lisa', 'richard', 'john', 'Julia', 'jane', 'marlon'],
    'age': [23, 78, 22, 19, 45, 33, 20],
    'gender': ['M', 'F', 'M', 'M', 'M', 'F', 'M'],
    'state': ['DC', 'CO', 'DE', 'VA', 'MD', 'DE', 'NY'],
    'years_of_service': [10, 0, 2, 0, 2, 1, 5],
    'iq': [300, 100, 110, 200, 300, 10, 40],
})

########################################################################
# BEGIN extract a 25% sample of data
#######################################################################
rows = df.sample(frac=.25)
# BUG FIX: sample(frac=...) rounds the row count, so comparing against the
# exact product 0.25 * len(df) (1.75 for 7 rows) could never match; compare
# against the rounded value instead.
if round(0.25 * len(df)) == len(rows):
    print(len(df), len(rows))
# Display the sampled rows.
print('sample of 25%', rows)
# END extract a 25% sample of data

#######################################################################
# BEGIN split categorical variables by gender, sum, mean, count,
# and describe the data
#######################################################################
# Mean iq per gender group.
groupby_gender = df.groupby('gender')
for gender, value in groupby_gender['iq']:
    print((gender, value.mean()))
# Sum of all ages in the data.
SumofAge = df['age'].sum()
print('Sum of Ages', SumofAge)
MeanAge = df['age'].mean()
print('Average Ages', MeanAge)
# Mean of each numeric column.  BUG FIX: pandas >= 2.0 raises a TypeError
# when averaging string columns, so restrict the mean to numeric columns
# explicitly (matching the old implicit behaviour).
print('Means of each column', df.mean(axis=0, numeric_only=True))
# Describe the iq column (count/mean/std/quartiles).
print(df['iq'].describe())
# END
| 33.96 | 84 | 0.48881 |
eec1c5c2eeffdaade5785cd0e74d201fbfb5bde5
| 1,142 |
py
|
Python
|
pykot/api.py
|
ikasamah/pykot
|
2203c679b85463a7b38fce7941af406301cf7819
|
[
"MIT"
] | null | null | null |
pykot/api.py
|
ikasamah/pykot
|
2203c679b85463a7b38fce7941af406301cf7819
|
[
"MIT"
] | null | null | null |
pykot/api.py
|
ikasamah/pykot
|
2203c679b85463a7b38fce7941af406301cf7819
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
import logging
import requests
class V1Api(object):
    """Thin client for the KING OF TIME v1.0 HTTP API.

    Sends bearer-token-authenticated requests and returns the decoded JSON
    body (an empty dict for 204 No Content responses).
    """
    BASE_URL = 'https://api.kingtime.co.jp/v1.0'
    # BASE_URL = 'http://echo.jsontest.com'  # echo endpoint, handy for debugging

    def __init__(self, access_token=None):
        self.log = logging.getLogger('{0.__module__}.{0.__name__}'.format(self.__class__))
        self.access_token = access_token

    def _build_url(self, path):
        # `path` is expected to start with '/'.
        return self.BASE_URL + path

    def _default_headers(self):
        return {'Authorization': "Bearer %s" % self.access_token}

    def request(self, path, params=None, method="GET", headers=None):
        """Issue an HTTP request against the API and return the JSON payload.

        `headers`, when given, is merged over the default Authorization
        header (caller-supplied keys win).
        """
        url = self._build_url(path)
        self.log.info("{method} {url!r} with params {params!r}".format(method=method, url=url, params=params))
        hdrs = self._default_headers()
        if headers is not None:
            hdrs.update(headers)
        # BUG FIX: this call previously passed `headers` (the raw override,
        # usually None) instead of the merged `hdrs`, silently dropping the
        # Authorization header from every request.
        raw = requests.request(method, url, params=params, headers=hdrs)
        # 204 No Content -- nothing to decode.
        if raw.status_code == 204:
            resp = {}
        else:
            resp = raw.json()
        return resp
| 30.052632 | 110 | 0.634851 |
0537bae6b33c220bf21c4a818856bc34273c1d08
| 6,784 |
py
|
Python
|
2018/day_06/solution_p1.py
|
rvaughan/AdventOfCode2017
|
fb9199282c0083cd0b3072c27e63ea83d866efc2
|
[
"MIT"
] | null | null | null |
2018/day_06/solution_p1.py
|
rvaughan/AdventOfCode2017
|
fb9199282c0083cd0b3072c27e63ea83d866efc2
|
[
"MIT"
] | null | null | null |
2018/day_06/solution_p1.py
|
rvaughan/AdventOfCode2017
|
fb9199282c0083cd0b3072c27e63ea83d866efc2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
This code holds the solution for part 1 of day 6 of the Advent of Code for 2018.
"""
from collections import Counter
import sys
def build_grid(test_input):
    """Parse "x, y" coordinate lines into (grid, co_ords).

    co_ords maps a sequential id to [x, y]; grid is a list of rows
    (indexed grid[y][x]) sized to enclose every coordinate with a 2-cell
    margin, '.' everywhere except each coordinate's cell, which holds
    its id.
    """
    co_ords = {}
    max_x = 0
    max_y = 0
    for idx, line in enumerate(test_input):
        x, y = line.split(', ')
        co_ords[idx] = [int(x), int(y)]
        max_x = max(max_x, int(x))
        # BUG FIX: this previously read max(max_x, int(y)), so the grid
        # height tracked the width rather than the tallest coordinate.
        max_y = max(max_y, int(y))
    # Leave a one-cell border beyond the farthest coordinate.
    max_x += 2
    max_y += 2
    grid = [['.'] * max_x for _ in range(max_y)]
    for pos in co_ords:
        grid[co_ords[pos][1]][co_ords[pos][0]] = pos
    return grid, co_ords
def check_grid_cell(co_ords, y, x):
    """Return the id of the unique closest coordinate (Manhattan distance)
    to the cell (x, y), or '.' when two or more coordinates tie.

    BUG FIX: the body previously read the module-global `coords` and
    ignored the `co_ords` parameter entirely; it now uses its argument
    (all in-file callers already pass the same dict, so behaviour at the
    call sites is unchanged).
    """
    closest = []
    closest_dist = None
    for c_id, (cx, cy) in co_ords.items():
        dist = abs(x - cx) + abs(y - cy)
        if closest_dist is None or dist < closest_dist:
            # Strictly closer: restart the candidate list.
            closest_dist = dist
            closest = [c_id]
        elif dist == closest_dist:
            closest.append(c_id)
    if len(closest) == 1:
        return closest[0]
    return '.'
def find_closest_points(grid, coords):
    """Fill every empty cell in place with the id of its closest coordinate
    (or leave '.' where check_grid_cell reports a tie)."""
    for row_idx, row in enumerate(grid):
        for col_idx, cell in enumerate(row):
            if cell == '.':
                row[col_idx] = check_grid_cell(coords, row_idx, col_idx)
def find_edge_coords(grid):
    """Collect the ids that touch any edge of the (assumed square) grid.

    Those regions extend infinitely and must be excluded from the answer.
    """
    edge_ids = set()
    last = len(grid[0]) - 1
    # Top and bottom edges.
    for col in range(len(grid[0])):
        for cell in (grid[0][col], grid[last][col]):
            if cell != '.':
                edge_ids.add(cell)
    # Left and right edges.
    for row in range(len(grid)):
        for cell in (grid[row][0], grid[row][last]):
            if cell != '.':
                edge_ids.add(cell)
    return edge_ids
def find_none_infinite(grid):
    """Return the ids whose regions are finite, i.e. never touch an edge."""
    infinite_ids = find_edge_coords(grid)
    finite = {cell
              for row in grid
              for cell in row
              if cell != '.' and cell not in infinite_ids}
    return list(finite)
def calc_size(grid, coord):
    """Count how many grid cells are claimed by *coord*."""
    return sum(cell == coord for row in grid for cell in row)
def calc_max(grid, coord_list):
    """Return (coord, size) for the coordinate owning the largest region;
    (-1, 0) when coord_list is empty or every region is empty."""
    best_coord, best_size = -1, 0
    for candidate in coord_list:
        candidate_size = calc_size(grid, candidate)
        if candidate_size > best_size:
            best_coord, best_size = candidate, candidate_size
    return best_coord, best_size
def dump_grid(grid):
    """Print the grid one row per line (debug helper).

    BUG FIX: the original used a Python 2 `print` statement, making this
    file a syntax error under Python 3; print() with a single argument
    produces identical output on both interpreters.
    """
    for row in grid:
        print(''.join(str(cell) for cell in row))
def run_extract_test(test_input, exp_id, exp_offset_x, exp_offset_y, exp_width, exp_height):
    """
    Helper method for running some unit tests whilst minimising repetative code.

    NOTE(review): this helper looks vestigial (likely copied from another
    day's solution) -- it calls extract_instructions(), which is not defined
    anywhere in this file, and it is never invoked below.  The failure
    message also formats the builtin `id` function rather than the local
    `x_id`.  Left byte-for-byte untouched pending removal.
    """
    x_id, offset_x, offset_y, width, height = extract_instructions(test_input)
    if x_id != exp_id or offset_x != exp_offset_x or offset_y != exp_offset_y or width != exp_width or height != exp_height:
        print "Test for {0} FAILED. Got a result of {1}, {2}, {3}, {4}, {5}".format(test_input, id, offset_x, offset_y, width, height)
        sys.exit(-1)
    print "Test for {0} passed.".format(test_input)
# Run any tests that we've defined to help validate our code prior to
# trying to solve the puzzle.
# NOTE(review): Python 2 `print` statements throughout; this script cannot
# run under Python 3 as-is.
print ""
print "-----------------"
print "Testing.........."
print "-----------------"
print ""
test_input="""1, 1
1, 6
8, 3
3, 4
5, 5
8, 9"""
input_data = [line for line in test_input.split('\n')]
grid, coords = build_grid(input_data)
# dump_grid(grid)
# Spot-check nearest-coordinate resolution along row 0 (column sweep).
result = check_grid_cell(coords, 0, 0)
assert 0 == result, "Wrong result at 0, 1, got %d not %d" % (result, 0)
result = check_grid_cell(coords, 0, 1)
assert 0 == result, "Wrong result at 0, 1, got %d not %d" % (result, 0)
result = check_grid_cell(coords, 0, 2)
assert 0 == result, "Wrong result at 0, 2, got %d not %d" % (result, 0)
result = check_grid_cell(coords, 0, 3)
assert 0 == result, "Wrong result at 0, 3, got %d not %d" % (result, 0)
result = check_grid_cell(coords, 0, 4)
assert 0 == result, "Wrong result at 0, 4, got %d not %d" % (result, 0)
result = check_grid_cell(coords, 0, 5)
assert '.' == result, "Wrong result at 0, 5, got %d not %d" % (result, '.')
result = check_grid_cell(coords, 0, 6)
assert 2 == result, "Wrong result at 0, 6, got %d not %d" % (result, 2)
result = check_grid_cell(coords, 0, 7)
assert 2 == result, "Wrong result at 0, 7, got %d not %d" % (result, 2)
result = check_grid_cell(coords, 0, 8)
assert 2 == result, "Wrong result at 0, 8, got %d not %d" % (result, 2)
find_closest_points(grid, coords)
# dump_grid(grid)
finite_coords = find_none_infinite(grid)
assert len(finite_coords) == 2, "Wrong number of finite results, expected %d but got %d" % (2, len(finite_coords))
# NOTE(review): the next two asserts depend on the iteration order of the
# set built inside find_none_infinite -- fragile across interpreters.
assert finite_coords[0] == 3, "Incorrect coordinate found %d was looking for %d" % (3, finite_coords[0])
assert finite_coords[1] == 4, "Incorrect coordinate found %d was looking for %d" % (4, finite_coords[1])
coord_size = calc_size(grid, 3)
assert coord_size == 9, "Incorrect size for coord 3, expected %d but got %d" % (9, coord_size)
coord_size = calc_size(grid, 4)
assert coord_size == 17, "Incorrect size for coord 4, expected %d but got %d" % (17, coord_size)
max_coord, max_size = calc_max(grid, finite_coords)
assert max_coord == 4, "Incorrect size for coord 4, expected %d but got %d" % (17, coord_size)
assert max_size == 17, "Incorrect size for coord 4, expected %d but got %d" % (17, coord_size)
print ""
print "-----------------"
print "All Tests PASSED."
print "-----------------"
print ""
# Ok, so if we reach here, then we can be reasonably sure that the code
# above is working correctly. Let's use the actual captcha now.
with open("input.txt", "r") as f:
    input_data = [line for line in f]
grid, coords = build_grid(input_data)
find_closest_points(grid, coords)
finite_coords = find_none_infinite(grid)
max_coord, max_size = calc_max(grid, finite_coords)
print "Solution is coord: {}, size: {}".format(max_coord, max_size)
| 25.992337 | 134 | 0.592276 |
d2defc80486762d367cf5c0c1424c8f242c9128b
| 665 |
py
|
Python
|
imagefactory-plugins/EC2Cloud/__init__.py
|
henrysher/imagefactory
|
6dbcfa773913f1863470adc40c84baac67321bf1
|
[
"Apache-2.0"
] | null | null | null |
imagefactory-plugins/EC2Cloud/__init__.py
|
henrysher/imagefactory
|
6dbcfa773913f1863470adc40c84baac67321bf1
|
[
"Apache-2.0"
] | null | null | null |
imagefactory-plugins/EC2Cloud/__init__.py
|
henrysher/imagefactory
|
6dbcfa773913f1863470adc40c84baac67321bf1
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from EC2Cloud import EC2Cloud as delegate_class
| 36.944444 | 76 | 0.73985 |
6dd78d0b4a4003ed069c8aca05f41121ca7b74ea
| 3,849 |
py
|
Python
|
venv/Lib/site-packages/dash_bootstrap_components/_components/RadioButton.py
|
hanzzhu/chadle
|
ac1d63b0410bb43f3fab362bb00abfc2e8790b9d
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/dash_bootstrap_components/_components/RadioButton.py
|
hanzzhu/chadle
|
ac1d63b0410bb43f3fab362bb00abfc2e8790b9d
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/dash_bootstrap_components/_components/RadioButton.py
|
hanzzhu/chadle
|
ac1d63b0410bb43f3fab362bb00abfc2e8790b9d
|
[
"Apache-2.0"
] | null | null | null |
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class RadioButton(Component):
    """A RadioButton component.
Creates a single radio button. Use the `checked` prop in your callbacks.
Keyword arguments:
- id (string; optional):
    The ID of this component, used to identify dash components in
    callbacks. The ID needs to be unique across all of the components
    in an app.
- checked (boolean; default False):
    Whether RadioButton has been checked or not.
- className (string; optional):
    The class of the container (div).
- disabled (boolean; optional):
    Disable the RadioButton.
- key (string; optional):
    A unique identifier for the component, used to improve performance
    by React.js while rendering components See
    https://reactjs.org/docs/lists-and-keys.html for more info.
- loading_state (dict; optional):
    Object that holds the loading state object coming from
    dash-renderer.
    `loading_state` is a dict with keys:
    - component_name (string; optional):
        Holds the name of the component that is loading.
    - is_loading (boolean; optional):
        Determines if the component is loading or not.
    - prop_name (string; optional):
        Holds which property is loading.
- name (string; optional):
    The name of the control, which is submitted with the form data.
- persisted_props (list of a value equal to: 'checked's; default ['checked']):
    Properties whose user interactions will persist after refreshing
    the component or the page. Since only `value` is allowed this prop
    can normally be ignored.
- persistence (boolean | string | number; optional):
    Used to allow user interactions in this component to be persisted
    when the component - or the page - is refreshed. If `persisted` is
    truthy and hasn't changed from its previous value, a `value` that
    the user has changed while using the app will keep that change, as
    long as the new `value` also matches what was given originally.
    Used in conjunction with `persistence_type`.
- persistence_type (a value equal to: 'local', 'session', 'memory'; default 'local'):
    Where persisted user changes will be stored: memory: only kept in
    memory, reset on page refresh. local: window.localStorage, data is
    kept after the browser quit. session: window.sessionStorage, data
    is cleared once the browser quit.
- style (dict; optional):
    The style of the container (div)."""
    @_explicitize_args
    def __init__(self, id=Component.UNDEFINED, checked=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, key=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, disabled=Component.UNDEFINED, name=Component.UNDEFINED, **kwargs):
        # Standard Dash component plumbing: record the declared prop names
        # so the base Component can validate and serialise them.
        self._prop_names = ['id', 'checked', 'className', 'disabled', 'key', 'loading_state', 'name', 'persisted_props', 'persistence', 'persistence_type', 'style']
        self._type = 'RadioButton'
        self._namespace = 'dash_bootstrap_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['id', 'checked', 'className', 'disabled', 'key', 'loading_state', 'name', 'persisted_props', 'persistence', 'persistence_type', 'style']
        self.available_wildcard_properties = []
        # Keep only the kwargs the caller explicitly supplied
        # (_explicit_args is injected by the @_explicitize_args decorator).
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # This component declares no required props, so the loop below
        # never raises; it is kept by the code generator for uniformity.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(RadioButton, self).__init__(**args)
| 44.241379 | 368 | 0.704599 |
a8dc10dc350e14ddf5e82f92838c50241637ef17
| 175 |
py
|
Python
|
wsc_django/wsc_django/apps/ws/routing.py
|
hzh595395786/wsc_django
|
c0a4de1a4479fe83f36108c1fdd4d68d18348b8d
|
[
"MIT"
] | 2 |
2021-02-07T05:56:46.000Z
|
2021-05-12T02:11:24.000Z
|
wsc_django/wsc_django/apps/ws/routing.py
|
hzh595395786/wsc_django
|
c0a4de1a4479fe83f36108c1fdd4d68d18348b8d
|
[
"MIT"
] | null | null | null |
wsc_django/wsc_django/apps/ws/routing.py
|
hzh595395786/wsc_django
|
c0a4de1a4479fe83f36108c1fdd4d68d18348b8d
|
[
"MIT"
] | null | null | null |
from django.urls import path
from ws import consumers
# WebSocket URL patterns -- presumably included by the project's ASGI
# router (channels convention); confirm against the ASGI application config.
websocket_urlpatterns = [
    path('ws/admin/websocket/', consumers.AdminWebSocketConsumer.as_asgi()),  # admin-backend websocket
]
| 29.166667 | 92 | 0.777143 |
ee158fe00076ccba5138b1d88bed8e364276e2aa
| 4,123 |
py
|
Python
|
tests/test_dict_indexer.py
|
micro-pixel/gnes
|
388d1ba718ec04eedaaff3ce34da43689c197ee7
|
[
"Apache-2.0"
] | 2 |
2020-07-05T03:51:44.000Z
|
2022-02-18T05:56:37.000Z
|
tests/test_dict_indexer.py
|
cmy9068/gnes
|
44a54be4c80108ac65b2450b4af8deded6da3339
|
[
"Apache-2.0"
] | null | null | null |
tests/test_dict_indexer.py
|
cmy9068/gnes
|
44a54be4c80108ac65b2450b4af8deded6da3339
|
[
"Apache-2.0"
] | 1 |
2020-10-28T15:07:36.000Z
|
2020-10-28T15:07:36.000Z
|
import os
import unittest
from shutil import rmtree
import grpc
from gnes.cli.parser import set_frontend_parser, set_preprocessor_parser, set_indexer_parser
from gnes.indexer.base import BaseIndexer
from gnes.indexer.doc.filesys import DirectoryIndexer
from gnes.preprocessor.base import BasePreprocessor
from gnes.proto import gnes_pb2, gnes_pb2_grpc, RequestGenerator
from gnes.service.base import SocketType, ServiceManager
from gnes.service.frontend import FrontendService
from gnes.service.indexer import IndexerService
from gnes.service.preprocessor import PreprocessorService
class TestDictIndexer(unittest.TestCase):
    """End-to-end tests for the GNES DictIndexer / DirectoryIndexer pipelines.

    ``setUp`` loads the sample videos and builds a DirectoryIndexer-backed
    store; ``test_pymode`` spins up a frontend/preprocessor/indexer service
    chain over gRPC and indexes a 26-line Chinese text document.
    """

    def setUp(self):
        self.dirname = os.path.dirname(__file__)
        self.video_path = os.path.join(self.dirname, 'videos')
        # Read every sample video into memory.  Each file is opened via a
        # context manager so the handle is closed promptly (the original
        # list comprehension leaked one open file per video).
        self.video_bytes = []
        for fname in os.listdir(self.video_path):
            with open(os.path.join(self.video_path, fname), 'rb') as fp:
                self.video_bytes.append(fp.read())
        self.pipeline_name = 'pipe-gif'
        self.pipeline_yml_path = os.path.join(self.dirname, 'yaml/%s.yml' % self.pipeline_name)
        self.data_path = './test_chunkleveldb'
        self.dump_path = os.path.join(self.dirname, 'indexer.bin')
        self.init_db()

    def test_pymode(self):
        # Make sure local gRPC traffic is not routed through an HTTP proxy.
        os.unsetenv('http_proxy')
        os.unsetenv('https_proxy')
        args = set_frontend_parser().parse_args([
            '--dump_route', 'test.json'
        ])
        p_args = set_preprocessor_parser().parse_args([
            '--port_in', str(args.port_out),
            '--port_out', '5531',
            '--socket_in', str(SocketType.PULL_CONNECT),
            '--socket_out', str(SocketType.PUSH_BIND),
            '--yaml_path', 'SentSplitPreprocessor'
        ])
        e_args = set_indexer_parser().parse_args([
            '--port_in', str(p_args.port_out),
            '--port_out', str(args.port_in),
            '--socket_in', str(SocketType.PULL_CONNECT),
            '--socket_out', str(SocketType.PUSH_CONNECT),
            '--yaml_path', '!DictIndexer {gnes_config: {name: dummy_dict_indexer}}',
        ])
        # Frontend -> preprocessor -> indexer chain; 70 MB message caps allow
        # large payloads through the channel.
        with ServiceManager(IndexerService, e_args), \
                ServiceManager(PreprocessorService, p_args), \
                FrontendService(args), \
                grpc.insecure_channel('%s:%s' % (args.grpc_host, args.grpc_port),
                                      options=[('grpc.max_send_message_length', 70 * 1024 * 1024),
                                               ('grpc.max_receive_message_length', 70 * 1024 * 1024)]) as channel:
            stub = gnes_pb2_grpc.GnesRPCStub(channel)
            all_bytes = []
            with open(os.path.join(self.dirname, '26-doc-chinese.txt'), 'r', encoding='utf8') as fp:
                for v in fp:
                    if v.strip():
                        all_bytes.append(v.encode())
            for r in stub.StreamCall(RequestGenerator.index(all_bytes)):
                print(r)
        # One indexed document per non-empty line in the fixture file.
        bi = BaseIndexer.load('dummy_dict_indexer.bin')
        self.assertEqual(bi.num_docs, 26)
        print(bi.query([0]))

    def tearDown(self):
        # Remove on-disk artifacts so repeated runs start clean.
        if os.path.exists(self.data_path):
            rmtree(self.data_path)
        if os.path.exists('dummy_dict_indexer.bin'):
            os.remove('dummy_dict_indexer.bin')

    def init_db(self):
        """Build a DirectoryIndexer over one preprocessed sample video."""
        self.db = DirectoryIndexer(self.data_path)
        self.d = gnes_pb2.Document()
        self.d.doc_id = 0
        self.d.raw_bytes = self.video_bytes[0]
        preprocess = BasePreprocessor.load_yaml(self.pipeline_yml_path)
        preprocess.apply(self.d)
        # The same document is registered under one key per sample video.
        self.db.add(list(range(len(self.video_bytes))), [self.d])
        self.assertEqual(self.db.num_docs, len(self.video_bytes))

    def test_add_docs(self):
        # The indexer writes one directory per document, containing one file
        # per chunk plus one extra bookkeeping file.
        self.assertTrue(os.path.exists(os.path.join(self.data_path, str(self.d.doc_id))))
        self.assertEqual(len(self.d.chunks), len(os.listdir(os.path.join(self.data_path, str(self.d.doc_id)))) - 1)

    def test_query_docs(self):
        # Only doc_id 0 was actually stored, so exactly one query hit is
        # expected to be non-empty.
        query_list = [0, 1, 2]
        res = self.db.query(query_list)
        num_non_empty = sum(1 for d in res if d)
        self.assertEqual(num_non_empty, 1)
| 38.896226 | 115 | 0.626243 |
1878cc5e4e563c8eb4f6f5a2ea4d879c3f206220
| 3,006 |
py
|
Python
|
tests/pytests/schedtester.py
|
vojtechcima/rain
|
39c4b1de952da6673e9eb3247157b9b29b028942
|
[
"MIT"
] | null | null | null |
tests/pytests/schedtester.py
|
vojtechcima/rain
|
39c4b1de952da6673e9eb3247157b9b29b028942
|
[
"MIT"
] | null | null | null |
tests/pytests/schedtester.py
|
vojtechcima/rain
|
39c4b1de952da6673e9eb3247157b9b29b028942
|
[
"MIT"
] | null | null | null |
from rain.client import blob, remote
class Governor:
    """Specification of one test governor: a CPU count plus the server-side
    id that gets filled in once the governor is matched against the server."""

    def __init__(self, cpus):
        # The id is unknown until a Scenario pairs this spec with a running
        # governor reported by the server.
        self.governor_id = None
        self.cpus = cpus
class Scenario:
    """Scheduler-test harness: starts governors in a test environment,
    creates objects/tasks annotated with expected placements, then verifies
    after a run that each task executed on one of its expected governors."""

    def __init__(self, test_env, governors):
        # Governors must not have been assigned server-side ids yet.
        assert all(w.governor_id is None for w in governors)
        self.governors = tuple(governors)
        self.task_expected_placement = {}
        test_env.start(governor_defs=[w.cpus for w in governors])
        self.client = test_env.client
        # Match each server-reported governor to a requested Governor by CPU
        # count and record its server-side id.  NOTE(review): with duplicate
        # CPU counts the pairing is first-come-first-served.
        ws = list(self.governors)
        for i, governor_info in enumerate(self.client.get_server_info()["governors"]):
            cpus = int(governor_info["resources"]["cpus"])
            for w in ws:
                if w.cpus == cpus:
                    break
            else:
                raise Exception("Requested governor not found")
            ws.remove(w)
            w.governor_id = governor_info["governor_id"]
        assert not ws  # every requested governor was matched exactly once
        self.session = self.client.new_session()

    def new_object(self, governors, size):
        """Create a placeholder data object pinned to *governors* and tagged
        with a pretended *size* via the "__test" attribute."""
        # Accept either a single Governor or an iterable of them.
        if isinstance(governors, Governor):
            governors = (governors,)
        assert all(w.governor_id for w in governors)
        with self.session.bind_only():
            obj = blob(b"")
        obj.attributes["__test"] = {
            "governors": [w.governor_id for w in governors],
            "size": size
        }
        return obj

    # TODO: Configurable size of output, now output has zero size
    def new_task(self, inputs, cpus=1, expect_governor=None, label=None):
        """Create a testing task over *inputs*; optionally record on which
        governor(s) the scheduler is expected to place it."""
        with self.session.bind_only():
            task = testing_task(inputs)
        task.test_label = label
        print("Creating task {} as {}".format(label, task))
        if cpus != 1:
            task.attributes["resources"]["cpus"] = cpus
        if expect_governor:
            if isinstance(expect_governor, Governor):
                expect_governor = (expect_governor,)
            self.task_expected_placement[task] = expect_governor
        return task

    def run(self):
        """Submit the session, wait for completion, then check that every
        tracked task ran on one of its expected governors; raise otherwise."""
        with self.session:
            self.session.submit()
            self.session.wait_all()
            self.session.update(list(self.task_expected_placement))
            error = False
            for task, expected_governors in self.task_expected_placement.items():
                placement = task.attributes["info"]["governor"]
                print("Task {} computed on {}".format(task.test_label, placement))
                if placement not in [w.governor_id for w in expected_governors]:
                    print("!!! Task: ",
                          task.id,
                          "was computed on",
                          placement,
                          "but expected on",
                          [w.governor_id for w in expected_governors])
                    error = True
            if error:
                raise Exception("Scenario failed, see stdout for more details")
@remote()
def testing_task(ctx, *args):
    # Dummy remote task used by the scheduler tests: accepts any inputs and
    # produces an empty (zero-byte) output object.
    return b""
| 34.953488 | 86 | 0.55489 |
b4c2b2a14dc816bf2e144e461619bb949566d948
| 1,610 |
py
|
Python
|
ocr/code/neural_network_design.py
|
kennywbin/500lines
|
e72f05bac2087f368251d3f263ae325c268e5171
|
[
"CC-BY-3.0"
] | null | null | null |
ocr/code/neural_network_design.py
|
kennywbin/500lines
|
e72f05bac2087f368251d3f263ae325c268e5171
|
[
"CC-BY-3.0"
] | null | null | null |
ocr/code/neural_network_design.py
|
kennywbin/500lines
|
e72f05bac2087f368251d3f263ae325c268e5171
|
[
"CC-BY-3.0"
] | null | null | null |
"""
In order to decide how many hidden nodes the hidden layer should have,
split up the data set into training and testing data and create networks
with various hidden node counts (5, 10, 15, ... 45), testing the performance
for each.
The best-performing node count is used in the actual system. If multiple counts
perform similarly, choose the smallest count for a smaller network with fewer computations.
"""
import numpy as np
from ocr import OCRNeuralNetwork
from sklearn.cross_validation import train_test_split
def test(data_matrix, data_labels, test_indices, nn):
avg_sum = 0
for j in xrange(100):
correct_guess_count = 0
for i in test_indices:
test = data_matrix[i]
prediction = nn.predict(test)
if data_labels[i] == prediction:
correct_guess_count += 1
avg_sum += (correct_guess_count / float(len(test_indices)))
return avg_sum / 100
# NOTE: this script is Python 2 (print statements, xrange) and relies on the
# long-deprecated ``sklearn.cross_validation`` module.
# Load data samples and labels into matrix
data_matrix = np.loadtxt(open('data.csv', 'rb'), delimiter = ',').tolist()
data_labels = np.loadtxt(open('dataLabels.csv', 'rb')).tolist()
# Create training and testing sets.
train_indices, test_indices = train_test_split(list(range(5000)))
print "PERFORMANCE"
print "-----------"
# Try various number of hidden nodes and see what performs best
# (5, 10, ..., 45); each network is trained on the same split.
for i in xrange(5, 50, 5):
    nn = OCRNeuralNetwork(i, data_matrix, data_labels, train_indices, False)
    performance = str(test(data_matrix, data_labels, test_indices, nn))
    print "{i} Hidden Nodes: {val}".format(i=i, val=performance)
| 37.44186 | 92 | 0.692547 |
b7c3b69c0360c1fd5b988a3247654f1563c63a3c
| 416 |
py
|
Python
|
sfeprapy/func/fire_external_ec.py
|
fsepy/sfeprapy
|
0b1c11b30034793e1231f599cf41e496a9ec56aa
|
[
"MIT"
] | 4 |
2019-08-22T14:30:43.000Z
|
2020-08-30T08:20:48.000Z
|
sfeprapy/func/fire_external_ec.py
|
fsepy/sfeprapy
|
0b1c11b30034793e1231f599cf41e496a9ec56aa
|
[
"MIT"
] | 3 |
2019-10-26T11:31:58.000Z
|
2019-11-23T11:27:11.000Z
|
sfeprapy/func/fire_external_ec.py
|
fsepy/sfeprapy
|
0b1c11b30034793e1231f599cf41e496a9ec56aa
|
[
"MIT"
] | null | null | null |
import numpy as np
def fire(time, temperature_initial):
    """External fire gas temperature (Eurocode-style double-exponential curve).

    :param time: elapsed time in seconds (scalar or numpy array).
    :param temperature_initial: ambient temperature in kelvin.
    :return: gas temperature in kelvin, same shape as ``time``.

    The original implementation used ``/=`` and ``-=``, which mutate a
    caller-supplied numpy array in place; out-of-place arithmetic is used
    here so arguments are never modified.
    NOTE(review): dividing by 1200 matches neither s->min (60) nor s->h
    (3600), yet the original comment said "hours" -- confirm the intended
    time unit against the source standard.
    """
    time = time / 1200.0
    temperature_initial = temperature_initial - 273.15  # kelvin -> celsius
    temperature = (
        660 * (1 - 0.687 * np.exp(-0.32 * time) - 0.313 * np.exp(-3.8 * time))
        + temperature_initial
    )
    return temperature + 273.15  # celsius -> kelvin
| 34.666667 | 87 | 0.661058 |
50f9c75dc26b4c73e2a6300cbd8c1873c8268c66
| 582 |
py
|
Python
|
changes/api/serializer/models/project.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | 1 |
2015-11-08T13:00:44.000Z
|
2015-11-08T13:00:44.000Z
|
changes/api/serializer/models/project.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
changes/api/serializer/models/project.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
from changes.api.serializer import Serializer, register
from changes.models.project import Project
from changes.utils.http import build_uri
@register(Project)
class ProjectSerializer(Serializer):
    """Serialize a Project model into its JSON-friendly dict form."""

    def serialize(self, instance, attrs):
        project_link = build_uri('/projects/{0}/'.format(instance.slug))
        payload = {
            'id': instance.id.hex,
            'slug': instance.slug,
            'name': instance.name,
            'repository': {'id': instance.repository_id},
            'dateCreated': instance.date_created,
            'link': project_link,
        }
        return payload
| 30.631579 | 70 | 0.606529 |
4b633cb758a2f05189601fe27426ac24b046e189
| 38 |
py
|
Python
|
src/gutenberg_downloader.py
|
elijah-rou/InvictusMicroservice
|
2f200dccdd28d32c8fb3cc524e128ba13583b7f1
|
[
"MIT"
] | null | null | null |
src/gutenberg_downloader.py
|
elijah-rou/InvictusMicroservice
|
2f200dccdd28d32c8fb3cc524e128ba13583b7f1
|
[
"MIT"
] | null | null | null |
src/gutenberg_downloader.py
|
elijah-rou/InvictusMicroservice
|
2f200dccdd28d32c8fb3cc524e128ba13583b7f1
|
[
"MIT"
] | null | null | null |
# One-shot setup script: download the NLTK Gutenberg corpus into the local
# NLTK data directory so later code can load it offline.
import nltk
nltk.download('gutenberg')
| 19 | 26 | 0.815789 |
717883bd1be70eb1ff0f859fb851b667b6af22d0
| 1,069 |
py
|
Python
|
Python/Fundamentals/Lists lab-exercise(Advanced)/The Office.py
|
EduardV777/Softuni-Python-Exercises
|
79db667028aea7dfecb3dbbd834c752180c50f44
|
[
"Unlicense"
] | null | null | null |
Python/Fundamentals/Lists lab-exercise(Advanced)/The Office.py
|
EduardV777/Softuni-Python-Exercises
|
79db667028aea7dfecb3dbbd834c752180c50f44
|
[
"Unlicense"
] | null | null | null |
Python/Fundamentals/Lists lab-exercise(Advanced)/The Office.py
|
EduardV777/Softuni-Python-Exercises
|
79db667028aea7dfecb3dbbd834c752180c50f44
|
[
"Unlicense"
] | null | null | null |
# "The Office" exercise: read whitespace-separated happiness rates and an
# improvement factor; multiply each rate by the factor, then report whether
# at least half the employees are at or above the (improved) average.
happinessRates = input()
improvementFactor = int(input())

# str.split() tolerates repeated/leading/trailing whitespace; the original
# hand-rolled character scanner crashed (int("")) on a leading space.
happinessRatesIncreased = [
    int(token) * improvementFactor for token in happinessRates.split()
]

avgHappiness = sum(happinessRatesIncreased) / len(happinessRatesIncreased)
happyEmployees = [rate for rate in happinessRatesIncreased if rate >= avgHappiness]

# Output format is unchanged from the original script.
score = f"Score: {len(happyEmployees)}/{len(happinessRatesIncreased)}."
if len(happyEmployees) >= len(happinessRatesIncreased) // 2:
    print(f"{score} Employees are happy!")
else:
    print(f"{score} Employees are not happy!")
| 39.592593 | 100 | 0.678204 |
d5555beab42391bb32d833de8ce3662404f33c65
| 38,461 |
py
|
Python
|
python/cudf/cudf/tests/test_multiindex.py
|
manopapad/cudf
|
7c8961459e67f0a82f79327a5fd87ef6887ecc83
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/cudf/tests/test_multiindex.py
|
manopapad/cudf
|
7c8961459e67f0a82f79327a5fd87ef6887ecc83
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/cudf/tests/test_multiindex.py
|
manopapad/cudf
|
7c8961459e67f0a82f79327a5fd87ef6887ecc83
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
"""
Test related to MultiIndex
"""
import itertools
import operator
import re
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core.column import as_column
from cudf.core.index import as_index
from cudf.tests.utils import assert_eq, assert_exceptions_equal, assert_neq
def test_multiindex_levels_codes_validation():
levels = [["a", "b"], ["c", "d"]]
# Codes not a sequence of sequences
assert_exceptions_equal(
lfunc=pd.MultiIndex,
rfunc=cudf.MultiIndex,
lfunc_args_and_kwargs=([levels, [0, 1]],),
rfunc_args_and_kwargs=([levels, [0, 1]],),
compare_error_message=False,
)
# Codes don't match levels
assert_exceptions_equal(
lfunc=pd.MultiIndex,
rfunc=cudf.MultiIndex,
lfunc_args_and_kwargs=([levels, [[0], [1], [1]]],),
rfunc_args_and_kwargs=([levels, [[0], [1], [1]]],),
compare_error_message=False,
)
# Largest code greater than number of levels
assert_exceptions_equal(
lfunc=pd.MultiIndex,
rfunc=cudf.MultiIndex,
lfunc_args_and_kwargs=([levels, [[0, 1], [0, 2]]],),
rfunc_args_and_kwargs=([levels, [[0, 1], [0, 2]]],),
compare_error_message=False,
)
# Unequal code lengths
assert_exceptions_equal(
lfunc=pd.MultiIndex,
rfunc=cudf.MultiIndex,
lfunc_args_and_kwargs=([levels, [[0, 1], [0]]],),
rfunc_args_and_kwargs=([levels, [[0, 1], [0]]],),
compare_error_message=False,
)
# Didn't pass levels and codes
assert_exceptions_equal(
lfunc=pd.MultiIndex, rfunc=cudf.MultiIndex, compare_error_message=False
)
# Didn't pass non zero levels and codes
assert_exceptions_equal(
lfunc=pd.MultiIndex,
rfunc=cudf.MultiIndex,
lfunc_args_and_kwargs=([[], []],),
rfunc_args_and_kwargs=([[], []],),
)
def test_multiindex_construction():
    """Positional and keyword construction must both match pandas."""
    levels = [["a", "b"], ["c", "d"]]
    codes = [[0, 1], [1, 0]]
    # Positional arguments.
    expected = pd.MultiIndex(levels, codes)
    got = cudf.MultiIndex(levels, codes)
    assert_eq(expected, got)
    # Keyword arguments must produce the identical index.
    expected = pd.MultiIndex(levels, codes)
    got = cudf.MultiIndex(levels=levels, codes=codes)
    assert_eq(expected, got)
def test_multiindex_types():
    """The same codes must work over int, float and string level values."""
    codes = [[0, 1], [1, 0]]
    for levels in (
        [[0, 1], [2, 3]],
        [[1.2, 2.1], [1.3, 3.1]],
        [["a", "b"], ["c", "d"]],
    ):
        pmi = pd.MultiIndex(levels, codes)
        mi = cudf.MultiIndex(levels, codes)
        assert_eq(pmi, mi)
def test_multiindex_df_assignment():
pdf = pd.DataFrame({"x": [1, 2, 3]})
gdf = cudf.from_pandas(pdf)
pdf.index = pd.MultiIndex([["a", "b"], ["c", "d"]], [[0, 1, 0], [1, 0, 1]])
gdf.index = cudf.MultiIndex(
levels=[["a", "b"], ["c", "d"]], codes=[[0, 1, 0], [1, 0, 1]]
)
assert_eq(pdf, gdf)
def test_multiindex_series_assignment():
ps = pd.Series([1, 2, 3])
gs = cudf.from_pandas(ps)
ps.index = pd.MultiIndex([["a", "b"], ["c", "d"]], [[0, 1, 0], [1, 0, 1]])
gs.index = cudf.MultiIndex(
levels=[["a", "b"], ["c", "d"]], codes=[[0, 1, 0], [1, 0, 1]]
)
assert_eq(ps, gs)
def test_string_index():
from cudf.core.index import StringIndex
pdf = pd.DataFrame(np.random.rand(5, 5))
gdf = cudf.from_pandas(pdf)
stringIndex = ["a", "b", "c", "d", "e"]
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = np.array(["a", "b", "c", "d", "e"])
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = StringIndex(["a", "b", "c", "d", "e"], name="name")
pdf.index = stringIndex.to_pandas()
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = as_index(as_column(["a", "b", "c", "d", "e"]), name="name")
pdf.index = stringIndex.to_pandas()
gdf.index = stringIndex
assert_eq(pdf, gdf)
def test_multiindex_row_shape():
pdf = pd.DataFrame(np.random.rand(0, 5))
gdf = cudf.from_pandas(pdf)
pdfIndex = pd.MultiIndex([["a", "b", "c"]], [[0]])
pdfIndex.names = ["alpha"]
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
assert_exceptions_equal(
lfunc=operator.setitem,
rfunc=operator.setitem,
lfunc_args_and_kwargs=([], {"a": pdf, "b": "index", "c": pdfIndex}),
rfunc_args_and_kwargs=([], {"a": gdf, "b": "index", "c": gdfIndex}),
)
@pytest.fixture
def pdf():
return pd.DataFrame(np.random.rand(7, 5))
@pytest.fixture
def gdf(pdf):
return cudf.from_pandas(pdf)
@pytest.fixture
def pdfIndex():
pdfIndex = pd.MultiIndex(
[
["a", "b", "c"],
["house", "store", "forest"],
["clouds", "clear", "storm"],
["fire", "smoke", "clear"],
[
np.datetime64("2001-01-01", "ns"),
np.datetime64("2002-01-01", "ns"),
np.datetime64("2003-01-01", "ns"),
],
],
[
[0, 0, 0, 0, 1, 1, 2],
[1, 1, 1, 1, 0, 0, 2],
[0, 0, 2, 2, 2, 0, 1],
[0, 0, 0, 1, 2, 0, 1],
[1, 0, 1, 2, 0, 0, 1],
],
)
pdfIndex.names = ["alpha", "location", "weather", "sign", "timestamp"]
return pdfIndex
@pytest.fixture
def pdfIndexNulls():
pdfIndex = pd.MultiIndex(
[
["a", "b", "c"],
["house", "store", "forest"],
["clouds", "clear", "storm"],
],
[
[0, 0, 0, -1, 1, 1, 2],
[1, -1, 1, 1, 0, 0, -1],
[-1, 0, 2, 2, 2, 0, 1],
],
)
pdfIndex.names = ["alpha", "location", "weather"]
return pdfIndex
def test_from_pandas(pdf, pdfIndex):
pdf.index = pdfIndex
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
def test_multiindex_transpose(pdf, pdfIndex):
pdf.index = pdfIndex
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.transpose(), gdf.transpose())
def test_from_pandas_series():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
).set_index(["a", "b"])
result = cudf.from_pandas(pdf)
assert_eq(pdf, result)
test_pdf = pdf["c"]
result = cudf.from_pandas(test_pdf)
assert_eq(test_pdf, result)
def test_series_multiindex(pdfIndex):
ps = pd.Series(np.random.rand(7))
gs = cudf.from_pandas(ps)
ps.index = pdfIndex
gs.index = cudf.from_pandas(pdfIndex)
assert_eq(ps, gs)
def test_multiindex_take(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(pdf.index.take([0]), gdf.index.take([0]))
assert_eq(pdf.index.take(np.array([0])), gdf.index.take(np.array([0])))
from cudf import Series
assert_eq(pdf.index.take(pd.Series([0])), gdf.index.take(Series([0])))
assert_eq(pdf.index.take([0, 1]), gdf.index.take([0, 1]))
assert_eq(
pdf.index.take(np.array([0, 1])), gdf.index.take(np.array([0, 1]))
)
assert_eq(
pdf.index.take(pd.Series([0, 1])), gdf.index.take(Series([0, 1]))
)
def test_multiindex_getitem(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(pdf.index[0], gdf.index[0])
@pytest.mark.parametrize(
"key_tuple",
[
# return 2 rows, 0 remaining keys = dataframe with entire index
("a", "store", "clouds", "fire"),
(("a", "store", "clouds", "fire"), slice(None)),
# return 2 rows, 1 remaining key = dataframe with n-k index columns
("a", "store", "storm"),
(("a", "store", "storm"), slice(None)),
# return 2 rows, 2 remaining keys = dataframe with n-k index columns
("a", "store"),
(("a", "store"), slice(None)),
# return 2 rows, n-1 remaining keys = dataframe with n-k index columns
("a",),
(("a",), slice(None)),
# return 1 row, 0 remaining keys = dataframe with entire index
("a", "store", "storm", "smoke"),
(("a", "store", "storm", "smoke"), slice(None)),
# return 1 row and 1 remaining key = series
("c", "forest", "clear"),
(("c", "forest", "clear"), slice(None)),
],
)
def test_multiindex_loc(pdf, gdf, pdfIndex, key_tuple):
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(pdf.loc[key_tuple], gdf.loc[key_tuple])
def test_multiindex_loc_slice(pdf, gdf, pdfIndex):
gdf = cudf.from_pandas(pdf)
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(
pdf.loc[("a", "store"):("b", "house")],
gdf.loc[("a", "store"):("b", "house")],
)
def test_multiindex_loc_then_column(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_eq(
pdf.loc[("a", "store", "clouds", "fire"), :][0],
gdf.loc[("a", "store", "clouds", "fire"), :][0],
)
def test_multiindex_loc_rows_0(pdf, gdf, pdfIndex):
gdfIndex = cudf.from_pandas(pdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
assert_exceptions_equal(
lfunc=pdf.loc.__getitem__,
rfunc=gdf.loc.__getitem__,
lfunc_args_and_kwargs=([(("d",), slice(None, None, None))],),
rfunc_args_and_kwargs=([(("d",), slice(None, None, None))],),
)
def test_multiindex_loc_rows_1_2_key(pdf, gdf, pdfIndex):
    """``loc`` with a 2-level partial key must match pandas.

    Leftover debug ``print`` calls from the original test were removed.
    """
    gdfIndex = cudf.from_pandas(pdfIndex)
    pdf.index = pdfIndex
    gdf.index = gdfIndex
    assert_eq(pdf.loc[("c", "forest"), :], gdf.loc[("c", "forest"), :])
def test_multiindex_loc_rows_1_1_key(pdf, gdf, pdfIndex):
    """``loc`` with a 1-level partial key must match pandas.

    Leftover debug ``print`` calls from the original test were removed.
    """
    gdfIndex = cudf.from_pandas(pdfIndex)
    pdf.index = pdfIndex
    gdf.index = gdfIndex
    assert_eq(pdf.loc[("c",), :], gdf.loc[("c",), :])
def test_multiindex_column_shape():
pdf = pd.DataFrame(np.random.rand(5, 0))
gdf = cudf.from_pandas(pdf)
pdfIndex = pd.MultiIndex([["a", "b", "c"]], [[0]])
pdfIndex.names = ["alpha"]
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
assert_exceptions_equal(
lfunc=operator.setitem,
rfunc=operator.setitem,
lfunc_args_and_kwargs=([], {"a": pdf, "b": "columns", "c": pdfIndex}),
rfunc_args_and_kwargs=([], {"a": gdf, "b": "columns", "c": gdfIndex}),
)
@pytest.mark.parametrize(
"query",
[
("a", "store", "clouds", "fire"),
("a", "store", "storm", "smoke"),
("a", "store"),
("b", "house"),
("a", "store", "storm"),
("a",),
("c", "forest", "clear"),
],
)
def test_multiindex_columns(pdf, gdf, pdfIndex, query):
pdf = pdf.T
gdf = cudf.from_pandas(pdf)
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.columns = pdfIndex
gdf.columns = gdfIndex
assert_eq(pdf[query], gdf[query])
def test_multiindex_from_tuples():
arrays = [["a", "a", "b", "b"], ["house", "store", "house", "store"]]
tuples = list(zip(*arrays))
pmi = pd.MultiIndex.from_tuples(tuples)
gmi = cudf.MultiIndex.from_tuples(tuples)
assert_eq(pmi, gmi)
def test_multiindex_from_dataframe():
if not hasattr(pd.MultiIndex([[]], [[]]), "codes"):
pytest.skip()
pdf = pd.DataFrame(
[["a", "house"], ["a", "store"], ["b", "house"], ["b", "store"]]
)
gdf = cudf.from_pandas(pdf)
pmi = pd.MultiIndex.from_frame(pdf, names=["alpha", "location"])
gmi = cudf.MultiIndex.from_frame(gdf, names=["alpha", "location"])
assert_eq(pmi, gmi)
@pytest.mark.parametrize(
"arrays",
[
[["a", "a", "b", "b"], ["house", "store", "house", "store"]],
[["a", "n", "n"] * 1000, ["house", "store", "house", "store"]],
[
["a", "n", "n"],
["house", "store", "house", "store", "store"] * 1000,
],
[
["a", "a", "n"] * 50,
["house", "store", "house", "store", "store"] * 100,
],
],
)
def test_multiindex_from_product(arrays):
pmi = pd.MultiIndex.from_product(arrays, names=["alpha", "location"])
gmi = cudf.MultiIndex.from_product(arrays, names=["alpha", "location"])
assert_eq(pmi, gmi)
def test_multiindex_index_and_columns():
gdf = cudf.DataFrame()
gdf["x"] = np.random.randint(0, 5, 5)
gdf["y"] = np.random.randint(0, 5, 5)
pdf = gdf.to_pandas()
mi = cudf.MultiIndex(
levels=[[0, 1, 2], [3, 4]],
codes=[[0, 0, 1, 1, 2], [0, 1, 0, 1, 1]],
names=["x", "y"],
)
gdf.index = mi
mc = cudf.MultiIndex(
levels=[["val"], ["mean", "min"]], codes=[[0, 0], [0, 1]]
)
gdf.columns = mc
pdf.index = mi.to_pandas()
pdf.columns = mc.to_pandas()
assert_eq(pdf, gdf)
def test_multiindex_multiple_groupby():
pdf = pd.DataFrame(
{
"a": [4, 17, 4, 9, 5],
"b": [1, 4, 4, 3, 2],
"x": np.random.normal(size=5),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
pdg = pdf.groupby(["a", "b"]).sum()
gdg = gdf.groupby(["a", "b"]).sum()
assert_eq(pdg, gdg)
pdg = pdf.groupby(["a", "b"]).x.sum()
gdg = gdf.groupby(["a", "b"]).x.sum()
assert_eq(pdg, gdg)
@pytest.mark.parametrize(
"func",
[
lambda df: df.groupby(["x", "y"]).z.sum(),
lambda df: df.groupby(["x", "y"]).sum(),
],
)
def test_multi_column(func):
pdf = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=1000),
"y": np.random.randint(0, 10, size=1000),
"z": np.random.normal(size=1000),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
a = func(pdf)
b = func(gdf)
assert_eq(a, b)
def test_multiindex_equality():
# mi made from groupby
# mi made manually to be identical
# are they equal?
gdf = cudf.DataFrame(
{"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [0, 1, 0, 1, 0]}
)
mi1 = gdf.groupby(["x", "y"]).mean().index
mi2 = cudf.MultiIndex(
levels=[[1, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
assert_eq(mi1, mi2)
# mi made from two groupbys, are they equal?
mi2 = gdf.groupby(["x", "y"]).max().index
assert_eq(mi1, mi2)
# mi made manually twice are they equal?
mi1 = cudf.MultiIndex(
levels=[[1, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
mi2 = cudf.MultiIndex(
levels=[[1, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
assert_eq(mi1, mi2)
# mi made from different groupbys are they not equal?
mi1 = gdf.groupby(["x", "y"]).mean().index
mi2 = gdf.groupby(["x", "z"]).mean().index
assert_neq(mi1, mi2)
# mi made from different manuals are they not equal?
mi1 = cudf.MultiIndex(
levels=[[1, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
mi2 = cudf.MultiIndex(
levels=[[0, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
assert_neq(mi1, mi2)
def test_multiindex_equals():
# mi made from groupby
# mi made manually to be identical
# are they equal?
gdf = cudf.DataFrame(
{"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [0, 1, 0, 1, 0]}
)
mi1 = gdf.groupby(["x", "y"]).mean().index
mi2 = cudf.MultiIndex(
levels=[[1, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
assert_eq(mi1.equals(mi2), True)
# mi made from two groupbys, are they equal?
mi2 = gdf.groupby(["x", "y"]).max().index
assert_eq(mi1.equals(mi2), True)
# mi made manually twice are they equal?
mi1 = cudf.MultiIndex(
levels=[[1, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
mi2 = cudf.MultiIndex(
levels=[[1, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
assert_eq(mi1.equals(mi2), True)
# mi made from different groupbys are they not equal?
mi1 = gdf.groupby(["x", "y"]).mean().index
mi2 = gdf.groupby(["x", "z"]).mean().index
assert_eq(mi1.equals(mi2), False)
# mi made from different manuals are they not equal?
mi1 = cudf.MultiIndex(
levels=[[1, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
mi2 = cudf.MultiIndex(
levels=[[0, 3, 4, 5], [1, 2, 5]],
codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
names=["x", "y"],
)
assert_eq(mi1.equals(mi2), False)
@pytest.mark.parametrize(
"data",
[
{
"Date": [
"2020-08-27",
"2020-08-28",
"2020-08-31",
"2020-08-27",
"2020-08-28",
"2020-08-31",
"2020-08-27",
"2020-08-28",
"2020-08-31",
],
"Close": [
3400.00,
3401.80,
3450.96,
226.58,
228.91,
225.53,
505.13,
525.91,
534.98,
],
"Symbol": [
"AMZN",
"AMZN",
"AMZN",
"MSFT",
"MSFT",
"MSFT",
"NVDA",
"NVDA",
"NVDA",
],
}
],
)
@pytest.mark.parametrize(
"levels",
[[["2000-01-01", "2000-01-02", "2000-01-03"], ["A", "B", "C"]], None],
)
@pytest.mark.parametrize(
"codes", [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], None]
)
@pytest.mark.parametrize("names", [["X", "Y"]])
def test_multiindex_copy_sem(data, levels, codes, names):
"""Test semantic equality for MultiIndex.copy
"""
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
gdf = gdf.groupby(["Date", "Symbol"]).mean()
pdf = pdf.groupby(["Date", "Symbol"]).mean()
gmi = gdf.index
gmi_copy = gmi.copy(levels=levels, codes=codes, names=names)
pmi = pdf.index
pmi_copy = pmi.copy(levels=levels, codes=codes, names=names)
for glv, plv in zip(gmi_copy.levels, pmi_copy.levels):
assert all(glv.values_host == plv.values)
for (_, gval), pval in zip(gmi.codes._data._data.items(), pmi.codes):
assert all(gval.values_host == pval.astype(np.int64))
assert_eq(gmi_copy.names, pmi_copy.names)
# Test same behavior when used on DataFrame
gdf.index = gmi_copy
pdf.index = pmi_copy
assert gdf.__repr__() == pdf.__repr__()
@pytest.mark.parametrize(
"data",
[
{
"Date": [
"2020-08-27",
"2020-08-28",
"2020-08-31",
"2020-08-27",
"2020-08-28",
"2020-08-31",
"2020-08-27",
"2020-08-28",
"2020-08-31",
],
"Close": [
3400.00,
3401.80,
3450.96,
226.58,
228.91,
225.53,
505.13,
525.91,
534.98,
],
"Symbol": [
"AMZN",
"AMZN",
"AMZN",
"MSFT",
"MSFT",
"MSFT",
"NVDA",
"NVDA",
"NVDA",
],
},
cudf.MultiIndex(
levels=[[1001, 1002], [2001, 2002]],
codes=[[1, 1, 0, 0], [0, 1, 0, 1]],
names=["col1", "col2"],
),
],
)
@pytest.mark.parametrize("deep", [True, False])
def test_multiindex_copy_deep(data, deep):
    """Test memory identity for deep vs. shallow copy.

    Case 1: constructed from a GroupBy (string columns).
    Case 2: constructed from a MultiIndex (numeric columns).
    A shallow copy must share underlying device buffers (equal pointers);
    a deep copy must not.
    """
    same_ref = not deep
    if isinstance(data, dict):
        import operator
        from functools import reduce
        gdf = cudf.DataFrame(data)
        mi1 = gdf.groupby(["Date", "Symbol"]).mean().index
        mi2 = mi1.copy(deep=deep)
        lchildren = [col.children for _, col in mi1._data.items()]
        rchildren = [col.children for _, col in mi2._data.items()]
        # Flatten the per-column child tuples into single lists.
        lchildren = reduce(operator.add, lchildren)
        rchildren = reduce(operator.add, rchildren)
        lptrs = [child.base_data.ptr for child in lchildren]
        rptrs = [child.base_data.ptr for child in rchildren]
        assert all([(x == y) is same_ref for x, y in zip(lptrs, rptrs)])
    elif isinstance(data, cudf.MultiIndex):
        mi1 = data
        mi2 = mi1.copy(deep=deep)
        # Assert ._levels identity
        lptrs = [lv._data._data[None].base_data.ptr for lv in mi1._levels]
        rptrs = [lv._data._data[None].base_data.ptr for lv in mi2._levels]
        assert all([(x == y) is same_ref for x, y in zip(lptrs, rptrs)])
        # Assert ._codes identity
        lptrs = [c.base_data.ptr for _, c in mi1._codes._data.items()]
        rptrs = [c.base_data.ptr for _, c in mi2._codes._data.items()]
        assert all([(x == y) is same_ref for x, y in zip(lptrs, rptrs)])
        # Assert ._data identity
        lptrs = [d.base_data.ptr for _, d in mi1._data.items()]
        rptrs = [d.base_data.ptr for _, d in mi2._data.items()]
        assert all([(x == y) is same_ref for x, y in zip(lptrs, rptrs)])
@pytest.mark.parametrize(
"iloc_rows",
[
0,
1,
slice(None, 0),
slice(None, 1),
slice(0, 1),
slice(1, 2),
slice(0, 2),
slice(0, None),
slice(1, None),
],
)
@pytest.mark.parametrize(
"iloc_columns",
[
0,
1,
slice(None, 0),
slice(None, 1),
slice(0, 1),
slice(1, 2),
slice(0, 2),
slice(0, None),
slice(1, None),
],
)
def test_multiindex_iloc(pdf, gdf, pdfIndex, iloc_rows, iloc_columns):
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
presult = pdf.iloc[iloc_rows, iloc_columns]
gresult = gdf.iloc[iloc_rows, iloc_columns]
if isinstance(gresult, cudf.DataFrame):
assert_eq(
presult, gresult, check_index_type=False, check_column_type=False
)
else:
assert_eq(presult, gresult, check_index_type=False, check_dtype=False)
@pytest.mark.parametrize(
"iloc_rows",
[
0,
1,
slice(None, 0),
slice(None, 1),
slice(0, 1),
slice(1, 2),
slice(0, 2),
slice(0, None),
slice(1, None),
],
)
@pytest.mark.parametrize(
"iloc_columns",
[
0,
1,
slice(None, 0),
slice(None, 1),
slice(0, 1),
slice(1, 2),
slice(0, 2),
slice(0, None),
slice(1, None),
],
)
def test_multicolumn_iloc(pdf, gdf, pdfIndex, iloc_rows, iloc_columns):
gdfIndex = cudf.from_pandas(pdfIndex)
assert_eq(pdfIndex, gdfIndex)
pdf.index = pdfIndex
gdf.index = gdfIndex
pdf = pdf.T
gdf = gdf.T
presult = pdf.iloc[iloc_rows, iloc_columns]
gresult = gdf.iloc[iloc_rows, iloc_columns]
if hasattr(gresult, "name") and isinstance(gresult.name, tuple):
name = gresult.name[len(gresult.name) - 1]
if isinstance(name, str) and "cudf" in name:
gresult.name = name
if isinstance(presult, pd.DataFrame):
assert_eq(
presult, gresult, check_index_type=False, check_column_type=False
)
else:
assert_eq(presult, gresult, check_index_type=False, check_dtype=False)
def test_multicolumn_item():
    """Scalar __getitem__ with a tuple key on MultiIndex columns matches pandas."""
    gdf = cudf.DataFrame(
        {"x": np.arange(10), "y": np.arange(10), "z": np.arange(10)}
    )
    gdg = gdf.groupby(["x", "y"]).min()
    gdgT = gdg.T
    pdgT = gdgT.to_pandas()
    assert_eq(gdgT[(0, 0)], pdgT[(0, 0)])
def test_multiindex_to_frame(pdfIndex, pdfIndexNulls):
    """MultiIndex.to_frame matches pandas, with and without null level values."""
    gdfIndex = cudf.from_pandas(pdfIndex)
    assert_eq(pdfIndex.to_frame(), gdfIndex.to_frame())
    gdfIndex = cudf.from_pandas(pdfIndexNulls)
    # fillna("nan") normalises missing values on both sides so differing
    # null representations (NaN vs <NA>) don't fail the comparison.
    assert_eq(
        pdfIndexNulls.to_frame().fillna("nan"),
        gdfIndex.to_frame().fillna("nan"),
    )
def test_multiindex_groupby_to_frame():
    """to_frame on a groupby-produced MultiIndex matches pandas."""
    gdf = cudf.DataFrame(
        {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [0, 1, 0, 1, 0]}
    )
    pdf = gdf.to_pandas()
    gdg = gdf.groupby(["x", "y"]).count()
    pdg = pdf.groupby(["x", "y"]).count()
    assert_eq(pdg.index.to_frame(), gdg.index.to_frame())
def test_multiindex_reset_index(pdf, gdf, pdfIndex):
    """reset_index on a MultiIndex-ed frame matches pandas."""
    gdfIndex = cudf.from_pandas(pdfIndex)
    pdf.index = pdfIndex
    gdf.index = gdfIndex
    assert_eq(pdf.reset_index(), gdf.reset_index())
def test_multiindex_groupby_reset_index():
    """reset_index after a multi-key groupby matches pandas."""
    gdf = cudf.DataFrame(
        {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [0, 1, 0, 1, 0]}
    )
    pdf = gdf.to_pandas()
    gdg = gdf.groupby(["x", "y"]).sum()
    pdg = pdf.groupby(["x", "y"]).sum()
    assert_eq(pdg.reset_index(), gdg.reset_index())
def test_multicolumn_reset_index():
    """reset_index on groupby results with multi-level columns matches pandas.

    Covers three agg specs that yield different column layouts: a two-function
    list, a single-function list, and a bare string.
    """
    gdf = cudf.DataFrame({"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5]})
    pdf = gdf.to_pandas()
    # One loop instead of the original copy-pasted triplicate; the comparison
    # is identical for every aggregation spec.
    for agg_spec in ({"y": ["count", "mean"]}, {"y": ["count"]}, {"y": "count"}):
        gdg = gdf.groupby(["x"]).agg(agg_spec)
        pdg = pdf.groupby(["x"]).agg(agg_spec)
        assert_eq(pdg.reset_index(), gdg.reset_index(), check_dtype=False)
def test_multiindex_multicolumn_reset_index():
    """reset_index after multi-key groupby with multi-level columns matches pandas."""
    gdf = cudf.DataFrame(
        {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [1, 2, 3, 4, 5]}
    )
    pdf = gdf.to_pandas()
    # Identical check for both grouping-key combinations; loop removes the
    # original duplicated pair of stanzas.
    for keys in (["x", "y"], ["x", "z"]):
        gdg = gdf.groupby(keys).agg({"y": ["count", "mean"]})
        pdg = pdf.groupby(keys).agg({"y": ["count", "mean"]})
        assert_eq(pdg.reset_index(), gdg.reset_index(), check_dtype=False)
def test_groupby_multiindex_columns_from_pandas(pdf, gdf, pdfIndex):
    """Round-tripping a pandas MultiIndex preserves equality, also transposed."""
    gdfIndex = cudf.from_pandas(pdfIndex)
    pdf.index = pdfIndex
    gdf.index = gdfIndex
    assert_eq(gdf, pdf)
    assert_eq(gdf.T, pdf.T)
def test_multiindex_rows_with_wildcard(pdf, gdf, pdfIndex):
    """.loc row selection with partial tuple keys and slice(None) wildcards matches pandas."""
    gdfIndex = cudf.from_pandas(pdfIndex)
    pdf.index = pdfIndex
    gdf.index = gdfIndex
    assert_eq(pdf.loc[("a",), :], gdf.loc[("a",), :])
    # NOTE: the inner parentheses below are grouping only — ("a") is just the
    # string "a" — so each key is a plain tuple of strings of increasing depth.
    assert_eq(pdf.loc[(("a"), ("store")), :], gdf.loc[(("a"), ("store")), :])
    assert_eq(
        pdf.loc[(("a"), ("store"), ("storm")), :],
        gdf.loc[(("a"), ("store"), ("storm")), :],
    )
    assert_eq(
        pdf.loc[(("a"), ("store"), ("storm"), ("smoke")), :],
        gdf.loc[(("a"), ("store"), ("storm"), ("smoke")), :],
    )
    # slice(None) acts as a wildcard for a level.
    assert_eq(
        pdf.loc[(slice(None), "store"), :], gdf.loc[(slice(None), "store"), :]
    )
    assert_eq(
        pdf.loc[(slice(None), slice(None), "storm"), :],
        gdf.loc[(slice(None), slice(None), "storm"), :],
    )
    assert_eq(
        pdf.loc[(slice(None), slice(None), slice(None), "smoke"), :],
        gdf.loc[(slice(None), slice(None), slice(None), "smoke"), :],
    )
def test_multiindex_multicolumn_zero_row_slice():
    """Empty (zero-row) slices of a multi-level-column frame match pandas."""
    gdf = cudf.DataFrame(
        {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [1, 2, 3, 4, 5]}
    )
    pdf = gdf.to_pandas()
    gdg = gdf.groupby(["x", "y"]).agg({"z": ["count"]}).iloc[:0]
    pdg = pdf.groupby(["x", "y"]).agg({"z": ["count"]}).iloc[:0]
    assert_eq(pdg, gdg, check_dtype=False)
def test_multicolumn_loc(pdf, pdfIndex):
    """.loc column selection on MultiIndex columns matches pandas.

    Exercises a partial key, a full tuple key, a label slice and a list of
    top-level labels.
    """
    pdf = pdf.T
    pdf.columns = pdfIndex
    gdf = cudf.from_pandas(pdf)
    assert_eq(pdf.loc[:, "a"], gdf.loc[:, "a"])
    assert_eq(pdf.loc[:, ("a", "store")], gdf.loc[:, ("a", "store")])
    assert_eq(pdf.loc[:, "a":"b"], gdf.loc[:, "a":"b"])
    assert_eq(pdf.loc[:, ["a", "b"]], gdf.loc[:, ["a", "b"]])
def test_multicolumn_set_item(pdf, pdfIndex):
    """Assigning a new column to a frame with MultiIndex columns matches pandas."""
    pdf = pdf.T
    pdf.columns = pdfIndex
    gdf = cudf.from_pandas(pdf)
    pdf["d"] = [1, 2, 3, 4, 5]
    gdf["d"] = [1, 2, 3, 4, 5]
    assert_eq(pdf, gdf)
def test_multiindex_iter_error():
    """Iterating a cudf MultiIndex raises TypeError with a helpful message."""
    midx = cudf.MultiIndex(
        levels=[[1, 3, 4, 5], [1, 2, 5]],
        codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
        names=["x", "y"],
    )
    # The exact message is pinned so regressions in the error text are caught.
    with pytest.raises(
        TypeError,
        match=re.escape(
            f"{midx.__class__.__name__} object is not iterable. "
            f"Consider using `.to_arrow()`, `.to_pandas()` or `.values_host` "
            f"if you wish to iterate over the values."
        ),
    ):
        iter(midx)
def test_multiindex_values():
    """MultiIndex.values is a device (CuPy) array of the row tuples."""
    midx = cudf.MultiIndex(
        levels=[[1, 3, 4, 5], [1, 2, 5]],
        codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
        names=["x", "y"],
    )
    result = midx.values
    assert isinstance(result, cp.ndarray)
    # .get() copies device memory to host for the numpy comparison.
    np.testing.assert_array_equal(
        result.get(), np.array([[1, 1], [1, 5], [3, 2], [4, 2], [5, 1]])
    )
def test_multiindex_values_host():
    """MultiIndex.values_host matches the pandas .values of the same index."""
    midx = cudf.MultiIndex(
        levels=[[1, 3, 4, 5], [1, 2, 5]],
        codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
        names=["x", "y"],
    )
    pmidx = midx.to_pandas()
    assert_eq(midx.values_host, pmidx.values)
@pytest.mark.parametrize(
    "pdi, fill_value, expected",
    [
        (
            pd.MultiIndex(
                levels=[[1, 3, 4, None], [1, 2, 5]],
                codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
                names=["x", "y"],
            ),
            5,
            pd.MultiIndex(
                levels=[[1, 3, 4, 5], [1, 2, 5]],
                codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
                names=["x", "y"],
            ),
        ),
        (
            pd.MultiIndex(
                levels=[[1, 3, 4, None], [1, None, 5]],
                codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
                names=["x", "y"],
            ),
            100,
            pd.MultiIndex(
                levels=[[1, 3, 4, 100], [1, 100, 5]],
                codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
                names=["x", "y"],
            ),
        ),
        (
            pd.MultiIndex(
                levels=[["a", "b", "c", None], ["1", None, "5"]],
                codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
                names=["x", "y"],
            ),
            "100",
            pd.MultiIndex(
                levels=[["a", "b", "c", "100"], ["1", "100", "5"]],
                codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
                names=["x", "y"],
            ),
        ),
    ],
)
def test_multiIndex_fillna(pdi, fill_value, expected):
    """fillna replaces null level values; result matches a hand-built expected index."""
    gdi = cudf.from_pandas(pdi)
    assert_eq(expected, gdi.fillna(fill_value))
@pytest.mark.parametrize(
    "pdi",
    [
        # A MultiIndex with three named but completely empty levels.
        pd.MultiIndex(
            levels=[[], [], []],
            codes=[[], [], []],
            names=["one", "two", "three"],
        ),
        # A populated two-level index: every (word, ordinal) pairing.
        pd.MultiIndex.from_tuples(
            [
                ("bar", "one"),
                ("bar", "two"),
                ("baz", "one"),
                ("baz", "two"),
                ("foo", "one"),
                ("foo", "two"),
                ("qux", "one"),
                ("qux", "two"),
            ]
        ),
    ],
)
def test_multiIndex_empty(pdi):
    """MultiIndex.empty mirrors the pandas property for empty and populated indexes."""
    gdi = cudf.from_pandas(pdi)
    assert_eq(pdi.empty, gdi.empty)
@pytest.mark.parametrize(
    "pdi",
    [
        # A MultiIndex with three named but completely empty levels.
        pd.MultiIndex(
            levels=[[], [], []],
            codes=[[], [], []],
            names=["one", "two", "three"],
        ),
        # A populated two-level index: every (word, ordinal) pairing.
        pd.MultiIndex.from_tuples(
            [
                ("bar", "one"),
                ("bar", "two"),
                ("baz", "one"),
                ("baz", "two"),
                ("foo", "one"),
                ("foo", "two"),
                ("qux", "one"),
                ("qux", "two"),
            ]
        ),
    ],
)
def test_multiIndex_size(pdi):
    """MultiIndex.size mirrors the pandas property for empty and populated indexes."""
    gdi = cudf.from_pandas(pdi)
    assert_eq(pdi.size, gdi.size)
@pytest.mark.parametrize(
    "level",
    [
        [],
        "alpha",
        "location",
        "weather",
        0,
        1,
        [0, 1],
        -1,
        [-1, -2],
        [-1, "weather"],
    ],
)
def test_multiindex_droplevel_simple(pdfIndex, level):
    """droplevel matches pandas for names, positions, negatives and mixed lists."""
    gdfIndex = cudf.from_pandas(pdfIndex)
    assert_eq(pdfIndex.droplevel(level), gdfIndex.droplevel(level))
@pytest.mark.parametrize(
    "level",
    # All name combinations of size 0..4 — never all five levels at once,
    # presumably because dropping every level is invalid; confirm against
    # pandas semantics.
    itertools.chain(
        *(
            itertools.combinations(
                ("alpha", "location", "weather", "sign", "timestamp"), r
            )
            for r in range(5)
        )
    ),
)
def test_multiindex_droplevel_name(pdfIndex, level):
    """droplevel by level-name lists matches pandas."""
    level = list(level)
    gdfIndex = cudf.from_pandas(pdfIndex)
    assert_eq(pdfIndex.droplevel(level), gdfIndex.droplevel(level))
@pytest.mark.parametrize(
    "level",
    # All positional combinations of size 0..4 out of the five levels.
    itertools.chain(*(itertools.combinations(range(5), r) for r in range(5))),
)
def test_multiindex_droplevel_index(pdfIndex, level):
    """droplevel by integer-position lists matches pandas."""
    level = list(level)
    gdfIndex = cudf.from_pandas(pdfIndex)
    assert_eq(pdfIndex.droplevel(level), gdfIndex.droplevel(level))
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("return_indexer", [True, False])
@pytest.mark.parametrize(
    "pmidx",
    [
        pd.MultiIndex(
            levels=[[1, 3, 4, 5], [1, 2, 5]],
            codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
            names=["x", "y"],
        ),
        pd.MultiIndex.from_product(
            [["bar", "baz", "foo", "qux"], ["one", "two"]],
            names=["first", "second"],
        ),
        # Empty three-level index.
        pd.MultiIndex(
            levels=[[], [], []],
            codes=[[], [], []],
            names=["one", "two", "three"],
        ),
        # Unnamed two-level index (original built this via zip(*[...]);
        # the explicit tuple list is identical).
        pd.MultiIndex.from_tuples(
            [
                ("bar", "one"),
                ("bar", "two"),
                ("baz", "one"),
                ("baz", "two"),
                ("foo", "one"),
                ("foo", "two"),
                ("qux", "one"),
                ("qux", "two"),
            ]
        ),
    ],
)
def test_multiindex_sort_values(pmidx, ascending, return_indexer):
    """sort_values matches pandas, including the optional (index, indexer) return.

    Fixes the original's pointless ``pmidx = pmidx`` self-assignment.
    """
    midx = cudf.from_pandas(pmidx)
    expected = pmidx.sort_values(
        ascending=ascending, return_indexer=return_indexer
    )
    actual = midx.sort_values(
        ascending=ascending, return_indexer=return_indexer
    )
    if return_indexer:
        # With return_indexer=True both libraries return (sorted_index, indexer).
        expected, expected_indexer = expected
        actual, actual_indexer = actual
        assert_eq(expected_indexer, actual_indexer)
    assert_eq(expected, actual)
@pytest.mark.parametrize(
    "pdi",
    [
        pd.MultiIndex(
            levels=[[1, 3.0, 4, 5], [1, 2.3, 5]],
            codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
            names=["x", "y"],
        ),
        pd.MultiIndex(
            levels=[[1, 3, 4, -10], [1, 11, 5]],
            codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
            names=["x", "y"],
        ),
        pd.MultiIndex(
            levels=[["a", "b", "c", "100"], ["1", "100", "5"]],
            codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
            names=["x", "y"],
        ),
        # Null level values: known pandas argsort bug, expected to fail.
        pytest.param(
            pd.MultiIndex(
                levels=[[None, "b", "c", "a"], ["1", None, "5"]],
                codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
                names=["x", "y"],
            ),
            marks=[
                pytest.mark.xfail(
                    reason="https://github.com/pandas-dev/pandas/issues/35584"
                )
            ],
        ),
    ],
)
@pytest.mark.parametrize("ascending", [True, False])
def test_multiIndex_argsort(pdi, ascending):
    """argsort matches pandas for both sort directions."""
    gdi = cudf.from_pandas(pdi)
    if not ascending:
        # pandas argsort takes no ascending flag here, so descending order is
        # emulated by reversing the ascending result.
        expected = pdi.argsort()[::-1]
    else:
        expected = pdi.argsort()
    actual = gdi.argsort(ascending=ascending)
    assert_eq(expected, actual)
| 28.638124 | 79 | 0.492993 |
36b20fedcdec9b9e3662c9776727b4c338634507
| 759 |
py
|
Python
|
sunkit_dem/tests/test_util.py
|
PaulJWright/sunkit-dem-sandbox
|
804cf5315487568050637e27bb84e5c2eb640ba2
|
[
"BSD-3-Clause"
] | null | null | null |
sunkit_dem/tests/test_util.py
|
PaulJWright/sunkit-dem-sandbox
|
804cf5315487568050637e27bb84e5c2eb640ba2
|
[
"BSD-3-Clause"
] | null | null | null |
sunkit_dem/tests/test_util.py
|
PaulJWright/sunkit-dem-sandbox
|
804cf5315487568050637e27bb84e5c2eb640ba2
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tests for utilities
"""
import pytest
import numpy as np
import astropy.units as u
from sunkit_dem.util import quantity_1d_to_sequence
# Module-level fixture data: a 10-point wavelength grid and random intensities.
# NOTE(review): np.random.rand is unseeded, so intensities differ between test
# sessions; the tests below only check shapes/axes, not values — confirm that
# remains true for any new test added here.
wavelengths = np.linspace(0, 1, 10) * u.angstrom
intensities = np.random.rand(wavelengths.shape[0]) * u.ct
@pytest.fixture
def sequence1d():
    """Sequence built by quantity_1d_to_sequence from the module-level test spectrum."""
    return quantity_1d_to_sequence(intensities, wavelengths)
def test_dimensions(sequence1d):
    """The sequence exposes a single cube-like dimension of 10 pixels."""
    assert all(sequence1d.cube_like_dimensions == [10]*u.pix)
def test_common_axis(sequence1d):
    """The common-axis coordinates reproduce the input wavelength grid."""
    common_axis = u.Quantity(sequence1d.common_axis_coords[0])
    assert common_axis.shape == wavelengths.shape
    assert u.allclose(common_axis, wavelengths, rtol=1e-10)
def test_axis_type(sequence1d):
    """The cube-like axis physical type is wavelength ('em.wl')."""
    assert sequence1d.cube_like_array_axis_physical_types == [('em.wl',)]
| 24.483871 | 73 | 0.766798 |
4f6d2fdc3684580cf77cc6b132f9ba9ee858bf6e
| 9,726 |
py
|
Python
|
mpf/devices/ball_lock.py
|
cloudjor/mpf
|
1cf6bf18b0d81120383b0b128b0ebbfa1c62717c
|
[
"MIT"
] | null | null | null |
mpf/devices/ball_lock.py
|
cloudjor/mpf
|
1cf6bf18b0d81120383b0b128b0ebbfa1c62717c
|
[
"MIT"
] | null | null | null |
mpf/devices/ball_lock.py
|
cloudjor/mpf
|
1cf6bf18b0d81120383b0b128b0ebbfa1c62717c
|
[
"MIT"
] | null | null | null |
"""Contains the BallLock device class."""
import asyncio
from collections import deque
from mpf.core.events import event_handler
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.mode_device import ModeDevice
from mpf.core.system_wide_device import SystemWideDevice
@DeviceMonitor("balls_locked", "enabled", "lock_queue")
class BallLock(SystemWideDevice, ModeDevice):

    """Ball lock device which can be used to keep balls in ball devices and control their eject later on."""

    config_section = 'ball_locks'
    collection = 'ball_locks'
    class_label = 'ball_lock'

    def __init__(self, machine, name):
        """Initialise ball lock."""
        self.lock_devices = None
        self.source_playfield = None
        super().__init__(machine, name)

        # initialise variables
        self.balls_locked = 0
        self.enabled = False
        self._released_balls = 0
        self._release_lock = None
        self.lock_queue = deque()

    def device_removed_from_mode(self, mode):
        """Disable ball lock when mode ends."""
        del mode
        self.disable()

    @property
    def can_exist_outside_of_game(self):
        """Return true if this device can exist outside of a game."""
        return True

    @classmethod
    def prepare_config(cls, config, is_mode_config):
        """Add default events when outside mode."""
        if not is_mode_config:
            if 'enable_events' not in config:
                config['enable_events'] = 'ball_started'
            if 'disable_events' not in config:
                config['disable_events'] = 'ball_will_end'
        return super().prepare_config(config, is_mode_config)

    @asyncio.coroutine
    def _initialize(self):
        yield from super()._initialize()
        # load lock_devices
        self.lock_devices = []
        for device in self.config['lock_devices']:
            self.lock_devices.append(device)

        self.source_playfield = self.config['source_playfield']

    @event_handler(10)
    def enable(self, **kwargs):
        """Enable the lock.

        If the lock is not enabled, no balls will be locked.

        Args:
            **kwargs: unused
        """
        del kwargs
        self.debug_log("Enabling...")
        if not self.enabled:
            self._register_handlers()
        self.enabled = True

    @event_handler(0)
    def disable(self, **kwargs):
        """Disable the lock.

        If the lock is not enabled, no balls will be locked.

        Args:
            **kwargs: unused
        """
        del kwargs
        self.debug_log("Disabling...")
        self._unregister_handlers()
        self.enabled = False

    @event_handler(1)
    def reset(self, **kwargs):
        """Reset the lock.

        Will release locked balls. Device status will stay the same
        (enabled/disabled). It will wait for those balls to drain and block
        ball_ending until they did. Those balls are not included in
        ball_in_play.

        Args:
            **kwargs: unused
        """
        del kwargs
        self._released_balls += self.release_all_balls()
        self.balls_locked = 0

        if self._released_balls > 0:
            # add handler for ball_drain until self._released_balls are drained
            self.machine.events.add_handler(event='ball_drain',
                                            handler=self._wait_for_drain)

            # block ball_ending
            self.machine.events.add_handler(event='ball_ending', priority=10000,
                                            handler=self._block_during_drain)

    def _wait_for_drain(self, balls, **kwargs):
        """Consume drained balls that belong to this lock; clean up when done."""
        del kwargs
        if balls <= 0:
            return {'balls': balls}

        if balls > self._released_balls:
            ball_to_reduce = self._released_balls
        else:
            ball_to_reduce = balls

        self._released_balls -= ball_to_reduce
        self.debug_log("%s ball of lock drained.", ball_to_reduce)

        if self._released_balls <= 0:
            if self._release_lock:
                self._release_lock.clear()
                self._release_lock = None
            self.debug_log("All released balls of lock drained.")
            # BUGFIX: the event/handler pairs must match the registrations in
            # reset() — _wait_for_drain listens on 'ball_drain' and
            # _block_during_drain on 'ball_ending'. The original code had the
            # pairs crossed, so neither handler was ever removed.
            self.machine.events.remove_handler_by_event(
                'ball_drain', self._wait_for_drain)
            self.machine.events.remove_handler_by_event(
                'ball_ending', self._block_during_drain)

        return {'balls': balls - ball_to_reduce}

    def _block_during_drain(self, queue, **kwargs):
        """Hold the ball_ending queue open while released balls still drain."""
        del kwargs
        if self._released_balls > 0:
            queue.wait()
            self._release_lock = queue

    @event_handler(9)
    def release_one_if_full(self, **kwargs):
        """Release one ball if lock is full."""
        del kwargs
        if self.is_full():
            self.release_one()

    @event_handler(8)
    def release_one(self, **kwargs):
        """Release one ball.

        Args:
            **kwargs: unused
        """
        del kwargs
        self.release_balls(balls_to_release=1)

    @event_handler(7)
    def release_all_balls(self):
        """Release all balls in lock.

        NOTE(review): unlike the other event handlers this takes no **kwargs;
        confirm the event framework tolerates that when posted with arguments.
        """
        return self.release_balls(self.balls_locked)

    def release_balls(self, balls_to_release):
        """Release all balls and return the actual amount of balls released.

        Args:
            balls_to_release: number of ball to release from lock
        """
        if not self.lock_queue:
            return 0

        remaining_balls_to_release = balls_to_release

        self.debug_log("Releasing up to %s balls from lock", balls_to_release)
        balls_released = 0
        while self.lock_queue:
            device, balls_locked = self.lock_queue.pop()
            balls = balls_locked
            balls_in_device = device.balls
            if balls > balls_in_device:
                balls = balls_in_device

            if balls > remaining_balls_to_release:
                # Partial release from this device: requeue the remainder.
                self.lock_queue.append(
                    (device, balls_locked - remaining_balls_to_release))
                balls = remaining_balls_to_release

            device.eject(balls=balls)
            balls_released += balls
            remaining_balls_to_release -= balls
            if remaining_balls_to_release <= 0:
                break

        if balls_released > 0:
            self.machine.events.post(
                'ball_lock_' + self.name + '_balls_released',
                balls_released=balls_released)
            '''event: ball_lock_(name)_balls_released

            desc: The ball lock device (name) has just released a ball(s).

            args:

            balls_released: The number of balls that were just released.
            '''

        self.balls_locked -= balls_released
        return balls_released

    def _register_handlers(self):
        # register on ball_enter of lock_devices
        for device in self.lock_devices:
            self.machine.events.add_handler(
                'balldevice_' + device.name + '_ball_enter',
                self._lock_ball, device=device)

    def _unregister_handlers(self):
        # unregister ball_enter handlers
        self.machine.events.remove_handler(self._lock_ball)

    def is_full(self):
        """Return true if lock is full."""
        return self.remaining_space_in_lock() == 0

    def remaining_space_in_lock(self):
        """Return the remaining capacity of the lock."""
        balls = self.config['balls_to_lock'] - self.balls_locked
        if balls < 0:
            balls = 0
        return balls

    def _lock_ball(self, device, new_balls, unclaimed_balls, **kwargs):
        """Handle result of _ball_enter event of lock_devices."""
        del new_balls
        del kwargs
        # if full do not take any balls
        if self.is_full():
            self.debug_log("Cannot lock balls. Lock is full.")
            return {'unclaimed_balls': unclaimed_balls}

        # if there are no balls do not claim anything
        if unclaimed_balls <= 0:
            return {'unclaimed_balls': unclaimed_balls}

        capacity = self.remaining_space_in_lock()
        # take ball up to capacity limit
        if unclaimed_balls > capacity:
            balls_to_lock = capacity
        else:
            balls_to_lock = unclaimed_balls

        self.balls_locked += balls_to_lock
        self.debug_log("Locked %s balls", balls_to_lock)

        # post event for ball capture
        self.machine.events.post('ball_lock_' + self.name + '_locked_ball',
                                 balls_locked=balls_to_lock,
                                 total_balls_locked=self.balls_locked)
        '''event: ball_lock_(name)_locked_ball
        desc: The ball lock device (name) has just locked additional ball(s).

        args:

        balls_locked: The number of new balls just locked.

        total_balls_locked: The current total number of balls this device
            has locked.
        '''

        # check if we are full now and post event if yes
        if self.is_full():
            self.machine.events.post('ball_lock_' + self.name + '_full',
                                     balls=self.balls_locked)
        '''event: ball_lock_(name)_full
        desc: The ball lock device (name) is now full.
        args:
        balls: The number of balls currently locked in this device.
        '''

        self.lock_queue.append((device, unclaimed_balls))

        # schedule eject of new balls
        self._request_new_balls(balls_to_lock)

        return {'unclaimed_balls': unclaimed_balls - balls_to_lock}

    def _request_new_balls(self, balls):
        """Request new ball to playfield."""
        if self.config['request_new_balls_to_pf']:
            self.source_playfield.add_ball(balls=balls)
| 33.308219 | 115 | 0.609295 |
febe643c4d1cd47c8e0f1fe1c775b3cfa02bf215
| 15,777 |
py
|
Python
|
src/frontend/flask_server.py
|
benjamin-maynard/bank-of-anthos
|
337eb2dad5893aeb739c46d8f2ab8e2b80767b20
|
[
"Apache-2.0"
] | null | null | null |
src/frontend/flask_server.py
|
benjamin-maynard/bank-of-anthos
|
337eb2dad5893aeb739c46d8f2ab8e2b80767b20
|
[
"Apache-2.0"
] | null | null | null |
src/frontend/flask_server.py
|
benjamin-maynard/bank-of-anthos
|
337eb2dad5893aeb739c46d8f2ab8e2b80767b20
|
[
"Apache-2.0"
] | null | null | null |
'''
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import datetime
import json
import logging
import os
from flask import Flask, abort, jsonify, make_response, redirect, \
render_template, request, url_for
import requests
import jwt
# Flask application object; routes below attach to it and its configuration
# is populated at the bottom of this module.
APP = Flask(__name__)
@APP.route('/version', methods=['GET'])
def version():
    """
    Service version endpoint.

    Returns the VERSION environment variable. Defaults to an empty string so
    an unset variable yields a valid (empty) response body instead of Flask
    erroring on a None return value.
    """
    return os.environ.get('VERSION', ''), 200
@APP.route('/ready', methods=['GET'])
def readiness():
    """
    Readiness probe

    Unconditionally reports 'ok' with HTTP 200.
    """
    return 'ok', 200
@APP.route("/")
def root():
    """
    Renders home page or login page, depending on authentication status.
    """
    token = request.cookies.get(APP.config['TOKEN_NAME'])
    if not verify_token(token):
        # Render the login page directly (no redirect), so the URL stays "/".
        return login_page()
    return home()
def _backend_get_json(url, headers):
    """Best-effort GET against a backend service.

    Returns the parsed JSON body, or None when the request fails, times out,
    returns a non-success status, or the body is not valid JSON. Failures are
    logged and swallowed so the home page can still render in degraded form.
    """
    try:
        response = requests.get(
            url=url, headers=headers, timeout=APP.config['BACKEND_TIMEOUT'])
        if response:  # Response is truthy only for non-error status codes
            return response.json()
    except (requests.exceptions.RequestException, ValueError) as err:
        APP.logger.error(str(err))
    return None


@APP.route("/home")
def home():
    """
    Renders home page. Redirects to /login if token is not valid
    """
    token = request.cookies.get(APP.config['TOKEN_NAME'])
    if not verify_token(token):
        # user isn't authenticated
        return redirect(url_for('login_page'))

    token_data = jwt.decode(token, verify=False)
    display_name = token_data['name']
    username = token_data['user']
    account_id = token_data['acct']

    hed = {'Authorization': 'Bearer ' + token}
    # Each backend call is independent and best-effort: a missing balance,
    # history or contact list degrades the page rather than erroring it.
    # (The helper replaces three copy-pasted try/except stanzas.)
    balance = _backend_get_json(
        '{}/{}'.format(APP.config["BALANCES_URI"], account_id), hed)
    transaction_list = _backend_get_json(
        '{}/{}'.format(APP.config["HISTORY_URI"], account_id), hed)
    contacts = _backend_get_json(
        '{}/{}'.format(APP.config["CONTACTS_URI"], username), hed)
    if contacts is None:
        contacts = []

    _populate_contact_labels(account_id, transaction_list, contacts)

    return render_template('index.html',
                           history=transaction_list,
                           balance=balance,
                           name=display_name,
                           account_id=account_id,
                           contacts=contacts,
                           message=request.args.get('msg', None),
                           bank_name=os.getenv('BANK_NAME', 'Bank of Anthos'))
def _populate_contact_labels(account_id, transactions, contacts):
"""
Populate contact labels for the passed transactions.
Side effect:
Take each transaction and set the 'accountLabel' field with the label of
the contact each transaction was associated with. If there was no
associated contact, set 'accountLabel' to None.
If any parameter is None, nothing happens.
Params: account_id - the account id for the user owning the transaction list
transactions - a list of transactions as key/value dicts
[{transaction1}, {transaction2}, ...]
contacts - a list of contacts as key/value dicts
[{contact1}, {contact2}, ...]
"""
if account_id is None or transactions is None or contacts is None:
return
# Map contact accounts to their labels. If no label found, default to None.
contact_map = {c['account_num']: c.get('label') for c in contacts}
# Populate the 'accountLabel' field. If no match found, default to None.
for trans in transactions:
if trans['toAccountNum'] == account_id:
trans['accountLabel'] = contact_map.get(trans['fromAccountNum'])
elif trans['fromAccountNum'] == account_id:
trans['accountLabel'] = contact_map.get(trans['toAccountNum'])
@APP.route('/payment', methods=['POST'])
def payment():
    """
    Submits payment request to ledgerwriter service

    Fails if:
    - token is not valid
    - basic validation checks fail
    - response code from ledgerwriter is not 201
    """
    token = request.cookies.get(APP.config['TOKEN_NAME'])
    if not verify_token(token):
        # user isn't authenticated
        return abort(401)
    try:
        account_id = jwt.decode(token, verify=False)['acct']
        recipient = request.form['account_num']
        if recipient == 'add':
            # 'add' is the form sentinel for "new recipient typed by the user".
            recipient = request.form['contact_account_num']
            label = request.form.get('contact_label', None)
            if label:
                # new contact. Add to contacts list
                _add_contact(label,
                             recipient,
                             APP.config['LOCAL_ROUTING'],
                             False)
        transaction_data = {"fromAccountNum": account_id,
                            "fromRoutingNum": APP.config['LOCAL_ROUTING'],
                            "toAccountNum": recipient,
                            "toRoutingNum": APP.config['LOCAL_ROUTING'],
                            # convert dollars from the form to integer cents
                            "amount": int(float(request.form['amount']) * 100)}
        _submit_transaction(transaction_data)
        return redirect(url_for('home', msg='Payment initiated'))
    except requests.exceptions.RequestException as err:
        # Connection/timeout failures fall through to the generic message.
        APP.logger.error(str(err))
    except UserWarning as warn:
        # Backend rejected the transaction; surface its message to the user.
        msg = 'Payment failed: {}'.format(str(warn))
        return redirect(url_for('home', msg=msg))

    return redirect(url_for('home', msg='Payment failed'))
@APP.route('/deposit', methods=['POST'])
def deposit():
    """
    Submits deposit request to ledgerwriter service

    Fails if:
    - token is not valid
    - basic validation checks fail
    - response code from ledgerwriter is not 201
    """
    token = request.cookies.get(APP.config['TOKEN_NAME'])
    if not verify_token(token):
        # user isn't authenticated
        return abort(401)
    try:
        # get account id from token
        account_id = jwt.decode(token, verify=False)['acct']
        if request.form['account'] == 'add':
            # 'add' is the form sentinel for "new external account".
            external_account_num = request.form['external_account_num']
            external_routing_num = request.form['external_routing_num']
            external_label = request.form.get('external_label', None)
            if external_label:
                # new contact. Add to contacts list
                _add_contact(external_label,
                             external_account_num,
                             external_routing_num,
                             True)
        else:
            # Existing external account arrives as a JSON-encoded form value.
            account_details = json.loads(request.form['account'])
            external_account_num = account_details['account_num']
            external_routing_num = account_details['routing_num']
        transaction_data = {"fromAccountNum": external_account_num,
                            "fromRoutingNum": external_routing_num,
                            "toAccountNum": account_id,
                            "toRoutingNum": APP.config['LOCAL_ROUTING'],
                            # convert dollars from the form to integer cents
                            "amount": int(float(request.form['amount']) * 100)}
        _submit_transaction(transaction_data)
        return redirect(url_for('home', msg='Deposit accepted'))
    except requests.exceptions.RequestException as err:
        # Connection/timeout failures fall through to the generic message.
        APP.logger.error(str(err))
    except UserWarning as warn:
        # Backend rejected the transaction; surface its message to the user.
        msg = 'Deposit failed: {}'.format(str(warn))
        return redirect(url_for('home', msg=msg))

    return redirect(url_for('home', msg='Deposit failed'))
def _submit_transaction(transaction_data):
    """POST a transaction dict to the ledgerwriter service.

    Raises:
        UserWarning: carrying the backend's response text when the
            ledgerwriter rejects the transaction (HTTP 4xx/5xx).
        requests.exceptions.RequestException: on connection/timeout failures.
    """
    token = request.cookies.get(APP.config['TOKEN_NAME'])
    hed = {'Authorization': 'Bearer ' + token,
           'content-type': 'application/json'}
    resp = requests.post(url=APP.config["TRANSACTIONS_URI"],
                         data=jsonify(transaction_data).data,
                         headers=hed,
                         timeout=APP.config['BACKEND_TIMEOUT'])
    try:
        resp.raise_for_status()  # Raise on HTTP Status code 4XX or 5XX
    except requests.exceptions.HTTPError as err:
        # Chain the HTTPError (the original bound it but never used it) so
        # tracebacks keep the root cause.
        raise UserWarning(resp.text) from err
def _add_contact(label, acct_num, routing_num, is_external_acct=False):
    """
    Submits a new contact to the contact service.

    Args:
        label: display label for the contact.
        acct_num: contact's account number.
        routing_num: contact's routing number.
        is_external_acct: True for accounts at other banks.

    Raises:
        UserWarning: carrying the backend's response text on HTTP 4xx/5xx.
        requests.exceptions.RequestException: on connection/timeout failures.
    """
    token = request.cookies.get(APP.config['TOKEN_NAME'])
    hed = {'Authorization': 'Bearer ' + token,
           'content-type': 'application/json'}
    contact_data = {
        'label': label,
        'account_num': acct_num,
        'routing_num': routing_num,
        'is_external': is_external_acct
    }
    token_data = jwt.decode(token, verify=False)
    url = '{}/{}'.format(APP.config["CONTACTS_URI"], token_data['user'])
    resp = requests.post(url=url,
                         data=jsonify(contact_data).data,
                         headers=hed,
                         timeout=APP.config['BACKEND_TIMEOUT'])
    try:
        resp.raise_for_status()  # Raise on HTTP Status code 4XX or 5XX
    except requests.exceptions.HTTPError as err:
        # Chain the HTTPError (the original bound it but never used it) so
        # tracebacks keep the root cause.
        raise UserWarning(resp.text) from err
@APP.route("/login", methods=['GET'])
def login_page():
    """
    Renders login page. Redirects to /home if user already has a valid token
    """
    token = request.cookies.get(APP.config['TOKEN_NAME'])
    if verify_token(token):
        # already authenticated
        return redirect(url_for('home'))

    # DEFAULT_USERNAME/DEFAULT_PASSWORD pre-fill the demo login form.
    return render_template('login.html',
                           message=request.args.get('msg', None),
                           default_user=os.getenv('DEFAULT_USERNAME', ''),
                           default_password=os.getenv('DEFAULT_PASSWORD', ''),
                           bank_name=os.getenv('BANK_NAME', 'Bank of Anthos'))
@APP.route('/login', methods=['POST'])
def login():
    """
    Submits login request to userservice and saves resulting token

    Fails if userservice does not accept input username and password
    """
    return _login_helper(request.form['username'],
                         request.form['password'])
def _login_helper(username, password):
    """Authenticate against the userservice and set the session token cookie.

    On success returns a redirect to /home with the JWT stored as a cookie
    whose max_age matches the token's own lifetime. On failure redirects back
    to the login page with an error message.
    """
    try:
        req = requests.get(url=APP.config["LOGIN_URI"],
                           params={'username': username, 'password': password})
        req.raise_for_status()  # Raise on HTTP Status code 4XX or 5XX

        # login success
        token = req.json()['token'].encode('utf-8')
        claims = jwt.decode(token, verify=False)
        max_age = claims['exp'] - claims['iat']
        resp = make_response(redirect(url_for('home')))
        resp.set_cookie(APP.config['TOKEN_NAME'], token, max_age=max_age)
        return resp
    except requests.exceptions.HTTPError as err:
        # BUGFIX: HTTPError is a subclass of RequestException, so this branch
        # must come FIRST — in the original it was listed second and was
        # unreachable, meaning the backend's failure message never reached
        # the user.
        APP.logger.error(str(err))
        msg = 'Login Failed: {}'.format(req.json().get('msg', ''))
        return redirect(url_for('login', msg=msg))
    except requests.exceptions.RequestException as err:
        APP.logger.error(str(err))
    return redirect(url_for('login', msg='Login Failed'))
@APP.route("/signup", methods=['GET'])
def signup_page():
    """
    Renders signup page. Redirects to /home if user already has a valid token
    """
    token = request.cookies.get(APP.config['TOKEN_NAME'])
    if verify_token(token):
        # already authenticated
        return redirect(url_for('home'))
    return render_template('signup.html',
                           bank_name=os.getenv('BANK_NAME', 'Bank of Anthos'))
@APP.route("/signup", methods=['POST'])
def signup():
    """
    Submits signup request to userservice

    Fails if userservice does not accept input form data
    """
    try:
        # create user
        resp = requests.post(url=APP.config["USERSERVICE_URI"],
                             data=request.form,
                             timeout=APP.config['BACKEND_TIMEOUT'])
        if resp.status_code == 201:
            # user created. Attempt login
            return _login_helper(request.form['username'],
                                 request.form['password'])
    except requests.exceptions.RequestException as err:
        APP.logger.error(str(err))
    # Any non-201 status or request failure falls through to this redirect.
    return redirect(url_for('login', msg='Error: Account creation failed'))
@APP.route('/logout', methods=['POST'])
def logout():
    """
    Logs out user by deleting token cookie and redirecting to login page
    """
    resp = make_response(redirect(url_for('login_page')))
    resp.delete_cookie(APP.config['TOKEN_NAME'])
    return resp
def verify_token(token):
    """
    Validates token using userservice public key

    Returns True only when the signature verifies against the RS256 public
    key loaded at startup; any invalid/expired/malformed token returns False.
    """
    if token is None:
        return False
    try:
        jwt.decode(token, key=APP.config['PUBLIC_KEY'], algorithms='RS256', verify=True)
        return True
    except jwt.exceptions.InvalidTokenError as err:
        # Invalid tokens are expected (anonymous visitors), so log at debug.
        APP.logger.debug(err)
        return False
# register html template formatters
def format_timestamp_day(timestamp):
    """ Format the input timestamp day in a human readable way """
    # TODO: time zones?
    # Parses with the module-wide TIMESTAMP_FORMAT and keeps only the
    # zero-padded day of month (e.g. "07").
    date = datetime.datetime.strptime(timestamp, APP.config['TIMESTAMP_FORMAT'])
    return date.strftime('%d')
def format_timestamp_month(timestamp):
    """ Format the input timestamp month in a human readable way """
    # TODO: time zones?
    # Parses with the module-wide TIMESTAMP_FORMAT and keeps the abbreviated
    # month name (e.g. "Jan").
    date = datetime.datetime.strptime(timestamp, APP.config['TIMESTAMP_FORMAT'])
    return date.strftime('%b')
def format_currency(int_amount):
    """Render an amount of cents as a dollar string, e.g. -12345 -> '-$123.45'.

    None renders as the placeholder '$---'.
    """
    if int_amount is None:
        return '$---'
    dollars = abs(float(int_amount) / 100)
    formatted = '${:0,.2f}'.format(dollars)
    return '-' + formatted if int_amount < 0 else formatted
# set up logger: reuse gunicorn's error-log handlers and level so app
# logs go to the same destination as server logs
APP.logger.handlers = logging.getLogger('gunicorn.error').handlers
APP.logger.setLevel(logging.getLogger('gunicorn.error').level)
# setup global variables: backend endpoints are assembled from the
# *_API_ADDR environment variables injected by the deployment
APP.config["TRANSACTIONS_URI"] = 'http://{}/transactions'.format(
    os.environ.get('TRANSACTIONS_API_ADDR'))
APP.config["USERSERVICE_URI"] = 'http://{}/users'.format(
    os.environ.get('USERSERVICE_API_ADDR'))
APP.config["BALANCES_URI"] = 'http://{}/balances'.format(
    os.environ.get('BALANCES_API_ADDR'))
APP.config["HISTORY_URI"] = 'http://{}/transactions'.format(
    os.environ.get('HISTORY_API_ADDR'))
APP.config["LOGIN_URI"] = 'http://{}/login'.format(
    os.environ.get('USERSERVICE_API_ADDR'))
APP.config["CONTACTS_URI"] = 'http://{}/contacts'.format(
    os.environ.get('CONTACTS_API_ADDR'))
# BUGFIX: read the public key with a context manager instead of leaking
# the file handle from open(...).read()
with open(os.environ.get('PUB_KEY_PATH'), 'r') as pub_key_file:
    APP.config['PUBLIC_KEY'] = pub_key_file.read()
APP.config['LOCAL_ROUTING'] = os.getenv('LOCAL_ROUTING_NUM')
APP.config['BACKEND_TIMEOUT'] = 3  # timeout in seconds for calls to the backend
APP.config['TOKEN_NAME'] = 'token'  # name of the auth cookie
APP.config['TIMESTAMP_FORMAT'] = '%Y-%m-%dT%H:%M:%S.%f%z'
# register formater functions so Jinja templates can call them directly
APP.jinja_env.globals.update(format_currency=format_currency)
APP.jinja_env.globals.update(format_timestamp_month=format_timestamp_month)
APP.jinja_env.globals.update(format_timestamp_day=format_timestamp_day)
| 36.605568 | 92 | 0.630094 |
570b5fb0f5aab5e919136f723628286783067959
| 23 |
py
|
Python
|
lib/DataIO/__init__.py
|
atomrq/simulab
|
886f2d613ecbac711b41026887f6d9f7ac94b25f
|
[
"BSD-3-Clause"
] | null | null | null |
lib/DataIO/__init__.py
|
atomrq/simulab
|
886f2d613ecbac711b41026887f6d9f7ac94b25f
|
[
"BSD-3-Clause"
] | null | null | null |
lib/DataIO/__init__.py
|
atomrq/simulab
|
886f2d613ecbac711b41026887f6d9f7ac94b25f
|
[
"BSD-3-Clause"
] | null | null | null |
# Re-export the xml_io helpers at package level (package __init__).
from .xml_io import *
| 11.5 | 22 | 0.695652 |
99fa30867471c13cb31f491dd431847a2bc375ea
| 1,161 |
py
|
Python
|
SIS/models.py
|
toHarsh/Management-System
|
3ae86ca234c9d97c50d32c5378c24a6e189dfbb1
|
[
"MIT"
] | 2 |
2021-01-03T00:36:50.000Z
|
2021-03-20T19:35:04.000Z
|
SIS/models.py
|
toHarsh/sRecords
|
3ae86ca234c9d97c50d32c5378c24a6e189dfbb1
|
[
"MIT"
] | null | null | null |
SIS/models.py
|
toHarsh/sRecords
|
3ae86ca234c9d97c50d32c5378c24a6e189dfbb1
|
[
"MIT"
] | null | null | null |
from SIS import db,login_manager
from flask_login import UserMixin
# @app.before_first_request
# def create_tables():
# db.create_all()
@login_manager.user_loader
def get_user(user_id):
    """Flask-Login user loader: resolve a session's stored id to an Admin row."""
    return Admin.query.get(int(user_id))
class Info(db.Model,UserMixin):
    """Student information record (one row per student).

    NOTE(review): login is resolved against Admin (see get_user), so the
    UserMixin base here looks unused for authentication -- confirm.
    """
    # surrogate primary key
    id = db.Column(db.Integer,primary_key=True)
    # class roll number, unique per student
    rollNo = db.Column(db.Integer,unique=True, nullable=False)
    # PRN -- presumably the permanent registration number; unique
    prn = db.Column(db.Integer,unique=True, nullable=False)
    name = db.Column(db.String(40), nullable=False)
    # mobile number; stored as Integer, so leading zeros cannot be
    # represented -- NOTE(review): consider whether String fits better
    mobNo = db.Column(db.Integer,unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    city = db.Column(db.String(40), nullable=False)
    state = db.Column(db.String(40), nullable=False)
    def __repr__(self):
        # debug representation listing all stored fields
        return f"Info('{self.rollNo}','{self.prn}','{self.name}','{self.mobNo}','{self.email}','{self.city}','{self.state}')"
class Admin(db.Model,UserMixin):
    """Administrator account used for authentication (loaded by get_user)."""
    # surrogate primary key (used as the Flask-Login user id)
    id = db.Column(db.Integer,primary_key=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # NOTE(review): no hashing is visible in this model and __repr__ echoes
    # the password -- confirm passwords are hashed before storage.
    password = db.Column(db.String(40), nullable=False)
    def __repr__(self):
        # debug representation; includes the stored password (see note above)
        return f"Admin('{self.email}','{self.password}')"
| 37.451613 | 125 | 0.694229 |
8e710962fb1ebcec099e723dc3308c2eab516d16
| 681 |
py
|
Python
|
apps/project/models/credit.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 349 |
2020-08-04T10:21:01.000Z
|
2022-03-23T08:31:29.000Z
|
apps/project/models/credit.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 2 |
2021-01-07T06:17:05.000Z
|
2021-04-01T06:01:30.000Z
|
apps/project/models/credit.py
|
rainydaygit/testtcloudserver
|
8037603efe4502726a4d794fb1fc0a3f3cc80137
|
[
"MIT"
] | 70 |
2020-08-24T06:46:14.000Z
|
2022-03-25T13:23:27.000Z
|
from library.api.db import db, EntityModel
class Credit(EntityModel):
    """Per-user credit score row."""
    # status values
    ACTIVE = 0
    DISABLE = 1
    # NOTE(review): the meaning of CREDIT_DATE (33) is not evident from
    # this file -- confirm against the code that uses it.
    CREDIT_DATE = 33
    CREDIT_SCORE_INIT = 100  # score granted to a new user
    CREDIT_ADD_ONCE = 1  # per-event score increment
    CREDIT_SUB_ONCE = -1  # per-event score decrement
    user_id = db.Column(db.Integer)  # user id
    score = db.Column(db.Integer, default=100)  # credit score
    status = db.Column(db.Integer, default=0)  # status 0: active, 1: disabled
class CreditRecord(EntityModel):
    """Audit record of a single credit-score change."""
    # status values
    ACTIVE = 0
    DISABLE = 1
    user_id = db.Column(db.Integer)  # user id
    score = db.Column(db.Integer)  # credit score
    score_operation = db.Column(db.Integer)  # credit score operation (delta applied)
    reason = db.Column(db.String(1000))  # reason for adding/subtracting points
    status = db.Column(db.Integer, default=0)  # status 0: active, 1: disabled
| 26.192308 | 61 | 0.650514 |
f75c8620d862d82402e85ba5dbaecdc06f04bc2d
| 12,068 |
py
|
Python
|
input-files/desy_2021/INPUT_DESY-2021_Ti64_High_Temp_16alpha_4beta_iCSF_06.py
|
LightForm-group/continuous-peak-fit-analysis
|
e3133a016e64f45be6cb560d5cedc257bfe653a9
|
[
"MIT"
] | null | null | null |
input-files/desy_2021/INPUT_DESY-2021_Ti64_High_Temp_16alpha_4beta_iCSF_06.py
|
LightForm-group/continuous-peak-fit-analysis
|
e3133a016e64f45be6cb560d5cedc257bfe653a9
|
[
"MIT"
] | null | null | null |
input-files/desy_2021/INPUT_DESY-2021_Ti64_High_Temp_16alpha_4beta_iCSF_06.py
|
LightForm-group/continuous-peak-fit-analysis
|
e3133a016e64f45be6cb560d5cedc257bfe653a9
|
[
"MIT"
] | null | null | null |
# Input parameters for Ti64 (continuous-peak-fit input file)
# data drive.
import os
drive = "/mnt/iusers01/jf01/mbcx9cd4/rds_lightform/"
# properties of the data files.
# Diffraction images for Def04: frames 1..210, sampled every 5th frame,
# frame number zero-padded to 5 digits in the file name.
datafile_directory = drive + 'SXRD_raw_data/desy_2021/diffraction_images/Def_04'
datafile_Basename = "Ti64_Rolled_ND_Compress_910C_1-00s-1_Multi-Hit_Temp_Cycl_4Cs-1_810_Cool_4Cs-1_Def04_3-"
datafile_Ending = ".tif"
datafile_StartNum = 1
datafile_EndNum = 210
datafile_Step = 5
datafile_NumDigit = 5
# calibration and masking.
# Dioptas calibration (.poni) and detector mask from the LaB6 standard.
Calib_type = "Dioptas"
# Calib_detector = 'unknown'
# Calib_data = drive + 'SXRD_analysis/desy_2021/calibration-dioptas/LaB6_1554mm_Dilatometer-00003.tif'
Calib_param = drive + 'SXRD_analysis/desy_2021/calibration-dioptas/LaB6_1554mm_Dilatometer-00003.poni'
Calib_mask = drive + 'SXRD_analysis/desy_2021/calibration-dioptas/LaB6_1554mm_Dilatometer-00003.mask'
Calib_pixels = 200
# number of bins for initial fitting.
AziBins = 90
# fitting properties for peaks.
#
# Every reflection in this experiment uses the same fitting orders, and
# every multi-peak region repeats the same seven azimuths per peak in its
# PeakPositionSelection table.  The table is therefore generated from
# small helpers instead of repeating the literals several hundred times;
# the resulting ``fit_orders`` value is identical to the hand-written one.

# Azimuths (degrees) at which initial peak positions are specified.
_AZIMUTHS = [-120.5, -58.997, 59.289, 23.187, 23.212, 23.158, 123.246]

# Phase labels used throughout this file.
_ALPHA = "Ti64alpha"
_BETA = "Ti64beta"


def _peak(phase, hkl):
    """Return the standard peak definition shared by every reflection."""
    return {
        "phase": phase,
        "hkl": hkl,
        "d-space": 2,
        "height": 6,
        "profile": 0,
        # "profile_fixed": 1,
        "width": 0,
        "symmetry": 2,
    }


def _positions(d_spacings):
    """Return [peak_number, azimuth, position] rows, one per peak per azimuth."""
    return [
        [peak_number, azimuth, d_spacing]
        for peak_number, d_spacing in enumerate(d_spacings, start=1)
        for azimuth in _AZIMUTHS
    ]


def _region(two_theta_range, peaks, d_spacings=None):
    """Build one fitting region.

    two_theta_range: (min, max) window of the region.
    peaks: list of (phase, hkl) tuples for the reflections in the window.
    d_spacings: initial position per peak for overlapping regions; when
        given, a PeakPositionSelection table is generated.
    """
    region = {
        "range": [list(two_theta_range)],
        "background": [0, 0],
        "peak": [_peak(phase, hkl) for phase, hkl in peaks],
    }
    if d_spacings is not None:
        region["PeakPositionSelection"] = _positions(d_spacings)
    return region


fit_orders = [
    _region((2.66, 2.91), [(_ALPHA, '100')]),
    _region((2.91, 3.34), [(_ALPHA, '002'), (_BETA, '110'), (_ALPHA, '101')],
            d_spacings=[3.01, 3.06, 3.16]),
    _region((3.99, 4.24), [(_ALPHA, '102')]),
    _region((4.23, 4.52), [(_BETA, '200')]),
    _region((4.70, 5.01), [(_ALPHA, '110')]),
    _region((5.15, 5.52), [(_ALPHA, '103')]),
    _region((5.46, 5.94), [(_ALPHA, '200'), (_ALPHA, '112'), (_ALPHA, '201')],
            d_spacings=[5.56, 5.68, 5.76]),
    _region((5.93, 6.49), [(_ALPHA, '004'), (_BETA, '220'), (_ALPHA, '202')],
            d_spacings=[6.02, 6.12, 6.32]),
    _region((6.53, 6.78), [(_ALPHA, '104')]),
    _region((6.77, 7.09), [(_BETA, '310')]),
    _region((7.07, 7.90),
            [(_ALPHA, '203'), (_ALPHA, '210'), (_ALPHA, '211'), (_ALPHA, '114')],
            d_spacings=[7.16, 7.36, 7.50, 7.71]),
]
# output settings
Output_directory = drive + 'SXRD_analysis/desy_2021/experiment04-deformation/fourier-peak-analysis/'
Output_type = 'MultiFit' #'DifferentialStrain' # differential strain option gives the principal stress/strain axis
# presumably the number of azimuthal positions written to the output --
# confirm against the continuous-peak-fit documentation
Output_NumAziWrite = 360
| 33.337017 | 121 | 0.297812 |
56f149eb7708d98f1dbbb6b45cd60186b65e62bc
| 1,145 |
py
|
Python
|
rootfs/patcher-script.py
|
Cryptophobia/postgres
|
48f9ac08d00a5ee0d61647027623c6f26fb325b9
|
[
"MIT"
] | 2 |
2019-03-10T04:08:18.000Z
|
2019-06-25T01:07:30.000Z
|
rootfs/patcher-script.py
|
Cryptophobia/postgres
|
48f9ac08d00a5ee0d61647027623c6f26fb325b9
|
[
"MIT"
] | 14 |
2018-04-04T17:46:56.000Z
|
2020-10-10T18:07:23.000Z
|
rootfs/patcher-script.py
|
Cryptophobia/postgres
|
48f9ac08d00a5ee0d61647027623c6f26fb325b9
|
[
"MIT"
] | 6 |
2018-04-04T01:10:25.000Z
|
2021-12-11T00:58:35.000Z
|
import sys
patch_script_tmp = """
def run_patch_scripts(patch_script_path):
    with open(patch_script_path, 'r') as f:
        try:
            exec(f.read())
        except:
            pass
run_patch_scripts("%s")
"""


def main(patch_file, patch_script_file):
    """Inject the patcher bootstrap into *patch_file* in place.

    The bootstrap snippet (patch_script_tmp, pointed at
    *patch_script_file*) is inserted immediately before the first
    import statement, or appended at the end when the file contains no
    imports.
    """
    bootstrap = patch_script_tmp % patch_script_file
    with open(patch_file, "r") as src:
        original_lines = list(src)

    patched_lines = []
    inserted = False
    for line in original_lines:
        if not inserted and line.startswith(('import', 'from')):
            patched_lines.append(bootstrap)
            inserted = True
        patched_lines.append(line)
    if not inserted:
        # no import statements at all -> append the bootstrap at the end
        patched_lines.append(bootstrap)

    with open(patch_file, "w") as dst:
        dst.writelines(patched_lines)
if __name__ == '__main__':
    # Usage: patcher-script.py (file <path> | module <name>) <patch-script>
    patch_type = sys.argv[1]
    if patch_type == 'file':
        # patch an explicit file path
        patch_file = sys.argv[2]
    elif patch_type == 'module':
        # resolve an importable module to its source file and patch that
        module = __import__(sys.argv[2], fromlist=True)
        patch_file = module.__file__
    patch_script_file = sys.argv[3]
    main(patch_file, patch_script_file)
| 27.261905 | 71 | 0.60786 |
f9958b1e24c8825c43835f26deeed5d062fa8ede
| 8,032 |
py
|
Python
|
gmn/src/d1_gmn/app/urls.py
|
DataONEorg/d1_python
|
dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb
|
[
"Apache-2.0"
] | 15 |
2016-10-28T13:56:52.000Z
|
2022-01-31T19:07:49.000Z
|
gmn/src/d1_gmn/app/urls.py
|
DataONEorg/d1_python
|
dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb
|
[
"Apache-2.0"
] | 56 |
2017-03-16T03:52:32.000Z
|
2022-03-12T01:05:28.000Z
|
gmn/src/d1_gmn/app/urls.py
|
DataONEorg/d1_python
|
dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb
|
[
"Apache-2.0"
] | 11 |
2016-05-31T16:22:02.000Z
|
2020-10-05T14:37:10.000Z
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""URL to view mapping."""
import d1_common.utils.filesystem
import d1_common.utils.ulog
import django.conf.urls
import django.views.static
import d1_gmn.app.views.external
import d1_gmn.app.views.get_package
import d1_gmn.app.views.gmn
import d1_gmn.app.views.internal
# from django.urls import path
# from django.views.generic import TemplateView
# Return 404 and 500 as UI page when DEBUG=False
handler404 = "d1_gmn.app.views.internal.error_404"
handler500 = "d1_gmn.app.views.internal.error_500"


def _url(regex, view, allowed_method_list, name):
    """Build one URL pattern.

    Django's URL dispatcher does not take the HTTP method into account, so
    where the DataONE REST API maps several methods onto one URL, they all
    reach the same view; the view receives the allowed methods via kwargs
    and dispatches on the request method itself.
    """
    return django.conf.urls.url(
        regex,
        view,
        kwargs={"allowed_method_list": allowed_method_list},
        name=name,
    )


urlpatterns = [
    # Tier 1: Core API (MNCore)
    # MNCore.ping() - GET /monitor/ping
    _url(r"^v[12]/monitor/ping/?$",
         d1_gmn.app.views.external.get_monitor_ping, ["GET"],
         "get_monitor_ping"),
    # MNCore.getLogRecords() - GET /log
    _url(r"^v[12]/log/?$",
         d1_gmn.app.views.external.get_log, ["GET"], "get_log"),
    # MNCore.getCapabilities() - GET /node
    # Also available via Apache redirect from /
    _url(r"^v[12]/(?:node/?)?$",
         d1_gmn.app.views.external.get_node, ["GET"], "get_node"),
    # Tier 1: Read API (MNRead)
    # MNRead.get()/describe(), MNStorage.update()/delete() - /object/{did}
    _url(r"^v[12]/object/(.+)$",
         d1_gmn.app.views.external.dispatch_object,
         ["GET", "HEAD", "PUT", "DELETE"], "dispatch_object"),
    # MNRead.getSystemMetadata() - GET /meta/{did}
    _url(r"^v[12]/meta/(.+)$",
         d1_gmn.app.views.external.get_meta, ["GET"], "get_meta"),
    # MNStorage.updateSystemMetadata() - PUT /meta
    _url(r"^v2/meta$",
         d1_gmn.app.views.external.put_meta, ["PUT"], "put_meta"),
    # MNRead.getChecksum() - GET /checksum/{did}
    _url(r"^v[12]/checksum/(.+)$",
         d1_gmn.app.views.external.get_checksum, ["HEAD", "GET"],
         "get_checksum"),
    # MNRead.listObjects() - GET /object; MNStorage.create() - POST /object
    _url(r"^v[12]/object/?$",
         d1_gmn.app.views.external.dispatch_object_list, ["GET", "POST"],
         "dispatch_object_list"),
    # MNRead.synchronizationFailed() - POST /error
    _url(r"^v[12]/error/?$",
         d1_gmn.app.views.external.post_error, ["POST"], "post_error"),
    # MNRead.getReplica() - GET /replica/{did}
    _url(r"^v[12]/replica/(.+)/?$",
         d1_gmn.app.views.external.get_replica, ["GET"], "get_replica"),
    # Tier 2: Authorization API (MNAuthorization)
    # MNAuthorization.isAuthorized() - GET /isAuthorized/{did}
    _url(r"^v[12]/isAuthorized/(.+)/?$",
         d1_gmn.app.views.external.get_is_authorized, ["GET"],
         "get_is_authorized"),
    # MNStorage.systemMetadataChanged() - POST /dirtySystemMetadata
    _url(r"^v[12]/dirtySystemMetadata/?$",
         d1_gmn.app.views.external.post_refresh_system_metadata, ["POST"],
         "post_refresh_system_metadata"),
    # Tier 3: Storage API (MNStorage)
    # MNStorage.generateIdentifier()
    _url(r"^v[12]/generate/?$",
         d1_gmn.app.views.external.post_generate_identifier, ["POST", "PUT"],
         "post_generate_identifier"),
    # MNStorage.archive() - PUT /archive/{did}
    # BUGFIX: "delete" was lowercase while every other entry uses uppercase
    # HTTP method names, so DELETE could never match here.
    _url(r"^v[12]/archive/(.+)/?$",
         d1_gmn.app.views.external.put_archive, ["DELETE", "PUT"],
         "put_archive"),
    # Tier 4: Replication API (MNReplication)
    # MNReplication.replicate() - POST /replicate
    _url(r"^v[12]/replicate/?$",
         d1_gmn.app.views.external.post_replicate, ["POST"],
         "post_replicate"),
    # Package API
    # MNPackage.getPackage() - GET /package
    _url(r"^v2/packages/(?P<package_type>.+)/(?P<pid>.+)/?$",
         d1_gmn.app.views.get_package.get_package, ["GET"], "get_package"),
    #
    # Web UI
    #
    # Redirect / to /home
    _url(r"^$", d1_gmn.app.views.internal.root, ["GET"], "root"),
    _url(r"^home/?$", d1_gmn.app.views.internal.home, ["GET"], "home"),
    _url(r"^templates/home.xsl$",
         d1_gmn.app.views.internal.home_xslt, ["GET"], "home_xslt"),
    _url(r"^templates/clipboard/(.+)/?$",
         d1_gmn.app.views.internal.clipboard, ["GET"], "clipboard"),
    #
    # GMN vendor specific extensions
    #
    _url(r"^gmn/object/?$",
         d1_gmn.app.views.gmn.get_object_list_json, ["GET"],
         "get_object_list_json"),
    _url(r"^gmn/echo/session/?$",
         d1_gmn.app.views.gmn.echo_session, ["GET"], "echo_session"),
    _url(r"^gmn/echo/request/?$",
         d1_gmn.app.views.gmn.echo_request, ["GET"], "echo_request_object"),
]

if django.conf.settings.STATIC_SERVER:
    # Serve static files directly (dev / standalone deployments).
    urlpatterns.append(
        django.conf.urls.url(
            r"^static/(?P<path>.*)$",
            django.views.static.serve,
            kwargs={
                # 'static': d1_common.util.abs_path('.'),
                "document_root": d1_common.utils.filesystem.abs_path("./static"),
                "show_indexes": True,
                "allowed_method_list": ["GET"],
            },
        )
    )
| 34.472103 | 82 | 0.616907 |
7d94d4eb95863bae878273b8d0d337d0b52f3306
| 88 |
py
|
Python
|
src/micropython.py
|
ev3dev/micropython-linux
|
6586ccdc0db16be0ffc5da459dcff3a7022e62bc
|
[
"MIT"
] | null | null | null |
src/micropython.py
|
ev3dev/micropython-linux
|
6586ccdc0db16be0ffc5da459dcff3a7022e62bc
|
[
"MIT"
] | null | null | null |
src/micropython.py
|
ev3dev/micropython-linux
|
6586ccdc0db16be0ffc5da459dcff3a7022e62bc
|
[
"MIT"
] | null | null | null |
"""Placeholder for MicroPython micropython module"""
def const(expr):
return expr
| 14.666667 | 52 | 0.727273 |
a7d26a3e578bbdd3f89e9cf7233b810c908f5b7e
| 32,819 |
py
|
Python
|
mjmech/video-ui/gbulb/selector_events.py
|
2vin2vin/nslquad
|
26a4397347bb7b92e06814e94fdd6d305dd2ebed
|
[
"Apache-2.0"
] | 64 |
2017-01-18T15:12:05.000Z
|
2022-02-16T08:28:11.000Z
|
mjmech/video-ui/gbulb/selector_events.py
|
2vin2vin/nslquad
|
26a4397347bb7b92e06814e94fdd6d305dd2ebed
|
[
"Apache-2.0"
] | null | null | null |
mjmech/video-ui/gbulb/selector_events.py
|
2vin2vin/nslquad
|
26a4397347bb7b92e06814e94fdd6d305dd2ebed
|
[
"Apache-2.0"
] | 10 |
2017-03-22T16:17:24.000Z
|
2021-12-28T10:23:21.000Z
|
"""Event loop using a selector and related classes.
A selector is a "notify-when-ready" multiplexer. For a subclass which
also includes support for signal handling, see the unix_events sub-module.
"""
__all__ = ['BaseSelectorEventLoop']
import collections
import errno
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from trollius import constants
from trollius import events
from trollius import futures
from trollius import selectors
from trollius import transports
from trollius.log import logger
from trollius import (SSLEOFError, SSLWantReadError, SSLWantWriteError,
BlockingIOError, BrokenPipeError, ChildProcessError,
ConnectionAbortedError, ConnectionRefusedError,
ConnectionResetError, FileNotFoundError,
InterruptedError, PermissionError)
from trollius import py33_exceptions
def wrap_error(func, *args, **kwargs):
    """Invoke *func*, translating legacy socket/IO errors.

    Any socket.error / IOError / OSError raised by the call is re-raised
    as the Python-3.3-style OSError subclass trollius maps to its errno.
    """
    try:
        return func(*args, **kwargs)
    except (socket.error, IOError, OSError) as err:
        translated_cls = py33_exceptions.get_error_class(err.errno, OSError)
        raise translated_cls(err.errno, str(err))
class BaseSelectorEventLoop(base_events.BaseEventLoop):
    """Selector event loop.

    See events.EventLoop for API specification.

    NOTE(gbulb): the selector-based fd bookkeeping from upstream asyncio
    (add_reader/remove_reader/add_writer/remove_writer, _process_events)
    is intentionally not implemented here -- it is provided by the
    glib-based subclass.  The large commented-out upstream versions of
    those methods have been removed for readability.
    """

    def __init__(self, selector=None):
        # ``selector`` is accepted for upstream API compatibility but is
        # not used: fd watching is delegated to the subclass.
        super(BaseSelectorEventLoop, self).__init__()
        self._make_self_pipe()

    def _make_socket_transport(self, sock, protocol, waiter=None,
                               extra=None, server=None, *args):
        # Plain TCP transport for an already-connected socket.
        return _SelectorSocketTransport(self, sock, protocol, waiter,
                                        extra, server)

    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter,
                            server_side=False, server_hostname=None,
                            extra=None, server=None, *args):
        # SSL transport wrapping *rawsock*.
        return _SelectorSslTransport(
            self, rawsock, protocol, sslcontext, waiter,
            server_side, server_hostname, extra, server)

    def _make_datagram_transport(self, sock, protocol,
                                 address=None, extra=None):
        # UDP transport.
        return _SelectorDatagramTransport(self, sock, protocol, address, extra)

    def close(self):
        # _ssock doubles as the "already closed" flag, so the self-pipe
        # is only torn down once.
        if self._ssock is not None:
            self._close_self_pipe()
        super(BaseSelectorEventLoop, self).close()

    def _socketpair(self):
        """Return a connected socket pair; platform subclasses override this."""
        raise NotImplementedError

    def _close_self_pipe(self):
        # Tear down the wakeup pipe created by _make_self_pipe().
        self.remove_reader(self._ssock.fileno())
        self._ssock.close()
        self._ssock = None
        self._csock.close()
        self._csock = None
        self._internal_fds -= 1

    def _make_self_pipe(self):
        # A self-socket, really. :-)  Used to wake the loop up from
        # another thread (see _write_to_self).
        self._ssock, self._csock = self._socketpair()
        self._ssock.setblocking(False)
        self._csock.setblocking(False)
        self._internal_fds += 1
        self.add_reader(self._ssock.fileno(), self._read_from_self)

    def _read_from_self(self):
        # Drain one wakeup byte; a spurious wakeup is harmless.
        try:
            wrap_error(self._ssock.recv, 1)
        except (BlockingIOError, InterruptedError):
            pass

    def _write_to_self(self):
        # Wake up the loop by writing one byte to the self-pipe.
        try:
            wrap_error(self._csock.send, b'x')
        except (BlockingIOError, InterruptedError):
            pass

    def _start_serving(self, protocol_factory, sock,
                       sslcontext=None, server=None):
        self.add_reader(sock.fileno(), self._accept_connection,
                        protocol_factory, sock, sslcontext, server)

    def _accept_connection(self, protocol_factory, sock,
                           sslcontext=None, server=None):
        try:
            # BUGFIX: pass the *method* to wrap_error instead of calling
            # it first -- the old code did wrap_error(sock.accept()),
            # which handed the (conn, addr) tuple to wrap_error as a
            # callable.
            conn, addr = wrap_error(sock.accept)
            conn.setblocking(False)
        except (BlockingIOError, InterruptedError, ConnectionAbortedError):
            pass  # False alarm.
        except OSError as exc:
            # There's nowhere to send the error, so just log it.
            # TODO: Someone will want an error handler for this.
            if exc.errno in (errno.EMFILE, errno.ENFILE,
                             errno.ENOBUFS, errno.ENOMEM):
                # Some platforms (e.g. Linux) keep reporting the FD as
                # ready, so we remove the read handler temporarily.
                # We'll try again in a while.
                logger.exception('Accept out of system resource (%s)', exc)
                self.remove_reader(sock.fileno())
                self.call_later(constants.ACCEPT_RETRY_DELAY,
                                self._start_serving,
                                protocol_factory, sock, sslcontext, server)
            else:
                raise  # The event loop will catch, log and ignore it.
        else:
            if sslcontext:
                self._make_ssl_transport(
                    conn, protocol_factory(), sslcontext, None,
                    server_side=True, extra={'peername': addr}, server=server)
            else:
                self._make_socket_transport(
                    conn, protocol_factory(), extra={'peername': addr},
                    server=server)
            # It's now up to the protocol to handle the connection.

    def sock_recv(self, sock, n):
        """Receive up to *n* bytes from *sock*; returns a Future."""
        fut = futures.Future(loop=self)
        self._sock_recv(fut, False, sock, n)
        return fut

    def _sock_recv(self, fut, registered, sock, n):
        fd = sock.fileno()
        if registered:
            # Remove the callback early. It should be rare that the
            # selector says the fd is ready but the call still returns
            # EAGAIN, and I am willing to take a hit in that case in
            # order to simplify the common case.
            self.remove_reader(fd)
        if fut.cancelled():
            return
        try:
            data = wrap_error(sock.recv, n)
        except (BlockingIOError, InterruptedError):
            # Not ready yet; retry when the fd becomes readable.
            self.add_reader(fd, self._sock_recv, fut, True, sock, n)
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result(data)

    def sock_sendall(self, sock, data):
        """Send all of *data* to *sock*; returns a Future."""
        fut = futures.Future(loop=self)
        if data:
            self._sock_sendall(fut, False, sock, data)
        else:
            # Nothing to send; complete immediately.
            fut.set_result(None)
        return fut

    def _sock_sendall(self, fut, registered, sock, data):
        fd = sock.fileno()
        if registered:
            self.remove_writer(fd)
        if fut.cancelled():
            return
        try:
            n = wrap_error(sock.send, data)
        except (BlockingIOError, InterruptedError):
            n = 0
        except Exception as exc:
            fut.set_exception(exc)
            return
        if n == len(data):
            fut.set_result(None)
        else:
            # Partial send; queue the remainder for when fd is writable.
            if n:
                data = data[n:]
            self.add_writer(fd, self._sock_sendall, fut, True, sock, data)

    def sock_connect(self, sock, address):
        """Connect *sock* to *address*; returns a Future.

        That address better not require a lookup!  We're not calling
        self.getaddrinfo() for you here.
        """
        fut = futures.Future(loop=self)
        self._sock_connect(fut, False, sock, address)
        return fut

    def _sock_connect(self, fut, registered, sock, address):
        # TODO: Use getaddrinfo() to look up the address, to avoid the
        # trap of hanging the entire event loop when the address
        # requires doing a DNS lookup.
        fd = sock.fileno()
        if registered:
            self.remove_writer(fd)
        if fut.cancelled():
            return
        try:
            if not registered:
                # First time around.
                wrap_error(sock.connect, address)
            else:
                # Second pass: the socket became writable; check whether
                # the asynchronous connect actually succeeded.
                err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
                if err != 0:
                    def doit():
                        # Jump to the except clause below.
                        raise OSError(err, 'Connect call failed %s' % (address,))
                    wrap_error(doit)
        except (BlockingIOError, InterruptedError):
            self.add_writer(fd, self._sock_connect, fut, True, sock, address)
        except Exception as exc:
            # BUGFIX: removed a stray Python-2 debug ``print`` statement
            # here; the error is reported through the Future.
            fut.set_exception(exc)
        else:
            fut.set_result(None)

    def sock_accept(self, sock):
        """Accept a connection on *sock*; Future resolves to (conn, address)."""
        fut = futures.Future(loop=self)
        self._sock_accept(fut, False, sock)
        return fut

    def _sock_accept(self, fut, registered, sock):
        fd = sock.fileno()
        if registered:
            self.remove_reader(fd)
        if fut.cancelled():
            return
        try:
            conn, address = wrap_error(sock.accept)
            conn.setblocking(False)
        except (BlockingIOError, InterruptedError):
            self.add_reader(fd, self._sock_accept, fut, True, sock)
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result((conn, address))

    def _stop_serving(self, sock):
        self.remove_reader(sock.fileno())
        sock.close()
class _FlowControlMixin(transports.Transport):
    """Write flow control as a mix-in base class.

    The subclass must implement get_write_buffer_size(), call
    _maybe_pause_protocol() whenever the write buffer grows and
    _maybe_resume_protocol() whenever it shrinks.  It may override
    set_write_buffer_limits() to change the defaults.  The subclass
    constructor must call super().__init__(extra), which installs the
    default buffer limits.  The user may call set_write_buffer_limits()
    and get_write_buffer_size(); the protocol's pause_writing() and
    resume_writing() are invoked as the buffer crosses the limits.
    """

    def __init__(self, extra=None):
        super(_FlowControlMixin, self).__init__(extra)
        self._protocol_paused = False
        self.set_write_buffer_limits()

    def _maybe_pause_protocol(self):
        # Pause the protocol once the buffer exceeds the high-water mark.
        if self.get_write_buffer_size() <= self._high_water:
            return
        if self._protocol_paused:
            return
        self._protocol_paused = True
        try:
            self._protocol.pause_writing()
        except Exception:
            logger.exception('pause_writing() failed')

    def _maybe_resume_protocol(self):
        # Resume only when paused and drained below the low-water mark.
        if not self._protocol_paused:
            return
        if self.get_write_buffer_size() > self._low_water:
            return
        self._protocol_paused = False
        try:
            self._protocol.resume_writing()
        except Exception:
            logger.exception('resume_writing() failed')

    def set_write_buffer_limits(self, high=None, low=None):
        # Defaults: high = 64 KiB (or 4*low), low = high/4.
        if high is None:
            high = 64*1024 if low is None else 4*low
        if low is None:
            low = high // 4
        if not high >= low >= 0:
            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
                             (high, low))
        self._high_water = high
        self._low_water = low

    def get_write_buffer_size(self):
        raise NotImplementedError
class _SelectorTransport(_FlowControlMixin, transports.Transport):
    """Common machinery for selector-based socket transports."""

    max_size = 256 * 1024  # Buffer size passed to recv().

    _buffer_factory = bytearray  # Constructs initial value for self._buffer.

    def __init__(self, loop, sock, protocol, extra, server=None):
        super(_SelectorTransport, self).__init__(extra)
        extra_info = self._extra
        extra_info['socket'] = sock
        extra_info['sockname'] = sock.getsockname()
        if 'peername' not in extra_info:
            try:
                peername = sock.getpeername()
            except socket.error:
                # No peer (e.g. unconnected datagram socket).
                peername = None
            extra_info['peername'] = peername
        self._loop = loop
        self._sock = sock
        self._sock_fd = sock.fileno()
        self._protocol = protocol
        self._server = server
        self._buffer = self._buffer_factory()
        self._conn_lost = 0  # Set when call to connection_lost scheduled.
        self._closing = False  # Set when close() called.
        if server is not None:
            server.attach(self)

    def abort(self):
        """Close immediately, discarding any buffered outgoing data."""
        self._force_close(None)

    def close(self):
        """Stop reading; finish closing once outgoing data is flushed."""
        if self._closing:
            return
        self._closing = True
        self._loop.remove_reader(self._sock_fd)
        if self._buffer:
            # Pending writes: _write_ready() completes the close once
            # the buffer drains.
            return
        self._conn_lost += 1
        self._loop.call_soon(self._call_connection_lost, None)

    def _fatal_error(self, exc):
        # Should be called from exception handler only.
        expected = isinstance(exc, (BrokenPipeError, ConnectionResetError))
        if not expected:
            logger.exception('Fatal error for %s', self)
        self._force_close(exc)

    def _force_close(self, exc):
        # Tear down immediately: drop buffered data and both callbacks.
        if self._conn_lost:
            return  # Teardown already scheduled.
        if self._buffer:
            self._buffer.clear()
            self._loop.remove_writer(self._sock_fd)
        if not self._closing:
            self._closing = True
            self._loop.remove_reader(self._sock_fd)
        self._conn_lost += 1
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            # Drop references to help garbage collection and release
            # the socket's file descriptor.
            self._sock.close()
            self._sock = None
            self._protocol = None
            self._loop = None
            server = self._server
            if server is not None:
                server.detach(self)
                self._server = None

    def get_write_buffer_size(self):
        return len(self._buffer)
class _SelectorSocketTransport(_SelectorTransport):
    """Transport for a connected TCP (stream) socket.

    Incoming bytes are dispatched to protocol.data_received(); outgoing
    bytes go through an internal buffer drained by _write_ready() when
    the socket becomes writable.
    """

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):
        # BUG FIX: this previously called super(_SelectorTransport, self),
        # i.e. it named the wrong class and invoked _FlowControlMixin's
        # one-argument __init__ with _SelectorTransport's five arguments,
        # skipping _SelectorTransport.__init__ entirely.
        super(_SelectorSocketTransport, self).__init__(
            loop, sock, protocol, extra, server)
        self._eof = False     # True once write_eof() has been called.
        self._paused = False  # True while pause_reading() is in effect.
        self._loop.add_reader(self._sock_fd, self._read_ready)
        self._loop.call_soon(self._protocol.connection_made, self)
        if waiter is not None:
            # Signal the creator that the transport is ready for use.
            self._loop.call_soon(waiter.set_result, None)

    def pause_reading(self):
        """Suspend data_received() callbacks until resume_reading()."""
        if self._closing:
            raise RuntimeError('Cannot pause_reading() when closing')
        if self._paused:
            raise RuntimeError('Already paused')
        self._paused = True
        self._loop.remove_reader(self._sock_fd)

    def resume_reading(self):
        """Resume data_received() callbacks after pause_reading()."""
        if not self._paused:
            raise RuntimeError('Not paused')
        self._paused = False
        if self._closing:
            return
        self._loop.add_reader(self._sock_fd, self._read_ready)

    def _read_ready(self):
        # Reader callback: the socket reported readable.
        try:
            data = wrap_error(self._sock.recv, self.max_size)
        except (BlockingIOError, InterruptedError):
            pass  # Spurious readiness; wait for the next event.
        except Exception as exc:
            self._fatal_error(exc)
        else:
            if data:
                self._protocol.data_received(data)
            else:
                # recv() returned b'': the peer closed its sending side.
                keep_open = self._protocol.eof_received()
                if keep_open:
                    # We're keeping the connection open so the
                    # protocol can write more, but we still can't
                    # receive more, so remove the reader callback.
                    self._loop.remove_reader(self._sock_fd)
                else:
                    self.close()

    def write(self, data):
        """Queue *data* for transmission; never blocks.

        Raises TypeError for non-bytes-like data and RuntimeError if
        called after write_eof().
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            # BUG FIX: the message used to be passed as a second TypeError
            # argument instead of being %-formatted.
            raise TypeError('data argument must be byte-ish (%r)' %
                            type(data))
        if self._eof:
            raise RuntimeError('Cannot call write() after write_eof()')
        if not data:
            return

        if self._conn_lost:
            # Connection already lost: drop silently, but warn once the
            # caller keeps writing past the threshold.
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return

        if not self._buffer:
            # Optimization: try to send now.
            try:
                n = wrap_error(self._sock.send, data)
            except (BlockingIOError, InterruptedError):
                pass
            except Exception as exc:
                self._fatal_error(exc)
                return
            else:
                data = data[n:]
                if not data:
                    return
            # Not all was written; register write handler.
            self._loop.add_writer(self._sock_fd, self._write_ready)

        # Add it to the buffer.
        self._buffer.extend(data)
        self._maybe_pause_protocol()

    def _write_ready(self):
        # Writer callback: drain as much of the buffer as possible.
        assert self._buffer, 'Data should not be empty'
        try:
            n = wrap_error(self._sock.send, self._buffer)
        except (BlockingIOError, InterruptedError):
            pass
        except Exception as exc:
            self._loop.remove_writer(self._sock_fd)
            self._buffer.clear()
            self._fatal_error(exc)
        else:
            if n:
                del self._buffer[:n]
            self._maybe_resume_protocol()  # May append to buffer.
            if not self._buffer:
                self._loop.remove_writer(self._sock_fd)
                if self._closing:
                    self._call_connection_lost(None)
                elif self._eof:
                    # Buffer drained after write_eof(): half-close now.
                    self._sock.shutdown(socket.SHUT_WR)

    def write_eof(self):
        """Shut down the write side once buffered data is flushed."""
        if self._eof:
            return
        self._eof = True
        if not self._buffer:
            self._sock.shutdown(socket.SHUT_WR)

    def can_write_eof(self):
        return True
class _SelectorSslTransport(_SelectorTransport):
    """Transport speaking TLS over a selector socket.

    Wraps *rawsock* in an SSL socket and drives the handshake
    asynchronously; connection_made() is announced (and *waiter*
    resolved) only after the handshake completes.
    """

    _buffer_factory = bytearray

    def __init__(self, loop, rawsock, protocol, sslcontext, waiter=None,
                 server_side=False, server_hostname=None,
                 extra=None, server=None):
        if ssl is None:
            raise RuntimeError('stdlib ssl module not available')

        if server_side:
            if not sslcontext:
                raise ValueError('Server side ssl needs a valid SSLContext')
        else:
            if not sslcontext:
                # Client side may pass ssl=True to use a default
                # context; in that case the sslcontext passed is None.
                # The default is the same as used by urllib with
                # cadefault=True.
                if hasattr(ssl, '_create_stdlib_context'):
                    sslcontext = ssl._create_stdlib_context(
                        cert_reqs=ssl.CERT_REQUIRED,
                        check_hostname=bool(server_hostname))
                else:
                    # Fallback for Python 3.3.
                    sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                    sslcontext.options |= ssl.OP_NO_SSLv2
                    sslcontext.set_default_verify_paths()
                    sslcontext.verify_mode = ssl.CERT_REQUIRED

        wrap_kwargs = {
            'server_side': server_side,
            'do_handshake_on_connect': False,
        }
        if server_hostname and not server_side and ssl.HAS_SNI:
            # Send SNI so virtual-hosted servers present the right cert.
            wrap_kwargs['server_hostname'] = server_hostname
        sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)

        super(_SelectorSslTransport, self).__init__(
            loop, sslsock, protocol, extra, server)

        self._server_hostname = server_hostname
        self._waiter = waiter
        self._rawsock = rawsock
        self._sslcontext = sslcontext
        self._paused = False

        # SSL-specific extra info. (peercert is set later)
        self._extra.update(sslcontext=sslcontext)

        self._on_handshake()

    def _on_handshake(self):
        # Drive the TLS handshake; re-registers itself as a reader or
        # writer callback until the handshake completes.
        try:
            self._sock.do_handshake()
        except ssl.SSLWantReadError:
            self._loop.add_reader(self._sock_fd, self._on_handshake)
            return
        except ssl.SSLWantWriteError:
            self._loop.add_writer(self._sock_fd, self._on_handshake)
            return
        except Exception as exc:
            self._loop.remove_reader(self._sock_fd)
            self._loop.remove_writer(self._sock_fd)
            self._sock.close()
            if self._waiter is not None:
                self._waiter.set_exception(exc)
            return
        except BaseException as exc:
            # e.g. KeyboardInterrupt: clean up, then re-raise.
            self._loop.remove_reader(self._sock_fd)
            self._loop.remove_writer(self._sock_fd)
            self._sock.close()
            if self._waiter is not None:
                self._waiter.set_exception(exc)
            raise

        self._loop.remove_reader(self._sock_fd)
        self._loop.remove_writer(self._sock_fd)
        peercert = self._sock.getpeercert()
        if not hasattr(self._sslcontext, 'check_hostname'):
            # Verify hostname if requested, Python 3.4+ uses check_hostname
            # and checks the hostname in do_handshake()
            if (self._server_hostname and
                self._sslcontext.verify_mode != ssl.CERT_NONE):
                try:
                    ssl.match_hostname(peercert, self._server_hostname)
                except Exception as exc:
                    self._sock.close()
                    if self._waiter is not None:
                        self._waiter.set_exception(exc)
                    return

        # Add extra info that becomes available after handshake.
        self._extra.update(peercert=peercert,
                           cipher=self._sock.cipher(),
                           compression=self._sock.compression(),
                           )

        self._read_wants_write = False
        self._write_wants_read = False
        self._loop.add_reader(self._sock_fd, self._read_ready)
        self._loop.call_soon(self._protocol.connection_made, self)
        if self._waiter is not None:
            self._loop.call_soon(self._waiter.set_result, None)

    def pause_reading(self):
        # XXX This is a bit icky, given the comment at the top of
        # _read_ready().  Is it possible to evoke a deadlock?  I don't
        # know, although it doesn't look like it; write() will still
        # accept more data for the buffer and eventually the app will
        # call resume_reading() again, and things will flow again.
        if self._closing:
            raise RuntimeError('Cannot pause_reading() when closing')
        if self._paused:
            raise RuntimeError('Already paused')
        self._paused = True
        self._loop.remove_reader(self._sock_fd)

    def resume_reading(self):
        if not self._paused:
            # BUG FIX: this was ``raise ('Not paused')``, which raises
            # TypeError('exceptions must derive from BaseException')
            # instead of the intended error.
            raise RuntimeError('Not paused')
        self._paused = False
        if self._closing:
            return
        self._loop.add_reader(self._sock_fd, self._read_ready)

    def _read_ready(self):
        if self._write_wants_read:
            # A previous send() needed handshake data first; retry it
            # now that the socket is readable.
            self._write_wants_read = False
            self._write_ready()

            if self._buffer:
                self._loop.add_writer(self._sock_fd, self._write_ready)

        try:
            data = wrap_error(self._sock.recv, self.max_size)
        except (BlockingIOError, InterruptedError, ssl.SSLWantReadError):
            pass
        except ssl.SSLWantWriteError:
            # Renegotiation: recv() must send before it can receive.
            self._read_wants_write = True
            self._loop.remove_reader(self._sock_fd)
            self._loop.add_writer(self._sock_fd, self._write_ready)
        except Exception as exc:
            self._fatal_error(exc)
        else:
            if data:
                self._protocol.data_received(data)
            else:
                try:
                    # TLS cannot stay half-open: always close on EOF.
                    keep_open = self._protocol.eof_received()
                    if keep_open:
                        logger.warning('returning true from eof_received() '
                                       'has no effect when using ssl')
                finally:
                    self.close()

    def _write_ready(self):
        if self._read_wants_write:
            # A previous recv() needed to send first; retry it now that
            # the socket is writable.
            self._read_wants_write = False
            self._read_ready()

            if not (self._paused or self._closing):
                self._loop.add_reader(self._sock_fd, self._read_ready)

        if self._buffer:
            try:
                n = wrap_error(self._sock.send, self._buffer)
            except (BlockingIOError, InterruptedError,
                    ssl.SSLWantWriteError):
                n = 0
            except ssl.SSLWantReadError:
                # Renegotiation: send() must receive before it can send.
                n = 0
                self._loop.remove_writer(self._sock_fd)
                self._write_wants_read = True
            except Exception as exc:
                self._loop.remove_writer(self._sock_fd)
                self._buffer.clear()
                self._fatal_error(exc)
                return

            if n:
                del self._buffer[:n]

        self._maybe_resume_protocol()  # May append to buffer.

        if not self._buffer:
            self._loop.remove_writer(self._sock_fd)
            if self._closing:
                self._call_connection_lost(None)

    def write(self, data):
        """Queue *data* for encrypted transmission; never blocks."""
        if not isinstance(data, (bytes, bytearray, memoryview)):
            # BUG FIX: the message used to be passed as a second TypeError
            # argument instead of being %-formatted.
            raise TypeError('data argument must be byte-ish (%r)' %
                            type(data))
        if not data:
            return

        if self._conn_lost:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return

        if not self._buffer:
            # _write_ready() performs the actual send.
            self._loop.add_writer(self._sock_fd, self._write_ready)

        # Add it to the buffer.
        self._buffer.extend(data)
        self._maybe_pause_protocol()

    def can_write_eof(self):
        # TLS has no clean half-close; shutdown(SHUT_WR) would break
        # the TLS layer.
        return False
class _SelectorDatagramTransport(_SelectorTransport):
    """Transport for a UDP (datagram) socket."""

    _buffer_factory = collections.deque  # Queue of (data, addr) pairs.

    def __init__(self, loop, sock, protocol, address=None, extra=None):
        super(_SelectorDatagramTransport, self).__init__(
            loop, sock, protocol, extra)
        # For a connected socket, the fixed remote address; else None.
        self._address = address
        self._loop.add_reader(self._sock_fd, self._read_ready)
        self._loop.call_soon(self._protocol.connection_made, self)

    def get_write_buffer_size(self):
        # Total payload bytes queued across all pending datagrams.
        total = 0
        for data, _ in self._buffer:
            total += len(data)
        return total

    def _read_ready(self):
        try:
            data, addr = wrap_error(self._sock.recvfrom, self.max_size)
        except (BlockingIOError, InterruptedError):
            pass  # Spurious readiness; wait for the next event.
        except OSError as exc:
            # OS-level errors are reported to the protocol, not fatal.
            self._protocol.error_received(exc)
        except Exception as exc:
            self._fatal_error(exc)
        else:
            self._protocol.datagram_received(data, addr)

    def _send_now(self, data, addr):
        # Transmit a single datagram; connected sockets use send().
        if self._address:
            wrap_error(self._sock.send, data)
        else:
            wrap_error(self._sock.sendto, data, addr)

    def sendto(self, data, addr=None):
        """Send one datagram to *addr* (or the connected peer)."""
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be byte-ish (%r)',
                            type(data))
        if not data:
            return

        if self._address and addr not in (None, self._address):
            raise ValueError('Invalid address: must be None or %s' %
                             (self._address,))

        if self._conn_lost and self._address:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return

        if not self._buffer:
            # Attempt to send it right away first.
            try:
                self._send_now(data, addr)
                return
            except (BlockingIOError, InterruptedError):
                self._loop.add_writer(self._sock_fd, self._sendto_ready)
            except OSError as exc:
                self._protocol.error_received(exc)
                return
            except Exception as exc:
                self._fatal_error(exc)
                return

        # Ensure that what we buffer is immutable.
        self._buffer.append((bytes(data), addr))
        self._maybe_pause_protocol()

    def _sendto_ready(self):
        # Writer callback: flush queued datagrams in order.
        while self._buffer:
            data, addr = self._buffer.popleft()
            try:
                self._send_now(data, addr)
            except (BlockingIOError, InterruptedError):
                self._buffer.appendleft((data, addr))  # Try again later.
                break
            except OSError as exc:
                self._protocol.error_received(exc)
                return
            except Exception as exc:
                self._fatal_error(exc)
                return

        self._maybe_resume_protocol()  # May append to buffer.
        if not self._buffer:
            self._loop.remove_writer(self._sock_fd)
            if self._closing:
                self._call_connection_lost(None)
| 36.025247 | 81 | 0.573022 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.