Dataset columns (one row per source file):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 31bc8401bc79f981d7277b85aa7ace9f41d9a5f6 | size: 5,040 | ext: py | lang: Python
path: env/lib/python3.8/site-packages/plotly/graph_objs/barpolar/selected/_textfont.py
repo: acrucetta/Chicago_COVI_WebApp (head a37c9f492a20dcd625f8647067394617988de913) | licenses: MIT, Unlicense
stars: 11,750 (2015-10-12T07:03:39.000Z to 2022-03-31T20:43:15.000Z) | issues: 2,951 (2015-10-12T00:41:25.000Z to 2022-03-31T22:19:26.000Z) | forks: 2,623 (2015-10-15T14:40:27.000Z to 2022-03-28T16:05:50.000Z)
content:
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "barpolar.selected"
_path_str = "barpolar.selected.textfont"
_valid_props = {"color"}
# color
# -----
@property
def color(self):
"""
Sets the text font color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the text font color of selected points.
"""
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.barpolar.selected.Textfont`
color
Sets the text font color of selected points.
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.barpolar.selected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.selected.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
avg_line_length: 36.258993 | max_line_length: 82 | alphanum_fraction: 0.556746
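A brief usage sketch (editor illustration, not part of the dataset row above): it exercises the barpolar.selected.textfont.color property that the generated class documents. The sample data values are assumptions.

import plotly.graph_objects as go

# Selected bars get red label text; everything else keeps the default styling.
fig = go.Figure(
    go.Barpolar(
        r=[1, 2, 3],
        theta=[0, 45, 90],
        text=["a", "b", "c"],
        selected=dict(textfont=dict(color="#ff0000")),
    )
)
fig.show()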
hexsha: 68a27733cc359e6ac3aa29637dccc307997b858b | size: 5,842 | ext: py | lang: Python
path: serverless-rest-api/python-http-sam/tests/integration/conftest.py
repo: heeki/serverless-samples (head debe33cc5efc0cf6b1e37cef16b5783a78098509) | licenses: MIT-0
stars: 45 (2021-02-25T15:55:47.000Z to 2022-03-30T15:43:46.000Z) | issues: 3 (2021-12-16T23:03:55.000Z to 2022-01-05T21:18:54.000Z) | forks: 17 (2021-05-27T21:55:26.000Z to 2022-02-21T15:51:38.000Z)
content:
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3
import os
import pytest
APPLICATION_STACK_NAME = os.getenv('TEST_APPLICATION_STACK_NAME', None)
COGNITO_STACK_NAME = os.getenv('TEST_COGNITO_STACK_NAME', None)
globalConfig = {}
def get_stack_outputs(stack_name):
result = {}
cf_client = boto3.client('cloudformation')
cf_response = cf_client.describe_stacks(StackName=stack_name)
outputs = cf_response["Stacks"][0]["Outputs"]
for output in outputs:
result[output["OutputKey"]] = output["OutputValue"]
return result
def create_cognito_accounts():
result = {}
sm_client = boto3.client('secretsmanager')
idp_client = boto3.client('cognito-idp')
# create regular user account
sm_response = sm_client.get_random_password(ExcludeCharacters='"''`[]{}():;,$/\\<>|=&',
RequireEachIncludedType=True)
result["regularUserName"] = "[email protected]"
result["regularUserPassword"] = sm_response["RandomPassword"]
try:
idp_client.admin_delete_user(UserPoolId=globalConfig["UserPool"],
Username=result["regularUserName"])
except idp_client.exceptions.UserNotFoundException:
        print("Regular user hasn't been created previously")
idp_response = idp_client.sign_up(
ClientId=globalConfig["UserPoolClient"],
Username=result["regularUserName"],
Password=result["regularUserPassword"],
UserAttributes=[{"Name": "name", "Value": result["regularUserName"]}]
)
result["regularUserSub"] = idp_response["UserSub"]
idp_client.admin_confirm_sign_up(UserPoolId=globalConfig["UserPool"],
Username=result["regularUserName"])
# get new user authentication info
idp_response = idp_client.initiate_auth(
AuthFlow='USER_PASSWORD_AUTH',
AuthParameters={
'USERNAME': result["regularUserName"],
'PASSWORD': result["regularUserPassword"]
},
ClientId=globalConfig["UserPoolClient"],
)
result["regularUserIdToken"] = idp_response["AuthenticationResult"]["IdToken"]
result["regularUserAccessToken"] = idp_response["AuthenticationResult"]["AccessToken"]
result["regularUserRefreshToken"] = idp_response["AuthenticationResult"]["RefreshToken"]
# create administrative user account
sm_response = sm_client.get_random_password(ExcludeCharacters='"''`[]{}():;,$/\\<>|=&',
RequireEachIncludedType=True)
result["adminUserName"] = "[email protected]"
result["adminUserPassword"] = sm_response["RandomPassword"]
try:
idp_client.admin_delete_user(UserPoolId=globalConfig["UserPool"],
Username=result["adminUserName"])
except idp_client.exceptions.UserNotFoundException:
        print("Admin user hasn't been created previously")
idp_response = idp_client.sign_up(
ClientId=globalConfig["UserPoolClient"],
Username=result["adminUserName"],
Password=result["adminUserPassword"],
UserAttributes=[{"Name": "name", "Value": result["adminUserName"]}]
)
result["adminUserSub"] = idp_response["UserSub"]
idp_client.admin_confirm_sign_up(UserPoolId=globalConfig["UserPool"],
Username=result["adminUserName"])
# add administrative user to the admins group
idp_client.admin_add_user_to_group(UserPoolId=globalConfig["UserPool"],
Username=result["adminUserName"],
GroupName=globalConfig["UserPoolAdminGroupName"])
# get new admin user authentication info
idp_response = idp_client.initiate_auth(
AuthFlow='USER_PASSWORD_AUTH',
AuthParameters={
'USERNAME': result["adminUserName"],
'PASSWORD': result["adminUserPassword"]
},
ClientId=globalConfig["UserPoolClient"],
)
result["adminUserIdToken"] = idp_response["AuthenticationResult"]["IdToken"]
result["adminUserAccessToken"] = idp_response["AuthenticationResult"]["AccessToken"]
result["adminUserRefreshToken"] = idp_response["AuthenticationResult"]["RefreshToken"]
return result
def clear_dynamo_tables():
# clear all data from the tables that will be used for testing
dbd_client = boto3.client('dynamodb')
db_response = dbd_client.scan(
TableName=globalConfig['LocationsTable'],
AttributesToGet=['locationid']
)
for item in db_response["Items"]:
dbd_client.delete_item(
TableName=globalConfig['LocationsTable'],
Key={'locationid': {'S': item['locationid']["S"]}}
)
db_response = dbd_client.scan(
TableName=globalConfig['ResourcesTable'],
AttributesToGet=['resourceid']
)
for item in db_response["Items"]:
dbd_client.delete_item(
TableName=globalConfig['ResourcesTable'],
Key={'resourceid': {'S': item['resourceid']["S"]}}
)
db_response = dbd_client.scan(
TableName=globalConfig['BookingsTable'],
AttributesToGet=['bookingid']
)
for item in db_response["Items"]:
dbd_client.delete_item(
TableName=globalConfig['BookingsTable'],
Key={'bookingid': {'S': item['bookingid']["S"]}}
)
return
@pytest.fixture(scope='session')
def global_config(request):
global globalConfig
# load outputs of the stacks to test
globalConfig.update(get_stack_outputs(APPLICATION_STACK_NAME))
globalConfig.update(get_stack_outputs(COGNITO_STACK_NAME))
globalConfig.update(create_cognito_accounts())
clear_dynamo_tables()
return globalConfig
avg_line_length: 42.333333 | max_line_length: 92 | alphanum_fraction: 0.661075
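A hedged example of a test that consumes the session-scoped global_config fixture defined above; the "Api" stack output key and the "locations" route are assumptions for illustration, not values taken from the original sample project.

import requests

def test_list_locations_as_regular_user(global_config):
    # Both the endpoint URL ("Api" output) and the resource path are hypothetical.
    response = requests.get(
        global_config["Api"] + "locations",
        headers={"Authorization": global_config["regularUserIdToken"]},
    )
    assert response.status_code == 200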
hexsha: e4f741f776bd70289f930d86ff11a75b844ec084 | size: 1,638 | ext: py | lang: Python
path: svm.py
repo: jbreindl/676-deepfake-detection (head 99ad695e9f8eddccd71a0c02246fc1006588e0e9) | licenses: MIT
stars: none recorded | issues: none recorded | forks: none recorded
content:
import cv2
import numpy as np
import os
from sklearn.svm import SVR
from prnu import extract_multiple_aligned
def train(dirs: list, labels: list):
"""
    Train a support-vector model (sklearn SVR) on PRNU fingerprints
    Params:
        dirs: List of directories, each containing the frames of one video
        labels: labels corresponding to the dirs
Returns:
svm: trained svm
"""
# instantiate svm
svm = SVR()
# get PRNU data for all images in directory
prnus = list()
for directory in dirs:
imgs = list()
for fname in os.listdir(directory):
img = cv2.imread(os.path.join(directory, fname), cv2.IMREAD_COLOR)
imgs.append(img)
# get what is essentially PRNU data for whole video and prepare to classify
prnu = extract_multiple_aligned(imgs)
prnus.append(prnu.flatten())
prnus = np.array(prnus)
svm.fit(prnus, labels)
return svm
def classify(in_dir: str, svm) -> int:
"""
    Classify a video (a directory of frames) using a trained SVM
    Params:
        in_dir: Directory containing the frames of a video
        svm: trained svm
    Returns:
        label: predicted label for the video
"""
images = list()
for fname in os.listdir(in_dir):
img = cv2.imread(os.path.join(in_dir, fname))
images.append(img)
prnus = extract_multiple_aligned(images)
prediction = svm.predict(prnus.reshape(1, -1))
return prediction
if __name__ == "__main__":
trained = train(['preprocessed'], [1])
print(classify('preprocessed', trained))
avg_line_length: 29.781818 | max_line_length: 83 | alphanum_fraction: 0.642857
hexsha: e2903c70dc722bdf99fd5efd920ebd2bb164c3f3 | size: 1,613 | ext: py | lang: Python
path: dashboard/views.py
repo: nikhiljohn10/django-auth (head 01d97e8173436c3446f039cfa6472ece3cd9f96a) | licenses: MIT
stars: none recorded | issues: none recorded | forks: none recorded
content:
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic.edit import CreateView, UpdateView
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required, permission_required
from dashboard.forms import ProfileForm
from accounts.models import User
from accounts.tools import activater, mailer
@login_required
def home(request):
return render(request, 'dashboard/pages/home.html')
@login_required
def profile(request):
return render(request, 'dashboard/pages/profile.html')
@login_required
@permission_required("is_staff", login_url='/dashboard/')
def users(request):
users = User.objects.filter(is_staff=False)
for user in users:
if not user.email_verified:
user.hashed = activater.make_token(user)
else:
user.hashed = "Verified"
return render(request, 'dashboard/pages/users.html', {'users': users})
@login_required
@permission_required("is_staff", login_url='/dashboard/')
def gmail(request):
credentials = None
if mailer.activated:
credentials = mailer.credentials
return render(request, 'dashboard/pages/gmail.html', {'credentials': credentials})
class ProfileEditView(LoginRequiredMixin, UpdateView):
model = User
template_name = 'dashboard/pages/profile_edit.html'
success_url = '/dashboard/profile'
form_class = ProfileForm
def get_object(self, *args, **kwargs):
return get_object_or_404(User, pk=self.request.user.pk)
profile_edit = ProfileEditView.as_view()
avg_line_length: 29.87037 | max_line_length: 86 | alphanum_fraction: 0.750775
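A minimal sketch of how these dashboard views might be wired in a urls.py; the route names and prefixes are assumptions, not taken from the original django-auth project.

from django.urls import path

from dashboard import views

urlpatterns = [
    path('', views.home, name='dashboard-home'),
    path('profile/', views.profile, name='dashboard-profile'),
    path('profile/edit/', views.profile_edit, name='dashboard-profile-edit'),
    path('users/', views.users, name='dashboard-users'),
    path('gmail/', views.gmail, name='dashboard-gmail'),
]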
hexsha: 560cd86864d954a77cb360e875e057e6fef296d0 | size: 10,259 | ext: py | lang: Python
path: components/tentacle.py
repo: chairmenfrog/FooProxy (head 357f1f46feddb4effec89362776c9edf34471032) | licenses: MIT
stars: none recorded | issues: none recorded | forks: none recorded
content:
# coding:utf-8
"""
@author : linkin
@email : [email protected]
@date : 2018-11-03
"""
import asyncio
import datetime
import logging
import random
import time
from string import ascii_letters
import aiohttp
from components.dbhelper import Database
from config.DBsettings import _DB_SETTINGS
from config.config import AGO
from config.config import MAX_T_LEN
from config.config import MAX_V_COUNT
from config.config import RETRIES
from config.config import TARGET_EXPIRE
from config.config import TIMEOUT
from config.config import VALIDATE_LOCAL
from config.config import targets
from const.settings import TARGETS_DB
from const.settings import headers
from tools.async_tools import send_async_http
from tools.util import format_proxies
from tools.util import gen_target_db_name
from tools.util import get_ip_addr
from tools.util import internet_access
from tools.util import time_to_date
logger = logging.getLogger('Tentacle')
class Tentacle(object):
"""
    Target-database scanning and validation class. It can be embedded in other
    components and acts as the "tentacle" of the workstation: it validates every
    acquired proxy IP against each target URL one by one, periodically scans the
    locally stored target databases, and removes invalid proxy IPs.
"""
def __init__(self, targets=targets):
"""
        Initialize the tentacle.
        :param targets: target URL list; defaults to ``targets`` from config
"""
self.targets = targets
self.db = Database(_DB_SETTINGS)
def begin(self):
"""
        Preparation before scanning and validation starts:
            * connect to the database
            * remove expired target collections
            * save/update the targets collection that stores target-database info
"""
self.db.connect()
self.clean_expired_targets()
self.save_targets()
def end(self):
self.db.close()
def load_target_db(self) -> dict:
"""
        Load all data from every target collection that is awaiting validation.
"""
_targets = set()
allowed_targets = []
_dict = {}
if AGO:
targets_inside = self.db.all(tname=TARGETS_DB)
for i in targets_inside:
url = i['url']
if url in self.targets:
continue
elif url:
_targets.add(url)
[allowed_targets.extend(i) for i in (self.targets, _targets)]
for url in allowed_targets:
_name = gen_target_db_name(url)
_data = self.db.all(tname=_name)
_dict[url] = _data
logger.info('Loaded %d proxies from db: %s ' % (len(_data), _name))
return _dict
def save_targets(self):
"""
        Save the targets currently configured in config to the database.
"""
data = {}
now = datetime.datetime.now()
j = 0
for i in targets:
inside_data = self.db.select({'url': i}, tname=TARGETS_DB)
if inside_data:
self.db.update({'url': i}, {'validTime': now.isoformat()}, tname=TARGETS_DB)
continue
data['url'] = i
data['createdTime'] = now.isoformat()
data['validTime'] = now.isoformat()
data['db'] = gen_target_db_name(i)
data['_id'] = str(j + random.randint(0, 100000)) + \
ascii_letters[random.randint(0, 52)] + \
str(int(time.time() * 1000))
self.db.save(data, tname=TARGETS_DB)
def clean_expired_targets(self):
"""
        Remove expired target collections.
"""
if not self.db.connected:
return
now = datetime.datetime.now()
expired_created_time = (now - datetime.timedelta(days=TARGET_EXPIRE)).isoformat()
all_data = self.db.all(tname=TARGETS_DB)
for tar in all_data:
if tar['validTime'] < expired_created_time:
db_name = gen_target_db_name(tar['url'])
_std_count = self.db.handler[db_name].drop()
self.db.delete({'url': tar['url']}, tname=TARGETS_DB)
logger.info('Deleted expired target website proxy collection:(%s)' % (db_name))
def run(self):
"""
        Run the Tentacle.
        Logic:
            * create a single async session and control the connection pool with a semaphore
            * check whether there is internet access
            * if online, load the target-database data that needs scanning and validation
            * one pass over every target collection counts as one cycle
            * while scanning each collection, cap each batch at MAX_V_COUNT
"""
logger.info('Running Tentacle.')
self.begin()
loop = asyncio.get_event_loop()
sem = asyncio.Semaphore(MAX_V_COUNT)
conn = aiohttp.TCPConnector(verify_ssl=False, limit=MAX_V_COUNT)
session = aiohttp.ClientSession(loop=loop, connector=conn)
while 1:
if not internet_access():
continue
try:
_dict = self.load_target_db()
for url in _dict:
logger.info('Start the validation of the target url:%s' % url)
data = _dict[url]
_len = len(data)
_count = MAX_V_COUNT if MAX_V_COUNT <= _len else _len
start = 0
while 1:
_data = data[start:start + _count]
if not _data:
logger.info('Target url:%s -> validation finished,total proxies:%d' % (url, _len))
break
tasks = []
for i in _data:
ip = i['ip']
port = i['port']
proxy = format_proxies(':'.join([ip, port]))
tasks.append(self.async_visit_target(self.db, url, proxy, i, sem, session))
loop.run_until_complete(asyncio.gather(*tasks))
start += _count
time.sleep(VALIDATE_LOCAL)
except Exception as e:
self.end()
logger.error('%s,msg: %s ' % (e.__class__, e))
logger.error('Shut down the Tentacle.')
async def async_visit_target(self, db, url, proxy, bullet, sem, session, scan=True):
"""
        Async request coroutine that validates a single proxy IP.
        :param db: database handling the operation
        :param url: target website URL
        :param proxy: proxy IP (dict) to check against the target URL
        :param bullet: full record of the single proxy IP
        :param sem: coroutine concurrency semaphore
        :param session: async request session
        :param scan: True when scanning the target database, False for the initial validation before insertion
"""
data = {
'ip': bullet['ip'],
'port': bullet['port'],
'anony_type': bullet['anony_type'],
'address': bullet['address'],
'createdTime': bullet['createdTime'],
'score': bullet['score'],
'test_count': int(bullet['test_count']) + 1,
'url': url,
}
db_name = gen_target_db_name(url)
async with sem:
ret = await send_async_http(session, 'head', url,
retries=RETRIES,
headers=headers,
proxy=proxy['http'],
timeout=TIMEOUT)
t, code = ret['cost'], ret['code']
if code == 200:
data['score'] = round(
(bullet['score'] * bullet['test_count'] + round((1 - t / 15) * 100, 2)) / data['test_count'], 2)
data['total'] = round(data['score'] * data['test_count'], 2)
data['resp_time'] = str(t) + 's'
data['valid_time'] = time_to_date(int(time.time()))
if scan:
self.update(db, data, db_name)
else:
self.success(db, data, db_name)
else:
if scan:
self.fail(db, data, db_name)
async def specified_validate(self, db, bullet, session, sem):
"""
        Initial-insertion validation coroutine. The Tentacle embedded in the Validator
        calls this coroutine to validate a proxy IP (collected and checked by the crawler)
        before it is first written to the target databases.
        :param db: database object handling the operation
        :param bullet: full record of the single proxy IP
        :param session: async request session
        :param sem: coroutine concurrency semaphore
"""
ip = bullet['ip']
port = bullet['port']
proxy = format_proxies(':'.join([ip, port]))
max_thread_count = MAX_T_LEN if MAX_T_LEN <= len(self.targets) else len(self.targets)
allowed_targets = self.targets[:max_thread_count]
tasks = [self.async_visit_target(db, i, proxy, bullet, sem, session, scan=False) for i in allowed_targets]
resp = asyncio.gather(*tasks)
await resp
def success(self, db, bullet, tname):
"""
        Insert a proxy into the target collection after the tentacle (called from the
        Validator for the first time) successfully validated it against the target URL.
        :param db: database object handling the operation
        :param bullet: full record of the single proxy IP
        :param tname: data collection corresponding to the target URL
"""
ip = bullet['ip']
port = bullet['port']
_data = db.select({'ip': ip, 'port': port}, tname=tname)
bullet['address'] = get_ip_addr(ip) if bullet['address'] == 'unknown' or \
bullet['address'] == '' else bullet['address']
if _data:
bullet['_id'] = _data[0]['_id']
self.update(db, bullet, tname)
return
bullet['createdTime'] = time_to_date(int(time.time()))
try:
db.save(bullet, tname=tname)
except Exception as e:
logger.error('%s,msg: %s ' % (e.__class__, e))
return
def update(self, db, bullet, tname):
"""
        Update a proxy record that already exists in the target collection after a
        successful validation.
        :param db: database object handling the operation
        :param bullet: full record of the single proxy IP
        :param tname: data collection corresponding to the target URL
"""
ip = bullet['ip']
port = bullet['port']
if bullet['createdTime'] == '':
bullet['createdTime'] = time_to_date(int(time.time()))
bullet['address'] = get_ip_addr(ip) if bullet['address'] == 'unknown' or \
bullet['address'] == '' else bullet['address']
db.update({'ip': ip, 'port': port}, bullet, tname=tname)
def fail(self, db, bullet, tname):
"""
        Handle a failed validation for a proxy record in the target collection (delete it).
        :param db: database object handling the operation
        :param bullet: full record of the single proxy IP
        :param tname: data collection corresponding to the target URL
"""
try:
ip = bullet['ip']
port = bullet['port']
proxy = ':'.join([ip, port])
db.delete({'ip': ip, 'port': port}, tname=tname)
logger.warning('Deleted inoperative proxy %s in %s' % (proxy, tname))
except Exception as e:
logger.error('%s,msg: %s ' % (e.__class__, e))
return
avg_line_length: 35.49827 | max_line_length: 116 | alphanum_fraction: 0.53358
hexsha: 0670ca1fa7db5bdbc21f717f179b73b5e440d1d8 | size: 8,876 | ext: py | lang: Python
path: readthedocs/profiles/views.py | licenses: MIT
stars repo: ank-forked/readthedocs.org (head e4110e8db5d25b7e6c699dd2df1a580b04ee8d16) | stars: 1 (2021-04-27T05:55:34.000Z to 2021-04-27T05:55:34.000Z)
issues/forks repo: himynamesdave/readthedocs.org (head 38e73cd73efb76461d28a5d9737731b7d7349297) | issues: 4 (2021-02-08T21:06:49.000Z to 2021-12-13T20:51:17.000Z) | forks: 3 (2016-08-04T12:53:13.000Z to 2016-11-02T14:17:55.000Z)
content:
"""
Views for creating, editing and viewing site-specific user profiles.
"""
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render_to_response
from django.template import RequestContext
def create_profile(request, form_class, success_url=None,
template_name='profiles/private/create_profile.html',
extra_context=None):
"""
Create a profile for the current user, if one doesn't already
exist.
If the user already has a profile, a redirect will be issued to the
:view:`profiles.views.edit_profile` view.
**Optional arguments:**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``form_class``
The form class to use for validating and creating the user
profile. This form class must define a method named
``save()``, implementing the same argument signature as the
``save()`` method of a standard Django ``ModelForm`` (this
view will call ``save(commit=False)`` to obtain the profile
object, and fill in the user before the final save). If the
profile object includes many-to-many relations, the convention
established by ``ModelForm`` of using a method named
``save_m2m()`` will be used, and so your form class should
also define this method.
``success_url``
The URL to redirect to after successful profile creation. If
this argument is not supplied, this will default to the URL of
:view:`profiles.views.profile_detail` for the newly-created
profile object.
``template_name``
The template to use when displaying the profile-creation
form. If not supplied, this will default to
:template:`profiles/create_profile.html`.
**Context:**
``form``
The profile-creation form.
**Template:**
``template_name`` keyword argument, or
:template:`profiles/create_profile.html`.
"""
try:
profile_obj = request.user.profile
return HttpResponseRedirect(reverse('profiles_edit_profile'))
except ObjectDoesNotExist:
pass
#
# We set up success_url here, rather than as the default value for
# the argument. Trying to do it as the argument's default would
# mean evaluating the call to reverse() at the time this module is
# first imported, which introduces a circular dependency: to
# perform the reverse lookup we need access to profiles/urls.py,
# but profiles/urls.py in turn imports this module.
#
if success_url is None:
success_url = reverse('profiles_profile_detail',
kwargs={'username': request.user.username})
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
profile_obj = form.save(commit=False)
profile_obj.user = request.user
profile_obj.save()
if hasattr(form, 'save_m2m'):
form.save_m2m()
return HttpResponseRedirect(success_url)
else:
form = form_class()
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
{'form': form},
context_instance=context)
create_profile = login_required(create_profile)
def edit_profile(request, form_class, success_url=None,
template_name='profiles/private/edit_profile.html',
extra_context=None):
"""
Edit the current user's profile.
If the user does not already have a profile, a redirect will be issued to
the :view:`profiles.views.create_profile` view.
**Optional arguments:**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``form_class``
The form class to use for validating and editing the user
profile. This form class must operate similarly to a standard
Django ``ModelForm`` in that it must accept an instance of the
object to be edited as the keyword argument ``instance`` to
its constructor, and it must implement a method named
``save()`` which will save the updates to the object.
``success_url``
The URL to redirect to following a successful edit. If not
specified, this will default to the URL of
:view:`profiles.views.profile_detail` for the profile object
being edited.
``template_name``
The template to use when displaying the profile-editing
form. If not specified, this will default to
:template:`profiles/edit_profile.html`.
**Context:**
``form``
The form for editing the profile.
``profile``
The user's current profile.
**Template:**
``template_name`` keyword argument or
:template:`profiles/edit_profile.html`.
"""
try:
profile_obj = request.user.profile
except ObjectDoesNotExist:
return HttpResponseRedirect(reverse('profiles_profile_create'))
if success_url is None:
success_url = reverse('profiles_profile_detail',
kwargs={'username': request.user.username})
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES, instance=profile_obj)
if form.is_valid():
form.save()
return HttpResponseRedirect(success_url)
else:
form = form_class(instance=profile_obj)
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name, {
'form': form,
'profile': profile_obj,
'user': profile_obj.user,
}, context_instance=context)
edit_profile = login_required(edit_profile)
def profile_detail(request, username, public_profile_field=None,
template_name='profiles/public/profile_detail.html',
extra_context=None):
"""
Detail view of a user's profile.
If the user has not yet created a profile, ``Http404`` will be
raised.
**Required arguments:**
``username``
The username of the user whose profile is being displayed.
**Optional arguments:**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``public_profile_field``
The name of a ``BooleanField`` on the profile model; if the
value of that field on the user's profile is ``False``, the
``profile`` variable in the template will be ``None``. Use
this feature to allow users to mark their profiles as not
being publicly viewable.
If this argument is not specified, it will be assumed that all
users' profiles are publicly viewable.
``template_name``
The name of the template to use for displaying the profile. If
not specified, this will default to
:template:`profiles/profile_detail.html`.
**Context:**
``profile``
The user's profile, or ``None`` if the user's profile is not
publicly viewable (see the description of
``public_profile_field`` above).
**Template:**
``template_name`` keyword argument or
:template:`profiles/profile_detail.html`.
"""
user = get_object_or_404(User, username=username)
try:
profile_obj = user.profile
except ObjectDoesNotExist:
raise Http404
if public_profile_field is not None and \
not getattr(profile_obj, public_profile_field):
profile_obj = None
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
{'profile': profile_obj},
context_instance=context)
avg_line_length: 34.671875 | max_line_length: 87 | alphanum_fraction: 0.660545
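A hedged sketch of a URLconf wiring the profile views above, passing the form_class contract described in their docstrings; UserProfileForm and its module are hypothetical imports, while the URL names match the ones the views reverse().

from django.conf.urls import url

from readthedocs.profiles import views
from myapp.forms import UserProfileForm  # hypothetical ModelForm for the profile model

urlpatterns = [
    url(r'^create/$', views.create_profile, {'form_class': UserProfileForm}, name='profiles_profile_create'),
    url(r'^edit/$', views.edit_profile, {'form_class': UserProfileForm}, name='profiles_edit_profile'),
    url(r'^(?P<username>[\w.@+-]+)/$', views.profile_detail, name='profiles_profile_detail'),
]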
hexsha: 73424785a21b6915f27e2bc02002d27e8d78c4b5 | size: 291 | ext: py | lang: Python
path: quality/quality/doctype/initial_sample_inspection_report/initial_sample_inspection_report.py | licenses: MIT
stars repo: D7blacksushi/quality-test-enext (head d25d50650666696c4687daf2de4879c1bc90416f) | stars: 5 (2018-04-10T17:49:37.000Z to 2021-04-27T20:13:08.000Z)
issues/forks repo: creador30/Quality (head dde1a9dc327982077fe0a7bd069c7a85686d763a) | issues: none recorded | forks: 10 (2016-05-11T17:43:18.000Z to 2020-09-12T10:12:12.000Z)
content:
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESO Electronic (Nemwatik) and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class InitialSampleInspectionReport(Document):
pass
avg_line_length: 26.454545 | max_line_length: 64 | alphanum_fraction: 0.793814
hexsha: c86b63f7a2f6a8b24e618361b82a82e549db2cb2 | size: 121 | ext: py | lang: Python
path: tests/test_array/test_array_is_empty.py
repo: natanfeitosa/pyfunctools (head b5354e0d737542b03049eb3e347d6ca1ccceb164) | licenses: MIT
stars: 4 (2021-11-17T15:26:11.000Z to 2022-03-12T01:30:55.000Z) | issues: none recorded | forks: none recorded
content:
from pyfunctools import Array
def test_is_empty():
assert Array().is_empty()
assert not Array(1, 2).is_empty()
avg_line_length: 17.285714 | max_line_length: 37 | alphanum_fraction: 0.702479
hexsha: 1d92ad433587db9ebfcc7fa2446d066bccec45bd | size: 28,729 | ext: py | lang: Python
path: pano/vm.py
repo: NiklasRz/panoramix (head fcb2dfe3df48ceee188cea37793da74aa9a05eb6) | licenses: MIT
stars: none recorded | issues: none recorded | forks: none recorded
content:
import logging
import sys
from copy import copy
import core.arithmetic as arithmetic
import utils.opcode_dict as opcode_dict
from core.algebra import (
add_op,
bits,
lt_op,
mask_op,
minus_op,
mul_op,
or_op,
sub_op,
to_bytes,
)
from core.arithmetic import is_zero, simplify_bool
from pano.matcher import match
from pano.prettify import pprint_trace
from utils.helpers import (
C,
EasyCopy,
all_concrete,
opcode,
precompiled,
precompiled_var_names,
)
from .stack import Stack, fold_stacks
loader_dests = None
logger = logging.getLogger(__name__)
"""
A symbolic EVM.
It executes the contract, and returns the resulting `trace` of execution - which is the decompiled form.
The most difficult part of this module is the loop detection and simplification algorithm.
In over 10 iterations I didn't find a simpler way that is just as effective.
Unfortunately, because of the complexity, I don't fully understand how it works.
Ergo, I cannot explain it to you :) Good luck!
On the upside, some stuff, like apply_stack is quite straightforward.
"""
def mem_load(pos, size=32):
return ("mem", ("range", pos, size))
def find_nodes(node, f):
assert type(node) == Node
if f(node):
res = [node]
else:
res = []
for n in node.next:
res.extend(find_nodes(n, f))
return res
node_count = 0
class Node:
def __str__(self):
return f"Node({self.jd})"
def __repr__(self):
# technically not a proper _repr_, but some trace printout functions use this
# instead of a proper str
return self.__str__()
def __init__(self, vm, start, safe, stack, condition=True, trace=None):
global node_count
node_count += 1
# if node_count % 1000 == 0:
# print(node_count)
if node_count > 100_000:
raise RuntimeError("Too many nodes / function too big.")
self.vm = vm
self.prev = []
self.next = []
self.trace = trace
self.start = start
self.safe = safe
self.stack = stack
self.history = {}
self.depth = 0
self.label_history = {}
self.label = None
self.condition = condition
stack_obj = Stack(stack)
self.jd = (start, len(stack), tuple(stack_obj.jump_dests(loader_dests)))
def apply_vars(var_list):
for orig_name, new_name in var_list:
assert match(orig_name, ("var", ":name"))
assert match(new_name, ("var", int))
self.trace = replace(self.trace, orig_name, new_name)
for n in self.next:
n.apply_vars(var_list)
def make_trace(self):
if self.trace is None:
return ["nil"]
begin_vars = []
if self.is_label():
for _, var_idx, var_val, _ in self.label.begin_vars:
begin_vars.append(("setvar", var_idx, var_val))
if self.vm.just_fdests and self.trace != [("revert", 0)]:
t = self.trace[0]
if match(t, ("jump", ":target_node", ...)):
begin = [("jd", str(self.jd[0]))] # , str(self.trace))]
else:
begin = ["?"]
else:
begin = []
begin += [("label", self, tuple(begin_vars))] if self.is_label() else []
last = self.trace[-1]
if opcode(last) == "jump":
return begin + self.trace[:-1] + last[1].make_trace()
if m := match(last, ("if", ":cond", ":if_true", ":if_false")):
if_true = m.if_true.make_trace()
if_false = m.if_false.make_trace()
return begin + self.trace[:-1] + [("if", m.cond, if_true, if_false)]
return begin + self.trace
def set_label(self, loop_dest, vars, stack):
self.label = loop_dest
loop_dest.begin_vars = vars
assert len(self.stack) == len(stack)
self.stack = stack
loop_dest.prev_trace = loop_dest.trace
loop_dest.trace = [("jump", self)]
loop_dest.next = []
self.set_prev(loop_dest)
def set_prev(self, prev):
self.prev = prev
self.depth = prev.depth + 1
self.history = copy(prev.history)
self.history[prev.jd] = prev
self.label_history = copy(prev.label_history)
if prev.label:
self.label_history[prev.jd] = prev.label
prev.next.append(self)
def is_label(self):
return self.label is not None
def run(self):
logger.debug("Node.run(%s)", self)
self.prev_trace = self.trace
self.trace = self.vm._run(self.start, self.safe, self.stack, self.condition)
last = self.trace[-1]
if opcode(last) == "jump":
n = last[1]
n.set_prev(self)
if opcode(last) == "if":
if_true, if_false = last[2], last[3]
if_true.set_prev(self)
if_false.set_prev(self)
class VM(EasyCopy):
def __init__(self, loader, just_fdests=False):
global loader_dests
loader_dests = loader.jump_dests
self.loader = loader
# (line_no, op, param)
self.lines = loader.lines # a shortcut
self.just_fdests = just_fdests
self.counter = 0
global node_count
node_count = 0
def run(self, start, history={}, condition=None, re_run=False, stack=()):
func_node = Node(vm=self, start=start, safe=True, stack=list(stack))
trace = [
("setmem", ("range", 0x40, 32), 0x60),
("jump", func_node, "safe", tuple()),
]
root = Node(vm=self, trace=trace, start=start, safe=True, stack=list(stack))
func_node.set_prev(root)
"""
BFS symbolic execution, ends up with a decompiled
code, with labels and gotos.
Depth-first would be way easier to implement, but it tends
to work way slower because of the loops.
"""
for j in range(20): # 20
for i in range(200): # 300
"""
Find all the jumps, and expand them until
the next jump.
"""
self.expand_trace(root)
"""
find all the jumps that lead to an already
reached jumpdest (with similar stack, otherwise
we'd catch function calls as all).
replace them with 'loop' identifier
"""
self.replace_loops(root)
"""
repeat until there are no more jumps
to explore (so, until the trace didn't change)
"""
nodes = find_nodes(root, lambda n: n.trace is None)
if len(nodes) == 0:
break
trace = self.continue_loops(root)
tr = root.make_trace()
nodes = find_nodes(root, lambda n: n.trace is None)
if len(nodes) == 0:
break
tr = root.make_trace()
return tr
def expand_trace(self, root):
nodes = find_nodes(root, lambda n: n.trace is None)
for node in nodes:
node.run()
def replace_loops(self, root):
nodes = find_nodes(root, lambda n: n.trace is None)
for node in nodes:
if (
node.jd in node.history
and node.jd[1] > 0
and len(node.history[node.jd].stack) == len(node.stack)
): # jd[1] == stack_len
folded, vars = fold_stacks(
node.history[node.jd].stack, node.stack, node.depth
)
loop_line = (
"loop",
node.history[node.jd],
node.stack,
folded,
tuple(vars),
)
node.trace = [loop_line]
def continue_loops(self, root):
loop_list = find_nodes(
root,
lambda n: n.trace is not None
and len(n.trace) == 1
and opcode(n.trace[0]) == "loop",
)
for node in loop_list:
assert node.trace is not None
assert len(node.trace) == 1
assert opcode(node.trace[0]) == "loop"
line = node.trace[0]
loop_dest, stack, new_stack, vars = line[1:]
if loop_dest.is_label():
old_stack = loop_dest.stack
beginvars = loop_dest.label.begin_vars
set_vars = []
for _, var_idx, val, stack_pos in beginvars:
sv = ("setvar", var_idx, stack[stack_pos])
set_vars.append(sv)
if len(list(set_vars)) == 0:
folded, var_list = fold_stacks(
old_stack, stack, loop_dest.label.depth
)
node.trace = None
node.set_label(loop_dest, tuple(var_list), folded)
continue
node.trace = [("goto", loop_dest, tuple(set_vars))]
else:
node.trace = None
node.set_label(loop_dest, tuple(vars), new_stack)
def _run(self, start, safe, stack, condition):
logger.debug("VM._run stack=%s", stack)
self.stack = Stack(stack)
trace = []
i = start
lines = self.lines
if i not in lines:
if type(i) != int:
return [("undefined", "remco jump", i)]
else:
            return [("invalid", "jumpdest", i)]
if not safe:
if lines[i][1] == "jumpdest":
i = self.loader.next_line(i)
if i not in lines:
return [("invalid", "eof?")]
else:
return [("invalid", "jump")]
while True:
line = lines[i]
res = self.handle_jumps(trace, line, condition)
if res is not None:
return res
if line[1] == "jumpdest":
n = Node(
self,
start=i,
safe=False,
stack=tuple(self.stack.stack),
condition=condition,
)
logger.debug("jumpdest %s", n)
trace.append(("jump", n))
return trace
else:
self.apply_stack(trace, line)
i = self.loader.next_line(i)
assert False
def handle_jumps(self, trace, line, condition):
i, op = line[0], line[1]
stack = self.stack
if "--explain" in sys.argv and op in (
"jump",
"jumpi",
"selfdestruct",
"stop",
"return",
"invalid",
"assert_fail",
"revert",
):
trace.append(C.asm(f" {stack}"))
trace.append("")
trace.append(f"[{line[0]}] {C.asm(op)}")
if op in (
"jump",
"jumpi",
"selfdestruct",
"stop",
"return",
"invalid",
"assert_fail",
"revert",
):
logger.debug("[%s] %s", i, op)
if op == "jump":
target = stack.pop()
n = Node(
self,
start=target,
safe=False,
stack=tuple(self.stack.stack),
condition=condition,
)
trace.append(("jump", n))
return trace
if op == "jumpi":
target = stack.pop()
if_condition = simplify_bool(stack.pop())
tuple_stack = tuple(self.stack.stack)
n_true = Node(
self,
start=target,
safe=False,
stack=tuple_stack,
condition=if_condition,
)
n_false = Node(
self,
start=self.loader.next_line(i),
safe=True,
stack=tuple_stack,
condition=is_zero(if_condition),
)
if self.just_fdests:
if (
(m := match(if_condition, ("eq", ":fx_hash", ":is_cd")))
and str(("cd", 0)) in str(m.is_cd)
and isinstance(m.fx_hash, int)
):
n_true.trace = [("funccall", m.fx_hash, target, tuple_stack)]
if (
(m := match(if_condition, ("eq", ":is_cd", ":fx_hash")))
and str(("cd", 0)) in str(m.is_cd)
and isinstance(m.fx_hash, int)
):
n_true.trace = [("funccall", m.fx_hash, target, tuple_stack)]
if_true = ("jump", n_true)
if_false = ("jump", n_false)
bool_condition = arithmetic.eval_bool(
if_condition, condition, symbolic=False
)
if bool_condition is not None:
if bool_condition:
trace.append(("jump", n_true))
return trace # res, False
else:
trace.append(("jump", n_false))
return trace
trace.append(("if", if_condition, n_true, n_false,))
logger.debug("jumpi -> if %s", trace[-1])
return trace
if op == "selfdestruct":
trace.append(("selfdestruct", stack.pop(),))
return trace
if op in ["stop", "assert_fail", "invalid"]:
trace.append((op,))
return trace
if op == "UNKNOWN":
trace.append(("invalid",))
return trace
if op in ["return", "revert"]:
p = stack.pop()
n = stack.pop()
if n == 0:
trace.append((op, 0))
else:
return_data = mem_load(p, n)
trace.append((op, return_data,))
return trace
return None
def apply_stack(self, ret, line):
def trace(exp, *format_args):
try:
logger.debug("Trace: %s", str(exp).format(*format_args))
except Exception:
pass
if type(exp) == str:
ret.append(exp.format(*format_args))
else:
ret.append(exp)
stack = self.stack
op = line[1]
previous_len = stack.len()
if "--verbose" in sys.argv or "--explain" in sys.argv:
trace(C.asm(" " + str(stack)))
trace("")
if "push" not in op and "dup" not in op and "swap" not in op:
trace("[{}] {}", line[0], C.asm(op))
else:
if type(line[2]) == str:
trace("[{}] {} {}", line[0], C.asm(op), C.asm(" ”" + line[2] + "”"))
elif line[2] > 0x1000000000:
trace("[{}] {} {}", line[0], C.asm(op), C.asm(hex(line[2])))
else:
trace("[{}] {} {}", line[0], C.asm(op), C.asm(str(line[2])))
assert op not in [
"jump",
"jumpi",
"revert",
"return",
"stop",
"jumpdest",
"UNKNOWN",
]
param = 0
if len(line) > 2:
param = line[2]
if op in [
"exp",
"and",
"eq",
"div",
"lt",
"gt",
"slt",
"sgt",
"mod",
"xor",
"signextend",
"smod",
"sdiv",
]:
stack.append(arithmetic.eval((op, stack.pop(), stack.pop(),)))
if op in ["mulmod", "addmod"]:
stack.append(("mulmod", stack.pop(), stack.pop(), stack.pop()))
if op == "mul":
stack.append(mul_op(stack.pop(), stack.pop()))
if op == "or":
stack.append(or_op(stack.pop(), stack.pop()))
if op == "shl":
off = stack.pop()
exp = stack.pop()
if all_concrete(off, exp):
stack.append(exp << off)
else:
stack.append(mask_op(exp, shl=off))
if op == "shr":
off = stack.pop()
exp = stack.pop()
if all_concrete(off, exp):
stack.append(exp >> off)
else:
stack.append(mask_op(exp, offset=minus_op(off), shr=off))
if op == "sar":
off = stack.pop()
exp = stack.pop()
if all_concrete(off, exp):
sign = exp & (1 << 255)
if off >= 256:
if sign:
stack.append(2 ** 256 - 1)
else:
stack.append(0)
else:
shifted = exp >> off
if sign:
shifted |= (2 ** 256 - 1) << (256 - off)
stack.append(shifted)
else:
# FIXME: This won't give the right result...
stack.append(mask_op(exp, offset=minus_op(off), shr=off))
if op == "add":
stack.append(add_op(stack.pop(), stack.pop()))
if op == "sub":
left = stack.pop()
right = stack.pop()
if type(left) == int and type(right) == int:
stack.append(arithmetic.sub(left, right))
else:
stack.append(sub_op(left, right))
elif op in ["not", "iszero"]:
stack.append((op, stack.pop()))
elif op == "sha3":
p = stack.pop()
n = stack.pop()
res = mem_load(p, n)
self.counter += 1
vname = f"_{self.counter}"
vval = (
"sha3",
res,
)
trace(("setvar", vname, vval))
stack.append(("var", vname))
elif op == "calldataload":
stack.append(("cd", stack.pop(),))
elif op == "byte":
val = stack.pop()
num = stack.pop()
off = sub_op(256, to_bytes(num))
stack.append(mask_op(val, 8, off, shr=off))
elif op == "selfbalance":
stack.append(("balance", "address",))
elif op == "balance":
addr = stack.pop()
if opcode(addr) == "mask_shl" and addr[:4] == ("mask_shl", 160, 0, 0):
stack.append(("balance", addr[4],))
else:
stack.append(("balance", addr,))
elif op == "swap":
stack.swap(param)
elif op[:3] == "log":
p = stack.pop()
s = stack.pop()
topics = []
param = int(op[3])
for i in range(param):
el = stack.pop()
topics.append(el)
trace(("log", mem_load(p, s),) + tuple(topics))
elif op == "sload":
sloc = stack.pop()
stack.append(("storage", 256, 0, sloc))
elif op == "sstore":
sloc = stack.pop()
val = stack.pop()
trace(("store", 256, 0, sloc, val))
elif op == "mload":
memloc = stack.pop()
loaded = mem_load(memloc)
self.counter += 1
vname = f"_{self.counter}"
trace(("setvar", vname, ("mem", ("range", memloc, 32))))
stack.append(("var", vname))
elif op == "mstore":
memloc = stack.pop()
val = stack.pop()
trace(("setmem", ("range", memloc, 32), val,))
elif op == "mstore8":
memloc = stack.pop()
val = stack.pop()
trace(("setmem", ("range", memloc, 8), val,))
elif op == "extcodecopy":
addr = stack.pop()
mem_pos = stack.pop()
code_pos = stack.pop()
data_len = stack.pop()
trace(
(
"setmem",
("range", mem_pos, data_len),
("extcodecopy", addr, ("range", code_pos, data_len)),
)
)
elif op == "codecopy":
mem_pos = stack.pop()
call_pos = stack.pop()
data_len = stack.pop()
if (type(call_pos), type(data_len)) == (
int,
int,
) and call_pos + data_len < len(self.loader.binary):
res = 0
for i in range(call_pos - 1, call_pos + data_len - 1):
res = res << 8
res += self.loader.binary[
i
] # this breaks with out of range for some contracts
# may be because we're usually getting compiled code binary
# and not runtime binary
trace(
("setmem", ("range", mem_pos, data_len), res)
) # ('bytes', data_len, res)))
else:
trace(
(
"setmem",
("range", mem_pos, data_len),
("code.data", call_pos, data_len,),
)
)
elif op == "codesize":
stack.append(len(self.loader.binary))
elif op == "calldatacopy":
mem_pos = stack.pop()
call_pos = stack.pop()
data_len = stack.pop()
if data_len != 0:
call_data = ("call.data", call_pos, data_len)
# call_data = mask_op(('call.data', bits(add_op(data_len, call_pos))), size=bits(data_len), shl=bits(call_pos))
trace(("setmem", ("range", mem_pos, data_len), call_data))
elif op == "returndatacopy":
mem_pos = stack.pop()
ret_pos = stack.pop()
data_len = stack.pop()
if data_len != 0:
return_data = ("ext_call.return_data", ret_pos, data_len)
# return_data = mask_op(('ext_call.return_data', bits(add_op(data_len, ret_pos))), size=bits(data_len), shl=bits(ret_pos))
trace(("setmem", ("range", mem_pos, data_len), return_data))
elif op == "call":
self.handle_call(op, trace)
elif op == "staticcall":
self.handle_call(op, trace)
elif op == "delegatecall":
gas = stack.pop()
addr = stack.pop()
arg_start = stack.pop()
arg_len = stack.pop()
ret_start = stack.pop()
ret_len = stack.pop()
call_trace = (
"delegatecall",
gas,
addr,
) # arg_start, arg_len, ret_start, ret_len)
if arg_len == 0:
fname = None
fparams = None
elif arg_len == 4:
fname = mem_load(arg_start, 4)
fparams = 0
else:
fname = mem_load(arg_start, 4)
fparams = mem_load(add_op(arg_start, 4), sub_op(arg_len, 4))
call_trace += (fname, fparams)
trace(call_trace)
self.call_len = ret_len
stack.append("delegate.return_code")
if 0 != ret_len:
return_data = ("delegate.return_data", 0, ret_len)
trace(("setmem", ("range", ret_start, ret_len), return_data))
elif op == "callcode":
gas = stack.pop()
addr = stack.pop()
value = stack.pop()
arg_start = stack.pop()
arg_len = stack.pop()
ret_start = stack.pop()
ret_len = stack.pop()
call_trace = (
"callcode",
gas,
addr,
value,
)
if arg_len == 0:
fname = None
fparams = None
elif arg_len == 4:
fname = mem_load(arg_start, 4)
fparams = 0
else:
fname = mem_load(arg_start, 4)
fparams = mem_load(add_op(arg_start, 4), sub_op(arg_len, 4))
call_trace += (fname, fparams)
trace(call_trace)
self.call_len = ret_len
stack.append("callcode.return_code")
if 0 != ret_len:
return_data = ("callcode.return_data", 0, ret_len)
trace(("setmem", ("range", ret_start, ret_len), return_data))
elif op == "create":
wei, mem_start, mem_len = stack.pop(), stack.pop(), stack.pop()
call_trace = ("create", wei)
code = mem_load(mem_start, mem_len)
call_trace += (code,)
trace(call_trace)
stack.append("create.new_address")
elif op == "create2":
wei, mem_start, mem_len, salt = (
stack.pop(),
stack.pop(),
stack.pop(),
stack.pop(),
)
call_trace = ("create2", wei, ("mem", ("range", mem_start, mem_len)), salt)
trace(call_trace)
stack.append("create2.new_address")
elif op[:4] == "push":
stack.append(param)
elif op == "pc":
stack.append(line[0])
elif op == "pop":
stack.pop()
elif op == "dup":
stack.dup(param)
elif op == "msize":
self.counter += 1
vname = f"_{self.counter}"
trace(("setvar", vname, "msize"))
stack.append(("var", vname))
elif op in ("extcodesize", "extcodehash", "blockhash"):
stack.append((op, stack.pop(),))
elif op in [
"callvalue",
"caller",
"address",
"number",
"gas",
"origin",
"timestamp",
"chainid",
"difficulty",
"gasprice",
"coinbase",
"gaslimit",
"calldatasize",
"returndatasize",
]:
stack.append(op)
if stack.len() - previous_len != opcode_dict.stack_diffs[op]:
logger.error("line: %s", line)
logger.error("stack: %s", stack)
logger.error(
"expected %s, got %s stack diff",
opcode_dict.stack_diffs[op],
stack.len() - previous_len,
)
assert False, f"opcode {op} not processed correctly"
stack.cleanup()
def handle_call(self, op, trace):
stack = self.stack
gas = stack.pop()
addr = stack.pop()
if op == "call":
wei = stack.pop()
else:
assert op == "staticcall"
wei = 0
arg_start = stack.pop()
arg_len = stack.pop()
ret_start = stack.pop()
ret_len = stack.pop()
if addr == 4: # Identity
m = mem_load(arg_start, arg_len)
trace(("setmem", ("range", ret_start, arg_len), m))
stack.append("memcopy.success")
elif type(addr) == int and addr in precompiled:
m = mem_load(arg_start, arg_len)
args = mem_load(arg_start, arg_len)
var_name = precompiled_var_names[addr]
trace(("precompiled", var_name, precompiled[addr], args))
trace(("setmem", ("range", ret_start, ret_len), ("var", var_name)))
stack.append("{}.result".format(precompiled[addr]))
else:
assert op in ("call", "staticcall")
call_trace = (
op,
gas,
addr,
wei,
)
if arg_len == 0:
call_trace += None, None
elif arg_len == 4:
call_trace += mem_load(arg_start, 4), None
else:
fname = mem_load(arg_start, 4)
fparams = mem_load(add_op(arg_start, 4), sub_op(arg_len, 4))
call_trace += fname, fparams
trace(call_trace)
# trace(('comment', mem_load(arg_start, arg_len)))
self.call_len = ret_len
stack.append("ext_call.success")
if lt_op(0, ret_len):
return_data = ("ext_call.return_data", 0, ret_len)
trace(("setmem", ("range", ret_start, ret_len), return_data))
avg_line_length: 28.304433 | max_line_length: 153 | alphanum_fraction: 0.461694
hexsha: db0fb9c622c5000fd311bca8aefd7ec5e9457a57 | size: 371 | ext: py | lang: Python
path: app/__init__.py
repo: Leo-g/Flask-CRUD-API (head 7cb50bc8fdca3412f28710a70a2b811463acc1ec) | licenses: MIT
stars: 6 (2018-08-12T09:31:09.000Z to 2021-08-01T22:12:25.000Z) | issues: none recorded | forks: 1 (2018-08-12T09:31:13.000Z to 2018-08-12T09:31:13.000Z)
content:
from flask import Flask
# http://flask.pocoo.org/docs/0.10/patterns/appfactories/
def create_app(config_filename):
app = Flask(__name__)
app.config.from_object(config_filename)
from app.basemodels import db
db.init_app(app)
# Blueprints
from app.users.views import users
app.register_blueprint(users, url_prefix='/users')
return app
avg_line_length: 20.611111 | max_line_length: 57 | alphanum_fraction: 0.725067
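A minimal usage sketch for the application factory above; the 'config' settings path passed to create_app is an assumption for illustration.

from app import create_app

# "config" stands in for whatever settings module/object the project actually uses.
app = create_app('config')

if __name__ == '__main__':
    app.run(debug=True)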
hexsha: e5ef96ca87f8325d96d8dedcfc913807c592f24f | size: 6,012 | ext: py | lang: Python
path: tests/operators/test_node_pod_operator.py
repo: lmaczulajtys/kedro-airflow-k8s (head 335e301acf340d6ab4a26f0e694cb854e2b49483) | licenses: Apache-2.0
stars: 14 (2021-03-08T10:17:33.000Z to 2022-03-07T01:44:42.000Z) | issues: 48 (2021-03-10T14:32:07.000Z to 2022-03-14T07:34:38.000Z) | forks: 7 (2021-03-05T13:07:21.000Z to 2022-02-27T20:06:41.000Z)
content:
import unittest
from airflow.kubernetes.pod_generator import PodGenerator
from kubernetes.client.models.v1_env_var import V1EnvVar
from kedro_airflow_k8s.operators.node_pod import NodePodOperator
class TestNodePodOperator(unittest.TestCase):
def test_task_create(self):
task = NodePodOperator(
node_name="test_node_name",
namespace="airflow",
volume_disabled=False,
pvc_name="shared_storage",
image="registry.gitlab.com/test_image",
image_pull_policy="Always",
env="test-pipelines",
task_id="test-node-name",
startup_timeout=120,
volume_owner=100,
mlflow_enabled=False,
requests_cpu="500m",
requests_memory="2Gi",
limits_cpu="2",
limits_memory="10Gi",
node_selector_labels={
"size/k8s.io": "huge",
},
labels={"running": "airflow"},
tolerations=[
{
"key": "group",
"value": "data-processing",
"effect": "NoExecute",
}
],
annotations={
"iam.amazonaws.com/role": "airflow",
"vault.hashicorp.com/agent-inject-template-foo": '{{- with secret "database/creds/db-app" -}}\npostgres://{{ .Data.username }}:{{ .Data.password }}@postgres:5432/mydb\n{{- end }}\n', # noqa: E501
},
pipeline="data_science_pipeline",
parameters="ds:{{ ds }}",
env_vars={"var1": "var1value"},
)
pod = task.create_pod_request_obj()
assert pod.metadata.name.startswith("test-node-name")
assert "test-node-name" != pod.metadata.name
assert pod.metadata.namespace == "airflow"
assert len(pod.spec.containers) == 1
container = pod.spec.containers[0]
assert container.image == "registry.gitlab.com/test_image"
assert container.image_pull_policy == "Always"
assert container.args == [
"kedro",
"run",
"-e",
"test-pipelines",
"--pipeline",
"data_science_pipeline",
"--node",
"test_node_name",
"--params",
"ds:{{ ds }}",
]
assert len(pod.spec.volumes) == 1
volume = pod.spec.volumes[0]
assert volume.name == "storage"
assert volume.persistent_volume_claim.claim_name == "shared_storage"
assert len(container.volume_mounts) == 1
volume_mount = container.volume_mounts[0]
assert volume_mount.mount_path == "/home/kedro/data"
assert volume_mount.name == "storage"
assert pod.spec.security_context.fs_group == 100
assert container.resources.limits == {"cpu": "2", "memory": "10Gi"}
assert container.resources.requests == {"cpu": "500m", "memory": "2Gi"}
assert pod.spec.node_selector == {"size/k8s.io": "huge"}
assert pod.spec.tolerations[0].value == "data-processing"
assert pod.metadata.annotations["iam.amazonaws.com/role"] == "airflow"
assert (
pod.metadata.annotations[
"vault.hashicorp.com/agent-inject-template-foo"
]
== """{{- with secret "database/creds/db-app" -}}
postgres://{{ .Data.username }}:{{ .Data.password }}@postgres:5432/mydb
{{- end }}
"""
)
assert pod.spec.service_account_name == "default"
assert len(pod.spec.image_pull_secrets) == 0
assert container.env[0] == V1EnvVar(name="var1", value="var1value")
def test_task_create_no_limits_and_requests(self):
task = NodePodOperator(
node_name="test_node_name",
namespace="airflow",
pvc_name="shared_storage",
image="registry.gitlab.com/test_image",
image_pull_policy="Always",
env="test-pipelines",
task_id="test-node-name",
volume_owner=100,
mlflow_enabled=False,
)
pod = task.create_pod_request_obj()
assert len(pod.spec.containers) == 1
container = pod.spec.containers[0]
assert container.resources.limits == {}
assert container.resources.requests == {}
assert not pod.spec.node_selector
def test_task_with_service_account(self):
task = NodePodOperator(
node_name="test_node_name",
namespace="airflow",
pvc_name="shared_storage",
image="registry.gitlab.com/test_image",
image_pull_policy="Always",
env="test-pipelines",
task_id="test-node-name",
service_account_name="custom_service_account",
image_pull_secrets="top,secret",
mlflow_enabled=False,
)
pod = task.create_pod_request_obj()
assert pod.spec.service_account_name == "custom_service_account"
assert len(pod.spec.image_pull_secrets) == 2
assert pod.spec.image_pull_secrets[0].name == "top"
assert pod.spec.image_pull_secrets[1].name == "secret"
def test_task_with_custom_k8s_pod_template(self):
task = NodePodOperator(
node_name="test_node_name",
namespace="airflow",
pvc_name="shared_storage",
image="registry.gitlab.com/test_image",
image_pull_policy="Always",
env="test-pipelines",
task_id="test-node-name",
volume_owner=100,
mlflow_enabled=False,
kubernetes_pod_template=f"""
type: Pod
metadata:
name: {PodGenerator.make_unique_pod_id('test-node-name')}'
labels:
test: mylabel
spec:
containers:
- name: base
""",
)
pod = task.create_pod_request_obj()
assert pod.metadata.name.startswith("test-node-name")
assert "test-node-name" != pod.metadata.name
assert pod.metadata.labels["test"] == "mylabel"
| 36 | 212 | 0.580007 |
7243f90213858c9b7dc6c45958e7eed0b688a7d0
| 62 |
py
|
Python
|
cfn-lint-serverless/cfn_lint_serverless/__init__.py
|
pedrodelgallego/serverless-rules
|
1ab513667b37edcfcd6341ab50a42242c11303d1
|
[
"MIT-0"
] | 264 |
2021-06-02T20:12:38.000Z
|
2022-03-31T19:48:47.000Z
|
cfn-lint-serverless/cfn_lint_serverless/__init__.py
|
pedrodelgallego/serverless-rules
|
1ab513667b37edcfcd6341ab50a42242c11303d1
|
[
"MIT-0"
] | 22 |
2021-06-03T11:38:28.000Z
|
2021-11-15T15:25:47.000Z
|
cfn-lint-serverless/cfn_lint_serverless/__init__.py
|
pedrodelgallego/serverless-rules
|
1ab513667b37edcfcd6341ab50a42242c11303d1
|
[
"MIT-0"
] | 15 |
2021-06-03T04:37:42.000Z
|
2022-02-28T05:29:54.000Z
|
"""
CloudFormation Lint Rules for Serverless applications
"""
| 15.5 | 53 | 0.774194 |
9a0dc132af52b8b5ab2448fc1ecd5399d74f7de8
| 4,972 |
py
|
Python
|
src/__main__.py
|
othieno/geotagx-tool-validator
|
4090c948021faa1d322bde085246a478e4c7a851
|
[
"MIT"
] | null | null | null |
src/__main__.py
|
othieno/geotagx-tool-validator
|
4090c948021faa1d322bde085246a478e4c7a851
|
[
"MIT"
] | 1 |
2016-11-01T15:21:13.000Z
|
2016-11-01T15:21:13.000Z
|
src/__main__.py
|
othieno/geotagx-tool-validator
|
4090c948021faa1d322bde085246a478e4c7a851
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This module is part of the GeoTag-X project validator tool.
#
# Author: Jeremy Othieno ([email protected])
#
# Copyright (c) 2016-2017 UNITAR/UNOSAT
#
# The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
def main():
"""Executes the application.
"""
import sys
sys.exit(run(get_argparser().parse_args(sys.argv[1:])))
def run(arguments):
"""Executes the application with the specified command-line arguments.
Args:
arguments (argparse.Namespace): A set of command-line arguments.
Returns:
int: 0 if validation was successful, 1 otherwise.
"""
from helper import sanitize_paths, deserialize_configuration_set, print_exception
from core import is_configuration_set
exit_code = 0
try:
if not arguments.quiet:
_setup_logging(arguments.verbose)
for path in sanitize_paths(arguments.paths):
configuration_set = deserialize_configuration_set(path)
valid, message = is_configuration_set(configuration_set)
if not valid:
print message
exit_code = 1
break
else:
print "The project located at '{}' is valid.".format(path)
except Exception as e:
print_exception(e, arguments.verbose)
exit_code = 1
finally:
return exit_code
def get_argparser(subparsers=None):
"""Constructs the application's command-line argument parser. The validator tool
is a standalone program but also a part of the GeoTag-X toolkit which means
that its arguments can be sub-commands to a specific command. For more information,
check out: https://docs.python.org/2/library/argparse.html#sub-commands
Args:
subparsers (argparse._SubParsersAction): If specified, the argument parser is
created as a parser for a program command, and not the actual program.
Returns:
argparse.ArgumentParser: A command-line argument parser instance.
Raises:
TypeError: If the subparsers argument is not a NoneType or an argparse._SubParsersAction instance.
"""
import argparse
parser = None
parser_arguments = {
"description": "Validate your GeoTag-X projects.",
"add_help": False,
}
if subparsers is None:
parser = argparse.ArgumentParser(prog="geotagx-validator", **parser_arguments)
elif isinstance(subparsers, argparse._SubParsersAction):
parser = subparsers.add_parser("validate", help="Validate your GeoTag-X projects.", **parser_arguments)
parser.set_defaults(run=run)
else:
raise TypeError("Invalid argument type: get_argparser expects 'argparse._SubParsersAction' but got '{}'.".format(type(subparsers).__name__))
options = parser.add_argument_group("OPTIONS")
options.add_argument("-h", "--help", action="help", help="Display this help and exit.")
options.add_argument("-q", "--quiet", action="store_true", help="Suppress all warnings.")
options.add_argument("-v", "--verbose", action="store_true", help="Detail the actions being performed.")
options.add_argument("-V", "--version", action="version", help="Display version information and exit.", version=_version())
parser.add_argument("paths", metavar="PATH", nargs="+")
return parser
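# Illustrative wiring (a sketch; the parent command name is hypothetical): a
# toolkit-level "geotagx" program could register this tool as a sub-command with
#   subparsers = parent_parser.add_subparsers()
#   get_argparser(subparsers)
# after which "geotagx validate PATH..." dispatches to run() through the
# parser's set_defaults(run=run) entry.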
def _version():
"""Returns the tool's version string.
"""
from __init__ import __version__
return "GeoTag-X Project Validator v%s, Copyright (C) 2016 UNITAR/UNOSAT." % __version__
def _setup_logging(verbose=False):
"""Sets up logging.
Args:
verbose (bool): If set to True, the validator will log most of its operations,
even the most mundane.
"""
import logging
logging_level = logging.INFO if verbose else logging.WARNING
logging.basicConfig(format="[%(levelname)s] %(message)s", level=logging_level)
if __name__ == "__main__":
main()
| 38.246154 | 148 | 0.699517 |
ddaca700fa333efba42b12e6249471a8dfeadc54
| 4,647 |
py
|
Python
|
selfdrive/manager/process_config.py
|
baldwalker/openpilot-4
|
53938f3adb55a369e79ea5492cd3b5ef06e69670
|
[
"MIT"
] | null | null | null |
selfdrive/manager/process_config.py
|
baldwalker/openpilot-4
|
53938f3adb55a369e79ea5492cd3b5ef06e69670
|
[
"MIT"
] | null | null | null |
selfdrive/manager/process_config.py
|
baldwalker/openpilot-4
|
53938f3adb55a369e79ea5492cd3b5ef06e69670
|
[
"MIT"
] | null | null | null |
import os
from selfdrive.hardware import EON, TICI, PC
from selfdrive.manager.process import PythonProcess, NativeProcess, DaemonProcess
from common.params import Params
WEBCAM = os.getenv("USE_WEBCAM") is not None
EnableLogger = Params().get_bool('OpkrEnableLogger')
EnableUploader = Params().get_bool('OpkrEnableUploader')
EnableOSM = Params().get_bool('OSMEnable') or Params().get_bool('OSMSpeedLimitEnable') or Params().get("CurvDecelOption", encoding="utf8") == "1" or Params().get("CurvDecelOption", encoding="utf8") == "3"
EnableMapbox = Params().get_bool('MapboxEnabled')
EnableShutdownD = Params().get_bool('C2WithCommaPower')
procs = [
DaemonProcess("manage_athenad", "selfdrive.athena.manage_athenad", "AthenadPid"),
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
NativeProcess("camerad", "selfdrive/camerad", ["./camerad"], unkillable=True, driverview=True),
NativeProcess("clocksd", "selfdrive/clocksd", ["./clocksd"]),
NativeProcess("dmonitoringmodeld", "selfdrive/modeld", ["./dmonitoringmodeld"], enabled=(not PC or WEBCAM), driverview=True),
NativeProcess("logcatd", "selfdrive/logcatd", ["./logcatd"]),
#NativeProcess("loggerd", "selfdrive/loggerd", ["./loggerd"]),
NativeProcess("modeld", "selfdrive/modeld", ["./modeld"]),
#NativeProcess("navd", "selfdrive/ui/navd", ["./navd"], enabled=(PC or TICI or EON), persistent=True),
NativeProcess("proclogd", "selfdrive/proclogd", ["./proclogd"]),
NativeProcess("sensord", "selfdrive/sensord", ["./sensord"], enabled=not PC, persistent=EON, sigkill=EON),
NativeProcess("ubloxd", "selfdrive/locationd", ["./ubloxd"], enabled=(not PC or WEBCAM)),
NativeProcess("ui", "selfdrive/ui", ["./ui"], persistent=True, watchdog_max_dt=(5 if TICI else None)),
NativeProcess("soundd", "selfdrive/ui/soundd", ["./soundd"], persistent=True),
NativeProcess("locationd", "selfdrive/locationd", ["./locationd"]),
NativeProcess("boardd", "selfdrive/boardd", ["./boardd"], enabled=False),
PythonProcess("calibrationd", "selfdrive.locationd.calibrationd"),
PythonProcess("controlsd", "selfdrive.controls.controlsd"),
#PythonProcess("deleter", "selfdrive.loggerd.deleter", persistent=True),
PythonProcess("dmonitoringd", "selfdrive.monitoring.dmonitoringd", enabled=(not PC or WEBCAM), driverview=True),
#PythonProcess("logmessaged", "selfdrive.logmessaged", persistent=True),
PythonProcess("pandad", "selfdrive.pandad", persistent=True),
PythonProcess("paramsd", "selfdrive.locationd.paramsd"),
PythonProcess("plannerd", "selfdrive.controls.plannerd"),
PythonProcess("radard", "selfdrive.controls.radard"),
PythonProcess("thermald", "selfdrive.thermald.thermald", persistent=True),
PythonProcess("timezoned", "selfdrive.timezoned", enabled=TICI, persistent=True),
#PythonProcess("tombstoned", "selfdrive.tombstoned", enabled=not PC, persistent=True),
#PythonProcess("updated", "selfdrive.updated", enabled=not PC, persistent=True),
#PythonProcess("uploader", "selfdrive.loggerd.uploader", persistent=True),
#PythonProcess("statsd", "selfdrive.statsd", persistent=True),
#PythonProcess("mapd", "selfdrive.mapd.mapd", enabled=not PC, persistent=True),
# EON only
#PythonProcess("rtshield", "selfdrive.rtshield", enabled=EON),
#PythonProcess("shutdownd", "selfdrive.hardware.eon.shutdownd", enabled=EON),
PythonProcess("androidd", "selfdrive.hardware.eon.androidd", enabled=EON, persistent=True),
#PythonProcess("gpxd", "selfdrive.dragonpilot.gpxd"),
#PythonProcess("otisserv", "selfdrive.dragonpilot.otisserv", persistent=True),
# Experimental
#PythonProcess("rawgpsd", "selfdrive.sensord.rawgps.rawgpsd", enabled=os.path.isfile("/persist/comma/use-quectel-rawgps")),
]
if EnableLogger:
procs += [
NativeProcess("loggerd", "selfdrive/loggerd", ["./loggerd"]),
PythonProcess("logmessaged", "selfdrive.logmessaged", persistent=True),
PythonProcess("tombstoned", "selfdrive.tombstoned", enabled=not PC, persistent=True),
]
if EnableUploader:
procs += [
PythonProcess("deleter", "selfdrive.loggerd.deleter", persistent=True),
PythonProcess("uploader", "selfdrive.loggerd.uploader", persistent=True),
]
if EnableOSM:
procs += [
PythonProcess("mapd", "selfdrive.mapd.mapd", enabled=not PC, persistent=True),
]
if EnableMapbox:
procs += [
PythonProcess("gpxd", "selfdrive.dragonpilot.gpxd"),
PythonProcess("otisserv", "selfdrive.dragonpilot.otisserv", persistent=True),
]
if EnableShutdownD:
procs += [
PythonProcess("shutdownd", "selfdrive.hardware.eon.shutdownd", enabled=EON),
]
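# Note: the conditional blocks above re-add processes that are commented out in
# the base procs list, gated on the opkr/dragonpilot Params flags read at import
# time.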
managed_processes = {p.name: p for p in procs}
| 54.034884 | 204 | 0.734668 |
c84576f2d475072d66bc8f1d751715423332a6ae
| 803 |
py
|
Python
|
problems/038.py
|
JoshKarpel/Euler
|
9c4a89cfe4b0114d84a82e2b2894c7b8af815e93
|
[
"MIT"
] | 1 |
2017-09-20T22:26:24.000Z
|
2017-09-20T22:26:24.000Z
|
problems/038.py
|
JoshKarpel/euler-python
|
9c4a89cfe4b0114d84a82e2b2894c7b8af815e93
|
[
"MIT"
] | null | null | null |
problems/038.py
|
JoshKarpel/euler-python
|
9c4a89cfe4b0114d84a82e2b2894c7b8af815e93
|
[
"MIT"
] | null | null | null |
from problems import mymath
def concatenated_product(integer, list_of_integers):
return ''.join([str(integer * n) for n in list_of_integers])
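# Illustrative check: for 192 and [1, 2, 3] the concatenated product is
# '192' + '384' + '576' == '192384576', which uses each of the digits 1-9
# exactly once, so sorted(product) == digits_sorted holds for that candidate.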
def solve():
concatenated_products = dict()
upper_bound = 1000000
digits_sorted = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
for test in range(2, upper_bound):
n = 1
product = ''
list_of_ints = []
while len(product) < 9:
n += 1
list_of_ints = list(range(1, n))
product = concatenated_product(test, list_of_ints)
if sorted(product) == digits_sorted:
concatenated_products[(test, len(list_of_ints))] = int(product)
max_key = mymath.key_of_max_value(concatenated_products)
return max_key
if __name__ == '__main__':
print(solve())
| 24.333333 | 75 | 0.60523 |
a87ff632fca02051b37bb5efb571360a9d3d675a
| 1,008 |
py
|
Python
|
renderers/fixed/text.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | 2 |
2021-01-07T01:10:49.000Z
|
2022-01-21T09:37:16.000Z
|
renderers/fixed/text.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
renderers/fixed/text.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
from renderers.base import BaseRenderer
from time import sleep
from envs.base import StateFormat
class TextFixedRenderer(BaseRenderer):
def __init__(self, delay=0.1, state_format=StateFormat.VECTOR):
self.delay = delay
self.state_format = state_format
def setup(self, info=None):
print(f'GAME START {self.__get_info_text(info)}')
sleep(self.delay)
def update(self, state, info=None):
if self.state_format == StateFormat.VECTOR:
print(f'Zilong:{state[0]} Arrow:{state[1]}'
f' {self.__get_info_text(info)}')
elif self.state_format == StateFormat.MATRIX:
print(f'{state}'
f' {self.__get_info_text(info)}')
sleep(self.delay)
def close(self, info=None):
print(f'GAME OVER {self.__get_info_text(info)}\n')
sleep(self.delay)
def __get_info_text(self, info):
if info is not None and 'text' in info:
return info['text']
return ''
| 30.545455 | 67 | 0.623016 |
143230025c5bce864611860b09ce89ebab8079ba
| 12,550 |
py
|
Python
|
pcapkit/vendor/default.py
|
binref/PyPCAPKit
|
7c5ba2cfa95bdc80a95b53b6669340a8783d2ad9
|
[
"BSD-3-Clause"
] | 3 |
2018-01-21T15:22:21.000Z
|
2018-06-22T01:27:59.000Z
|
pcapkit/vendor/default.py
|
binref/PyPCAPKit
|
7c5ba2cfa95bdc80a95b53b6669340a8783d2ad9
|
[
"BSD-3-Clause"
] | null | null | null |
pcapkit/vendor/default.py
|
binref/PyPCAPKit
|
7c5ba2cfa95bdc80a95b53b6669340a8783d2ad9
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Default vendor generation."""
import abc
import collections
import contextlib
import csv
import inspect
import os
import re
import tempfile
import textwrap
import warnings
import webbrowser
from typing import TYPE_CHECKING
import requests
from pcapkit.utilities.exceptions import VendorNotImplemented
from pcapkit.utilities.warnings import VendorRequestWarning
if TYPE_CHECKING:
from collections import Counter
from typing import Callable, Optional
__all__ = ['Vendor']
#: Default constant template of enumerate registry from IANA CSV.
LINE = lambda NAME, DOCS, FLAG, ENUM, MISS: f'''\
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
"""{DOCS}"""
from aenum import IntEnum, extend_enum
__all__ = ['{NAME}']
class {NAME}(IntEnum):
"""[{NAME}] {DOCS}"""
{ENUM}
@staticmethod
def get(key: 'int | str', default: 'int' = -1) -> '{NAME}':
"""Backport support for original codes."""
if isinstance(key, int):
return {NAME}(key)
if key not in {NAME}._member_map_: # pylint: disable=no-member
extend_enum({NAME}, key, default)
return {NAME}[key] # type: ignore[misc]
@classmethod
def _missing_(cls, value: 'int') -> '{NAME}':
"""Lookup function used when value is not found."""
if not ({FLAG}):
raise ValueError('%r is not a valid %s' % (value, cls.__name__))
{MISS}
{'' if ' return cls(value)' in MISS.splitlines()[-1:] else 'return super()._missing_(value)'}
'''.strip() # type: Callable[[str, str, str, str, str], str]
def get_proxies() -> 'dict[str, str]':
"""Get proxy for blocked sites.
The function will read :envvar:`PCAPKIT_HTTP_PROXY`
and :envvar:`PCAPKIT_HTTPS_PROXY`, if any, for the
proxy settings of |requests|_.
.. |requests| replace:: ``requests``
.. _requests: https://requests.readthedocs.io
Returns:
Proxy settings for |requests|_.
"""
HTTP_PROXY = os.getenv('PCAPKIT_HTTP_PROXY')
HTTPS_PROXY = os.getenv('PCAPKIT_HTTPS_PROXY')
PROXIES = dict()
if HTTP_PROXY is not None:
PROXIES['http'] = HTTP_PROXY
if HTTPS_PROXY is not None:
PROXIES['https'] = HTTPS_PROXY
return PROXIES
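# Illustrative use (the proxy address is an assumed value): with
# PCAPKIT_HTTP_PROXY=http://127.0.0.1:8080 exported, get_proxies() returns
# {'http': 'http://127.0.0.1:8080'}, which can be passed directly to
# requests.get(..., proxies=...) as done in Vendor._request below.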
class Vendor(metaclass=abc.ABCMeta):
"""Default vendor generator.
Inherit this class with :attr:`~Vendor.FLAG` &
:attr:`~Vendor.LINK` attributes, etc. to implement
a new vendor generator.
"""
###############
# Macros
###############
#: str: Name of constant enumeration.
NAME: 'str'
#: str: Docstring of constant enumeration.
DOCS: 'str'
#: str: Value limit checker.
FLAG: 'str' = None # type: ignore[assignment]
#: str: Link to registry.
LINK: 'str' = None # type: ignore[assignment]
###############
# Processors
###############
@staticmethod
def wrap_comment(text: 'str') -> 'str':
"""Wraps long-length text to shorter lines of comments.
Args:
text: Source text.
Returns:
Wrapped comments.
"""
return '\n #: '.join(textwrap.wrap(text.strip(), 76))
def safe_name(self, name: 'str') -> 'str':
"""Convert enumeration name to :class:`enum.Enum` friendly.
Args:
name: original enumeration name
Returns:
Converted enumeration name.
"""
temp = '_'.join(
filter(
None,
re.sub(
r'\W',
'_',
'_'.join(
re.sub(
r'\(.*\)',
'',
name
).split(),
),
).split('_')
)
)
if temp.isidentifier():
return temp
return f'{self.NAME}_{temp}'
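    # Illustrative behaviour (a sketch; the subclass name Foo and the field
    # names are assumptions): for a vendor subclass named Foo,
    # safe_name('Duplicate Address (Detection)') strips the parenthesised part
    # and collapses separators to 'Duplicate_Address', while a name starting
    # with a digit such as '6LoWPAN' is not a valid identifier and comes back
    # prefixed as 'Foo_6LoWPAN'.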
def rename(self, name: 'str', code: 'str', *, original: 'Optional[str]' = None) -> 'str': # pylint: disable=redefined-outer-name
"""Rename duplicated fields.
Args:
name: Field name.
code: Field code.
Keyword Args:
original: Original field name (extracted from CSV records).
Returns:
Revised field name.
Example:
If ``name`` has multiple occurrences in the source registry,
the field name will be sanitised as ``${name}_${code}``.
Otherwise, the plain ``name`` will be returned.
"""
index = original or name
if self.record[self.safe_name(index)] > 1 or self.safe_name(index).upper() in ['RESERVED', 'UNASSIGNED']:
name = f'{name}_{code}'
return self.safe_name(name)
def process(self, data: 'list[str]') -> 'tuple[list[str], list[str]]':
"""Process CSV data.
Args:
data: CSV data.
Returns:
Enumeration fields and missing fields.
"""
reader = csv.reader(data)
next(reader)
enum = [] # type: list[str]
miss = [] # type: list[str]
for item in reader:
name = item[1]
rfcs = item[2]
temp = [] # type: list[str]
for rfc in filter(None, re.split(r'\[|\]', rfcs)):
if 'RFC' in rfc and re.match(r'\d+', rfc[3:]):
#temp.append(f'[{rfc[:3]} {rfc[3:]}]')
temp.append(f'[:rfc:`{rfc[3:]}`]')
else:
temp.append(f'[{rfc}]'.replace('_', ' '))
desc = self.wrap_comment(re.sub(r'\r*\n', ' ', '%s %s' % (
name, ''.join(temp) if rfcs else '',
), re.MULTILINE))
try:
code, _ = item[0], int(item[0])
renm = self.rename(name, code)
pres = f'{renm} = {code}'
sufs = f'#: {desc}'
#if len(pres) > 74:
# sufs = f"\n{' '*80}{sufs}"
#enum.append(f'{pres.ljust(76)}{sufs}')
enum.append(f'{sufs}\n {pres}')
except ValueError:
start, stop = item[0].split('-')
miss.append(f'if {start} <= value <= {stop}:')
miss.append(f' #: {desc}')
miss.append(f" extend_enum(cls, '{self.safe_name(name)}_%d' % value, value)")
miss.append(' return cls(value)')
return enum, miss
def count(self, data: 'list[str]') -> 'Counter[str]': # pylint: disable=no-self-use
"""Count field records.
Args:
data: CSV data.
Returns:
Field recordings.
"""
reader = csv.reader(data)
next(reader) # header
return collections.Counter(map(lambda item: self.safe_name(item[1]), # pylint: disable=map-builtin-not-iterating
filter(lambda item: len(item[0].split('-')) != 2, reader))) # pylint: disable=filter-builtin-not-iterating
def context(self, data: 'list[str]') -> 'str':
"""Generate constant context.
Args:
data: CSV data.
Returns:
Constant context.
"""
enum, miss = self.process(data)
ENUM = '\n\n '.join(map(lambda s: s.rstrip(), enum)).strip()
MISS = '\n '.join(map(lambda s: s.rstrip(), miss)).strip()
return LINE(self.NAME, self.DOCS, self.FLAG, ENUM, MISS)
def request(self, text: 'Optional[str]' = None) -> 'list[str]': # pylint: disable=no-self-use
"""Fetch CSV file.
Args:
text: Context from :attr:`~Vendor.LINK`.
Returns:
CSV data.
"""
if text is None:
return []
return text.strip().split('\r\n')
###############
# Defaults
###############
def __new__(cls) -> 'Vendor':
"""Subclassing checkpoint.
Raises:
VendorNotImplemented: If ``cls`` is not a subclass of :class:`~pcapkit.vendor.default.Vendor`.
"""
if cls is Vendor:
raise VendorNotImplemented('cannot initiate Vendor instance')
return super().__new__(cls)
def __init__(self) -> 'None':
"""Generate new constant files."""
#: str: Name of constant enumeration.
self.NAME = type(self).__name__
#: str: Docstring of constant enumeration.
self.DOCS = type(self).__doc__ # type: ignore[assignment]
data = self._request()
self.record = self.count(data)
temp_ctx = [] # type: list[str]
orig_ctx = self.context(data)
for line in orig_ctx.splitlines():
if line:
if line.strip():
temp_ctx.append(line.rstrip())
else:
temp_ctx.append(line)
context = '\n'.join(temp_ctx)
temp, FILE = os.path.split(os.path.abspath(inspect.getfile(type(self))))
ROOT, STEM = os.path.split(temp)
os.makedirs(os.path.join(ROOT, '..', 'const', STEM), exist_ok=True)
with open(os.path.join(ROOT, '..', 'const', STEM, FILE), 'w') as file:
print(context, file=file)
def _request(self) -> 'list[str]':
"""Fetch CSV data from :attr:`~Vendor.LINK`.
This is the low-level call of :meth:`~Vendor.request`.
If :attr:`~Vendor.LINK` is ``None``, it will directly
call the upper method :meth:`~Vendor.request` with **NO**
arguments.
The method will first try to *GET* the content of :attr:`~Vendor.LINK`.
Should any exception raised, it will first try with proxy settings from
:func:`~pcapkit.vendor.default.get_proxies`.
.. note::
Since some :attr:`~Vendor.LINK` links are from Wikipedia, etc., they
might not be available in certain areas, e.g. the amazing PRC :)
        Should proxies fail again, it will prompt for user intervention, i.e.
it will use :func:`webbrowser.open` to open the page in browser for you, and
you can manually load that page and save the HTML source at the location
it provides.
Returns:
CSV data.
Warns:
VendorRequestWarning: If connection failed with and/or without proxies.
See Also:
:meth:`~Vendor.request`
"""
if self.LINK is None:
return self.request() # type: ignore[unreachable]
try:
page = requests.get(self.LINK)
except requests.RequestException:
warnings.warn('Connection failed; retry with proxies (if any)...', VendorRequestWarning, stacklevel=2)
try:
proxies = get_proxies() or None
if proxies is None:
raise requests.RequestException
page = requests.get(self.LINK, proxies=proxies)
except requests.RequestException:
warnings.warn('Connection failed; retry with manual intervene...', VendorRequestWarning, stacklevel=2)
with tempfile.TemporaryDirectory(suffix='-tempdir',
prefix='pcapkit-',
dir=os.path.abspath(os.curdir)) as tempdir:
temp_file = os.path.join(tempdir, f'{self.NAME}.html')
flag = False
with contextlib.suppress(Exception):
flag = webbrowser.open(self.LINK)
if flag:
print('Please save the page source at')
print(f' {temp_file}')
else:
print('Please navigate to the following address')
print(f' {self.LINK}')
print('and save the page source at')
print(f' {temp_file}')
while True:
with contextlib.suppress(Exception):
input('Press ENTER to continue...') # nosec
if os.path.isfile(temp_file):
break
print('File not found; please save the page source at')
print(f' {temp_file}')
with open(temp_file) as file:
text = file.read()
else:
text = page.text
else:
text = page.text
return self.request(text)
| 31.375 | 146 | 0.520637 |
68ae4afeed335286421a9b9bd28f90e1232a9016
| 4,359 |
py
|
Python
|
solrdataimport/payload.py
|
pisceanfoot/solrdataimport
|
a7f97cda5eb4ff569e67e5636a9217e9fe1a5fb5
|
[
"Apache-2.0"
] | 2 |
2018-03-13T02:08:08.000Z
|
2019-07-08T03:33:26.000Z
|
solrdataimport/payload.py
|
pisceanfoot/solrdataimport
|
a7f97cda5eb4ff569e67e5636a9217e9fe1a5fb5
|
[
"Apache-2.0"
] | null | null | null |
solrdataimport/payload.py
|
pisceanfoot/solrdataimport
|
a7f97cda5eb4ff569e67e5636a9217e9fe1a5fb5
|
[
"Apache-2.0"
] | 1 |
2019-07-08T03:33:32.000Z
|
2019-07-08T03:33:32.000Z
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals, \
with_statement
import json
from solrdataimport.lib.map import Map
class Payload:
"""
load section list from json file
Payload.load('json_file_path')
{
"name": "key of the config, and also set as solr core name if core_name not present",
"core_name": "solr core name",
"index_name": "index name for elastic search",
"type_name": "type name for elastic search",
"table": "cassandra table name (keyspace.table), will use in cql like select * from table.",
"key": ["table key and partition key list"],
"nest": [{
"table": "table name, use like select * from table_parent inner join this_table",
"nestKey": {
"nest_table_key": "parent_table_key", # select * from table_parent inner join this_table where this_table.nest_table_key = table_parent.parent_table_key
"nest_table_key2": "parent_table_key2"
},
"cache": Ture or False # nest table can be cachable
"condition": {
"filed": "value" # field should equals to value
},
"alias": {
"field": "new name"
}
}],
"combine": [{ # combine result will set as a JSON field in parent doc
# one record will set like "name": {}
# mutil records will set as "name": [{}]
# also can set as "name": JSON.stringify(...)
"table": "table name",
"combineKey": { # same as nestKey
"nest_table_key": "parent_table_key", # select * from table_parent inner join this_table where this_table.nest_table_key = table_parent.parent_table_key
"nest_table_key2": "parent_table_key2"
},
"field_name": "new field name",
"field_type": "string", # string or object
"field_map_one2one": True,
"cache": Ture or False # nest table can be cachable
"condition": {
"filed": "value" # field should equals to value
}
}],
"documentId": ["value for solr _id"],
"documentField":["solr filed"],
"exclude": ["field name"]
}
"""
sectionList=None
@classmethod
def load(cls, config_file):
sectionList = []
with open(config_file) as f:
jsonObject = json.load(f, encoding='utf-8')
if jsonObject:
for section in jsonObject:
section_map = Map(section)
if section_map.nest:
array = []
for nest in section_map.nest:
section_nest = Map(nest)
if section_nest.condition:
section_nest.condition = lower_case_dict(section_nest, 'condition')
if section_nest.alias:
section_nest.alias = lower_case_dict(section_nest, 'alias', value_lower=True)
array.append(section_nest)
section_map.nest = array
if section_map.combine:
array = []
for combine in section_map.combine:
section_combine = Map(combine)
if section_combine.condition:
section_combine.condition = lower_case_dict(section_combine, 'condition')
array.append(section_combine)
section_map.combine = array
if section_map.exclude:
section_map.exclude = map(lower_case, section_map.exclude)
if section_map.documentId:
section_map.documentId = map(lower_case, section_map.documentId)
if section_map.documentField:
section_map.documentField = map(lower_case, section_map.documentField)
if section_map.condition:
section_map.condition = lower_case_dict(section_map, 'condition')
if section_map.alias:
section_map.alias = lower_case_dict(section_map, 'alias', value_lower=True)
sectionList.append(section_map)
cls.sectionList = sectionList
@classmethod
def get(cls, name):
for x in cls.sectionList:
if x.name == name:
return x
return None
@classmethod
def get_all_index(cls, index_name):
array = []
for x in cls.sectionList:
if x.index_name == index_name:
array.append(x)
return array
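# Minimal illustrative config (hypothetical values, only the fields documented
# above): Payload.load() expects a JSON array of section objects, e.g.
#
#   [
#     {
#       "name": "userinfo",
#       "table": "demo.userinfo",
#       "key": ["user_id"],
#       "documentId": ["user_id"],
#       "documentField": ["user_id", "user_name"]
#     }
#   ]
#
# after which Payload.get("userinfo") returns the corresponding Map instance.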
def lower_case(x):
return x.lower()
def lower_case_dict(section_map, field, value_lower=False):
value = section_map.get(field)
if not value:
return value
new_dic = {}
for x in value:
data = value[x]
if value_lower and hasattr(data, 'lower'):
data = data.lower()
new_dic[x.lower()] = data
return new_dic
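# Illustrative call (hypothetical values): with
# section_map = Map({'condition': {'UserType': 'Active'}}),
# lower_case_dict(section_map, 'condition') returns {'usertype': 'Active'};
# passing value_lower=True would also lower-case string values ('active').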
if __name__ == '__main__':
config_file='/Users/leo/Documents/Workspace/OpenDev/solrdataimport/test.json'
Payload.load(config_file)
print(Payload.sectionList)
# print(Payload.get('01_load.userinfo'))
| 29.856164 | 156 | 0.686167 |
fcab3d9e33fbcb97ac292c66cd953b6ba30252e9
| 1,471 |
py
|
Python
|
tests/support/cli_scripts.py
|
magenta-aps/salt
|
b6c78ecba697b1d7ba96ea95be300d12d8abf2d1
|
[
"Apache-2.0"
] | 1 |
2021-09-06T00:14:04.000Z
|
2021-09-06T00:14:04.000Z
|
tests/support/cli_scripts.py
|
magenta-aps/salt
|
b6c78ecba697b1d7ba96ea95be300d12d8abf2d1
|
[
"Apache-2.0"
] | 2 |
2021-04-30T21:17:57.000Z
|
2021-12-13T20:40:23.000Z
|
tests/support/cli_scripts.py
|
magenta-aps/salt
|
b6c78ecba697b1d7ba96ea95be300d12d8abf2d1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
tests.support.cli_scripts
~~~~~~~~~~~~~~~~~~~~~~~~~
Code to generate Salt CLI scripts for test runs
"""
# Import Python Libs
from __future__ import absolute_import, unicode_literals
import logging
import os
import sys
# Import Pytest Salt libs
from pytestsalt.utils import cli_scripts
log = logging.getLogger(__name__)
def get_script_path(bin_dir, script_name):
"""
Return the path to a testing runtime script, generating one if it does not yet exist
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if not os.path.isdir(bin_dir):
os.makedirs(bin_dir)
cli_script_name = "cli_{}.py".format(script_name.replace("-", "_"))
script_path = os.path.join(bin_dir, cli_script_name)
if not os.path.isfile(script_path):
cli_scripts.generate_script(
bin_dir=bin_dir,
script_name=script_name,
executable=sys.executable,
code_dir=RUNTIME_VARS.CODE_DIR,
inject_sitecustomize="COVERAGE_PROCESS_START" in os.environ,
)
log.info("Returning script path %r", script_path)
return script_path
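# Illustrative use ('salt-call' is just an example script name): a test case
# mixing in ScriptPathMixin can call self.get_script_path('salt-call'), which
# generates (once) and returns RUNTIME_VARS.TMP_SCRIPT_DIR/cli_salt_call.py.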
class ScriptPathMixin(object):
def get_script_path(self, script_name):
"""
Return the path to a testing runtime script
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
return get_script_path(RUNTIME_VARS.TMP_SCRIPT_DIR, script_name)
| 26.267857 | 88 | 0.673691 |
0e973379224837b9f2f2aa2b72d5530468d05eb6
| 1,081 |
py
|
Python
|
python/usaco/chapter2/holstein.py
|
kumaratinfy/Problem-Solving
|
be9e3b8a630e4126f150b9e7f03c2f3290ba3255
|
[
"MIT"
] | null | null | null |
python/usaco/chapter2/holstein.py
|
kumaratinfy/Problem-Solving
|
be9e3b8a630e4126f150b9e7f03c2f3290ba3255
|
[
"MIT"
] | null | null | null |
python/usaco/chapter2/holstein.py
|
kumaratinfy/Problem-Solving
|
be9e3b8a630e4126f150b9e7f03c2f3290ba3255
|
[
"MIT"
] | null | null | null |
"""
ID: kumar.g1
LANG: PYTHON2
TASK: holstein
"""
import itertools
fin = open ('holstein.in', 'r')
fout = open ('holstein.out', 'w')
V = int(fin.readline().rstrip("\n"))
reqmt = map(int, fin.readline().rstrip("\n").split(" "))
G = int(fin.readline().rstrip("\n"))
feeds = []
for _ in range(G):
feeds.append(map(int, fin.readline().rstrip("\n").split(" ")))
def feed_combinations():
indexes = range(G)
for i in range(1, G+1):
yield itertools.combinations(indexes, i)
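# Note: feed_combinations yields index combinations in increasing size (all
# single feeds, then all pairs, ...), so the first combination that passes
# valid_combination below uses the fewest scoops, which is why the search stops
# as soon as one is found.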
def valid_combination(possible_sln):
for v in range(V):
if reqmt[v] > possible_sln[v]:
return False
return True
def print_sln(c):
fout.write(str(len(c)) + " ")
for i in range(len(c)-1):
fout.write(str(c[i]+1) + " ")
fout.write(str(c[len(c)-1]+1) + "\n")
found = False
for combination in feed_combinations():
if found:
break
for c in combination:
possible_sln = []
for v in range(V):
possible_sln.append(sum([feeds[i][v] for i in c]))
if valid_combination(possible_sln):
print_sln(c)
found = True
break
fin.close()
fout.close()
| 20.788462 | 64 | 0.619796 |
871b8586e5782ac0386859d36a2fb92fe1626e02
| 295 |
py
|
Python
|
logical/converter/qiskit/qobj/__init__.py
|
malcolmregan/GateCircuit-to-AnnealerEmbedding
|
33a1a4ea2ebd707ade0677e0df468d5120a861db
|
[
"Apache-2.0"
] | null | null | null |
logical/converter/qiskit/qobj/__init__.py
|
malcolmregan/GateCircuit-to-AnnealerEmbedding
|
33a1a4ea2ebd707ade0677e0df468d5120a861db
|
[
"Apache-2.0"
] | 1 |
2019-04-09T02:22:38.000Z
|
2019-04-09T02:22:38.000Z
|
logical/converter/qiskit/qobj/__init__.py
|
malcolmregan/GateCircuit-to-AnnealerEmbedding
|
33a1a4ea2ebd707ade0677e0df468d5120a861db
|
[
"Apache-2.0"
] | null | null | null |
from ._qobj import (Qobj, QobjConfig, QobjExperiment, QobjInstruction,
QobjItem, QobjHeader, QobjExperimentHeader)
from ._converter import qobj_to_dict
from ._validation import validate_qobj_against_schema, QobjValidationError
from ._result import Result, ExperimentResult
| 32.777778 | 74 | 0.79661 |
6376ba5e59cbdc431c11ebf8583a2212a05b4e27
| 5,223 |
py
|
Python
|
opennmt/models/transformer.py
|
nilboy/OpenNMT-tf
|
00e6dc451d3fc96973d13839a71290f007c0b9d7
|
[
"MIT"
] | null | null | null |
opennmt/models/transformer.py
|
nilboy/OpenNMT-tf
|
00e6dc451d3fc96973d13839a71290f007c0b9d7
|
[
"MIT"
] | null | null | null |
opennmt/models/transformer.py
|
nilboy/OpenNMT-tf
|
00e6dc451d3fc96973d13839a71290f007c0b9d7
|
[
"MIT"
] | null | null | null |
"""Define the Google's Transformer model."""
import tensorflow as tf
from opennmt.models.sequence_to_sequence import SequenceToSequence, EmbeddingsSharingLevel
from opennmt.encoders.encoder import ParallelEncoder
from opennmt.encoders.self_attention_encoder import SelfAttentionEncoder
from opennmt.decoders.self_attention_decoder import SelfAttentionDecoder
from opennmt.layers.position import SinusoidalPositionEncoder
from opennmt.utils.misc import merge_dict
class Transformer(SequenceToSequence):
"""Attention-based sequence-to-sequence model as described in
https://arxiv.org/abs/1706.03762.
"""
def __init__(self,
source_inputter,
target_inputter,
num_layers,
num_units,
num_heads,
ffn_inner_dim,
dropout=0.1,
attention_dropout=0.1,
ffn_dropout=0.1,
ffn_activation=tf.nn.relu,
position_encoder_class=SinusoidalPositionEncoder,
share_embeddings=EmbeddingsSharingLevel.NONE,
share_encoders=False,
maximum_relative_position=None):
"""Initializes a Transformer model.
Args:
source_inputter: A :class:`opennmt.inputters.Inputter` to process
the source data. If this inputter returns parallel inputs, a multi
source Transformer architecture will be constructed.
target_inputter: A :class:`opennmt.inputters.Inputter` to process
the target data. Currently, only the
:class:`opennmt.inputters.WordEmbedder` is supported.
num_layers: The shared number of layers.
num_units: The number of hidden units.
num_heads: The number of heads in each self-attention layers.
ffn_inner_dim: The inner dimension of the feed forward layers.
dropout: The probability to drop units in each layer output.
attention_dropout: The probability to drop units from the attention.
ffn_dropout: The probability to drop units from the ReLU activation in
the feed forward layer.
ffn_activation: The activation function to apply between the two linear
transformations of the feed forward layer.
position_encoder_class: The :class:`opennmt.layers.PositionEncoder`
class to use for position encoding (or a callable that returns an
instance).
share_embeddings: Level of embeddings sharing, see
:class:`opennmt.models.EmbeddingsSharingLevel` for possible values.
share_encoders: In case of multi source architecture, whether to share the
separate encoders parameters or not.
maximum_relative_position: Maximum relative position representation
(from https://arxiv.org/abs/1803.02155).
"""
encoders = [
SelfAttentionEncoder(
num_layers,
num_units=num_units,
num_heads=num_heads,
ffn_inner_dim=ffn_inner_dim,
dropout=dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
ffn_activation=ffn_activation,
position_encoder_class=position_encoder_class,
maximum_relative_position=maximum_relative_position)
for _ in range(source_inputter.num_outputs)]
if len(encoders) > 1:
encoder = ParallelEncoder(
encoders if not share_encoders else encoders[0],
outputs_reducer=None,
states_reducer=None)
else:
encoder = encoders[0]
decoder = SelfAttentionDecoder(
num_layers,
num_units=num_units,
num_heads=num_heads,
ffn_inner_dim=ffn_inner_dim,
dropout=dropout,
attention_dropout=attention_dropout,
ffn_dropout=ffn_dropout,
ffn_activation=ffn_activation,
position_encoder_class=position_encoder_class,
num_sources=source_inputter.num_outputs,
maximum_relative_position=maximum_relative_position)
self._num_units = num_units
super(Transformer, self).__init__(
source_inputter,
target_inputter,
encoder,
decoder,
share_embeddings=share_embeddings)
def auto_config(self, num_replicas=1):
config = super(Transformer, self).auto_config(num_replicas=num_replicas)
return merge_dict(config, {
"params": {
"average_loss_in_time": True,
"label_smoothing": 0.1,
"optimizer": "LazyAdam",
"optimizer_params": {
"beta_1": 0.9,
"beta_2": 0.998
},
"learning_rate": 2.0,
"decay_type": "NoamDecay",
"decay_params": {
"model_dim": self._num_units,
"warmup_steps": 8000
}
},
"train": {
"effective_batch_size": 25000,
"batch_size": 3072,
"batch_type": "tokens",
"maximum_features_length": 100,
"maximum_labels_length": 100,
"keep_checkpoint_max": 8,
"average_last_checkpoints": 8
}
})
def map_v1_weights(self, weights):
weights["seq2seq"] = weights.pop("transformer")
return super(Transformer, self).map_v1_weights(weights)
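# Illustrative construction (a sketch; WordEmbedder and the hyper-parameter
# values are assumptions based on the docstring above and the usual "base"
# Transformer configuration):
#
#   model = Transformer(
#       source_inputter=opennmt.inputters.WordEmbedder(embedding_size=512),
#       target_inputter=opennmt.inputters.WordEmbedder(embedding_size=512),
#       num_layers=6, num_units=512, num_heads=8, ffn_inner_dim=2048)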
| 38.688889 | 90 | 0.659774 |
6c347bb934669c3b4caf837c05c1bc0d847e2591
| 1,834 |
py
|
Python
|
app/blueprints/auth/routes/confirm.py
|
neurothrone/project-dot
|
20889075611bed645689a76a30257f96e4b55988
|
[
"MIT"
] | null | null | null |
app/blueprints/auth/routes/confirm.py
|
neurothrone/project-dot
|
20889075611bed645689a76a30257f96e4b55988
|
[
"MIT"
] | null | null | null |
app/blueprints/auth/routes/confirm.py
|
neurothrone/project-dot
|
20889075611bed645689a76a30257f96e4b55988
|
[
"MIT"
] | null | null | null |
from flask import flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required
from app.blueprints.auth import bp_auth
from app.services import email_service
@bp_auth.before_app_request
def before_request():
if current_user.is_authenticated:
# TODO: called multiple times -> performance check
# print("PING ME")
if not current_user.is_confirmed \
and request.endpoint \
and request.blueprint != "auth" \
and request.endpoint != "static":
return redirect(url_for("auth.unconfirmed"))
else:
pass
# current_user.ping()
@bp_auth.route("/confirm/<token>")
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for("open.index"))
if current_user.confirm(token):
flash("You have confirmed your account. Thanks!", category="success")
return redirect(url_for("user.edit_profile"))
else:
flash("The confirmation link is invalid or has expired.", category="error")
return redirect(url_for("open.index"))
@bp_auth.route("/unconfirmed")
def unconfirmed():
if current_user.is_anonymous or current_user.is_confirmed:
return redirect(url_for("open.index"))
return render_template("auth/unconfirmed.html",
title="DoT - Confirm Your Account")
@bp_auth.route("/confirm")
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
email_service.send_email(current_user.email, "Confirm Your Account",
"auth/email/confirm", user=current_user, token=token)
flash("A new confirmation email has been sent to you by email.", category="success")
return redirect(url_for("open.index"))
| 32.75 | 88 | 0.676118 |
ec43fe0d488358049728fe978556c36abaf5d9bd
| 170 |
py
|
Python
|
vkbottle/api/token_generator/__init__.py
|
homus32/vkbottle
|
8247665ef74835abe0c2c5e5981826540d0ecdb5
|
[
"MIT"
] | 698 |
2019-08-09T17:32:52.000Z
|
2021-07-22T08:30:32.000Z
|
vkbottle/api/token_generator/__init__.py
|
homus32/vkbottle
|
8247665ef74835abe0c2c5e5981826540d0ecdb5
|
[
"MIT"
] | 216 |
2019-08-18T19:22:50.000Z
|
2021-07-30T12:15:17.000Z
|
vkbottle/api/token_generator/__init__.py
|
homus32/vkbottle
|
8247665ef74835abe0c2c5e5981826540d0ecdb5
|
[
"MIT"
] | 268 |
2019-08-10T14:52:04.000Z
|
2021-07-28T07:06:42.000Z
|
from .abc import ABCTokenGenerator, Token
from .consistent import ConsistentTokenGenerator
from .single import SingleTokenGenerator
from .util import get_token_generator
| 34 | 48 | 0.870588 |
7357589259fccf4cda5bba60d77fe2abab09f9a8
| 10,369 |
py
|
Python
|
benchmark/run.py
|
chuanli11/transformers
|
330dc0273d45f5583eee72371bfa038e1b6b5fa8
|
[
"Apache-2.0"
] | null | null | null |
benchmark/run.py
|
chuanli11/transformers
|
330dc0273d45f5583eee72371bfa038e1b6b5fa8
|
[
"Apache-2.0"
] | null | null | null |
benchmark/run.py
|
chuanli11/transformers
|
330dc0273d45f5583eee72371bfa038e1b6b5fa8
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
from multiprocessing import Queue
import subprocess
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, dir_path + '/../src')
from typing import Callable
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers.models.auto.configuration_auto import AutoConfig
inference = True
train = True
memory = False
speed = True
model_names = ["bert-base-uncased"]
batch_sizes = [32, 64, 128]
sequence_lengths = [64]
precision = "fp32"
repeat = 2
number = 10
number_warmup = 5
num_gpu = 1
optimize = True
backend = 'nccl'
config_dict = {
model_name: AutoConfig.from_pretrained(model_name) for model_name in model_names
}
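# Note: the elapsed times collected below are converted to throughput as
# batch_size * number / t for inference and batch_size * number * num_gpu / t
# for DDP training, so larger numbers are better.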
def setup(rank: int, world_size: int, backend: str):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group(backend, rank=rank, world_size=world_size)
def cleanup():
dist.destroy_process_group()
def run_ddp(ddp_func: Callable[[], None], world_size: int, number: int, model_name: str, batch_size: int, sequence_length: int, optimize: bool, backend: str):
tqueue = mp.get_context('spawn').SimpleQueue()
mp.spawn(ddp_func,
args=(world_size, number, model_name, batch_size, sequence_length, optimize, backend, tqueue),
nprocs=world_size,
join=True)
return tqueue.get()
def inference_func(number: int, model_name: str, batch_size: int, sequence_length: int):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
config = config_dict[model_name]
has_model_class_in_config = (
hasattr(config, "architectures")
and isinstance(config.architectures, list)
and len(config.architectures) > 0
)
if has_model_class_in_config:
try:
model_class = config.architectures[0]
transformers_module = __import__("transformers", fromlist=[model_class])
model_cls = getattr(transformers_module, model_class)
model = model_cls(config)
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
)
model.eval()
model.to(device)
# encoder-decoder has vocab size saved differently
vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=device)
if precision == "fp16":
if not device.type == 'cuda':
raise ValueError("Mixed precision is possible only for GPU.")
# amp seems to have memory leaks so that memory usage
# is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
model.half()
inference_model = model
def encoder_decoder_forward():
with torch.no_grad():
for i_batch in range(number_warmup):
outputs = inference_model(input_ids, decoder_input_ids=input_ids)
t0 = time.time()
for i_batch in range(number):
outputs = inference_model(input_ids, decoder_input_ids=input_ids)
torch.cuda.current_stream().synchronize()
t1 = time.time()
return t1 - t0
def encoder_forward():
with torch.no_grad():
for i_batch in range(number_warmup):
outputs = inference_model(input_ids)
t0 = time.time()
for i_batch in range(number):
outputs = inference_model(input_ids)
torch.cuda.current_stream().synchronize()
t1 = time.time()
return t1 - t0
func = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return func()
def train_func(rank: int, num_gpu: int, number: int, model_name: str, batch_size: int, sequence_length: int, optimize: bool, backend: str, tqueue: Queue):
setup(rank, num_gpu, backend)
config = config_dict[model_name]
has_model_class_in_config = (
hasattr(config, "architectures")
and isinstance(config.architectures, list)
and len(config.architectures) > 0
)
if has_model_class_in_config:
try:
model_class = config.architectures[0]
transformers_module = __import__("transformers", fromlist=[model_class])
model_cls = getattr(transformers_module, model_class)
model = model_cls(config)
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
)
model.to(rank)
model = DDP(model, device_ids=[rank])
if optimize:
optimizer = optim.SGD(model.parameters(), lr=0.001)
# encoder-decoder has vocab size saved differently
vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long).to(rank)
if precision == "fp16":
if torch.cuda.device_count() < rank + 1:
raise ValueError("Mixed precision is possible only for GPU.")
# amp seems to have memory leaks so that memory usage
# is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
model.half()
def compute_loss_and_backprob_encoder():
for i_batch in range(number_warmup):
loss = model(input_ids, labels=input_ids)[0]
loss.backward()
if optimize:
optimizer.step()
if rank == 0:
t0 = time.time()
for i_batch in range(number):
loss = model(input_ids, labels=input_ids)[0]
loss.backward()
if optimize:
optimizer.step()
if rank == 0:
t1 = time.time()
tqueue.put(t1 - t0)
def compute_loss_and_backprob_encoder_decoder():
for i_batch in range(number_warmup):
loss = model(input_ids, labels=input_ids)[0]
loss.backward()
if optimize:
optimizer.step()
if rank == 0:
t0 = time.time()
for i_batch in range(number):
loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0]
loss.backward()
if optimize:
optimizer.step()
if rank == 0:
t1 = time.time()
tqueue.put(t1 - t0)
func = (
compute_loss_and_backprob_encoder_decoder
if config.is_encoder_decoder
else compute_loss_and_backprob_encoder
)
func()
cleanup()
if __name__ == "__main__":
info = {}
try:
subprocess.check_output('nvidia-smi >/dev/null 2>&1', shell=True)
import py3nvml.py3nvml as nvml
nvml.nvmlInit()
handle = nvml.nvmlDeviceGetHandleByIndex(0)
info["gpu"] = nvml.nvmlDeviceGetName(handle).replace(" ", "_")
nvml.nvmlShutdown()
except:
first = ["rocm-smi", "-d 0", "--showproductname"]
second = ["awk", '/Card SKU/ { print $5 }']
p1 = subprocess.Popen(first, stdout=subprocess.PIPE)
p2 = subprocess.Popen(second, stdin=p1.stdout, stdout=subprocess.PIPE)
out, err = p2.communicate()
info["gpu"] = 'AMD_' + out.decode().strip('\n')
for c, model_name in enumerate(model_names):
print(f"{c + 1} / {len(model_names)}")
model_dict = {
"bs": batch_sizes,
"ss": sequence_lengths,
"result": {i: {} for i in batch_sizes},
}
for batch_size in batch_sizes:
for sequence_length in sequence_lengths:
if inference:
for i_run in range(repeat):
throughputs = []
try:
t = inference_func(
number,
model_name,
batch_size,
sequence_length
)
except:
t = 0
print(f"BS: {batch_size}, Sequence Length: {sequence_length}, {model_name} didn't work for inference in {precision}. Maybe OOM")
if t > 0:
throughputs.append(batch_size * number / t)
if len(throughputs) > 0:
print(min(throughputs))
if train:
for i_run in range(repeat):
throughputs = []
try:
t = run_ddp(
train_func,
num_gpu,
number,
model_name,
batch_size,
sequence_length,
optimize,
backend
)
except:
t = 0
print(f"BS: {batch_size}, Sequence Length: {sequence_length}, {model_name} didn't work for {num_gpu} DDP training in {precision}. Maybe OOM")
if t > 0:
throughputs.append(batch_size * number * num_gpu / t)
if len(throughputs) > 0:
print(min(throughputs))
| 35.632302 | 177 | 0.555502 |
7486c6b057a29c327ce7eab12d056c786e567f40
| 406 |
py
|
Python
|
TWLight/applications/migrations/0019_application_account_email.py
|
saloniig/TWLight
|
cd92e690b79676299d95394abf9e66885eac9d73
|
[
"MIT"
] | 2 |
2020-01-17T09:14:55.000Z
|
2020-01-17T09:15:20.000Z
|
TWLight/applications/migrations/0019_application_account_email.py
|
saloniig/TWLight
|
cd92e690b79676299d95394abf9e66885eac9d73
|
[
"MIT"
] | 11 |
2022-03-18T18:05:40.000Z
|
2022-03-18T18:06:04.000Z
|
TWLight/applications/migrations/0019_application_account_email.py
|
saloniig/TWLight
|
cd92e690b79676299d95394abf9e66885eac9d73
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("applications", "0018_remove_application_earliest_expiry_date")]
operations = [
migrations.AddField(
model_name="application",
name="account_email",
field=models.CharField(max_length=64, null=True, blank=True),
)
]
| 22.555556 | 85 | 0.640394 |
15ba561570a6c2b6dcf5888d19b0cd554351258d
| 18,483 |
py
|
Python
|
lesson_4_create_GMNS_network/osm2gmns/OSM2GMNS.py
|
asu-trans-ai-lab/traffic-engineering-and-analysis
|
19e5718dabee7e3eb200937b5ce2d7e5e2d3fbf1
|
[
"CC0-1.0"
] | null | null | null |
lesson_4_create_GMNS_network/osm2gmns/OSM2GMNS.py
|
asu-trans-ai-lab/traffic-engineering-and-analysis
|
19e5718dabee7e3eb200937b5ce2d7e5e2d3fbf1
|
[
"CC0-1.0"
] | null | null | null |
lesson_4_create_GMNS_network/osm2gmns/OSM2GMNS.py
|
asu-trans-ai-lab/traffic-engineering-and-analysis
|
19e5718dabee7e3eb200937b5ce2d7e5e2d3fbf1
|
[
"CC0-1.0"
] | 1 |
2020-12-03T16:01:35.000Z
|
2020-12-03T16:01:35.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 19 12:44:27 2018
@author: Jiawei(Jay) Lu ([email protected])
@author: Xuesong(Simon) Zhou ([email protected])
"""
import numpy as np
import csv
from shapely.geometry import LineString
import osmnx as ox
import copy
# city = 'Xuanwu District, Nanjing, Jiangsu, China'
bbox = [32.082663,32.000739,118.811811,118.742919]  # north, south, east, west (corner points: 32.082663, 118.811811 and 32.000739, 118.742919)
generate_demand = True
new_node_id_starting_from_one = True  # assign new ids to nodes to facilitate visualization in NEXTA (avoids very large ids)
new_link_id_starting_from_one = True  # assign new ids to links to facilitate visualization in NEXTA (avoids very large ids)
use_default_value = True
default_number_of_lanes = {'motorway':4,'trunk':3,'primary':3,'secondary':2,'tertiary':2,'residential':1,'others':1,'pedestrian':1}
default_speed_limit = {'motorway':59,'trunk':39,'primary':39,'secondary':39,'tertiary':29,'residential':29,'others':29,'pedestrian':1}
default_lane_cap = {'motorway':1799,'trunk':1799,'primary':1199,'secondary':999,'tertiary':799,'residential':699,'others':699,'pedestrian':1}
osm_link_type_dict = {'motorway':'motorway',
'motorway_link':'motorway',
'trunk':'trunk',
'trunk_link':'trunk',
'primary':'primary',
'primary_link':'primary',
'secondary':'secondary',
'secondary_link':'secondary',
'tertiary':'tertiary',
'tertiary_link':'tertiary',
'residential':'residential',
'pedestrian':'pedestrian'} # link types not in this dict will be represented by 'others'
g_number_of_macro_nodes = 0
g_number_of_macro_links = 0
g_number_of_zones = 0
node_attributes_list = []
link_attributes_list = []
g_macro_node_list = []
g_macro_link_list = []
g_zone_list = []
g_demand_list = [] # (o, d, value)
g_node_id_to_seq_no_dict = {}
g_original_node_id_to_new_node_id_dict = {}
link_type_code = 1
link_type_code_dict = {}
for link_type_name in default_number_of_lanes.keys():
link_type_code_dict[link_type_name] = link_type_code
link_type_code += 1
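# Note: on Python 3.7+ dict insertion order is preserved, so the codes assigned
# above are motorway=1, trunk=2, primary=3, secondary=4, tertiary=5,
# residential=6, others=7, pedestrian=8.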
class MacroNode:
def __init__(self):
self.name = ''
self.node_id = 0
self.original_node_id = 0
self.node_seq_no = 0
self.zone_id = None
self.control_type = ''
self.x_coord = 0.0
self.y_coord = 0.0
self.node_type = ''
self.geometry = ''
self.m_outgoing_link_list = []
self.m_incoming_link_list = []
self.activity_type = ''
self.adjacent_link_type_count_dict = {}
self.is_boundary = False
# def Initialization(self):
# global g_number_of_macro_nodes
# self.node_seq_no = g_number_of_macro_nodes
# g_internal_macro_node_seq_no_dict[self.node_id] = g_number_of_macro_nodes
# g_number_of_macro_nodes += 1
class MacroLink:
def __init__(self):
self.name = ''
self.link_id = ''
self.original_link_id = ''
self.link_key = ''
self.from_node_id = 0
self.to_node_id = 0
self.link_type = ''
self.link_type_code = 0
self.direction = 1
self.length = 0.0
# self.length = float(length)/1000 if units == 1 else float(length)/1000*0.6214
self.number_of_lanes = None
self.speed_limit = None
# if maxspeed != maxspeed:
# if use_default_value == 1:
# self.speed_limit = default_speed_limit[link_type] if units == 1 else default_speed_limit[link_type]/1.61
#
# else:
# self.speed_limit = ''
# else:
# if (units == 1) and ('mph' not in maxspeed):
# self.speed_limit = float(maxspeed)
# elif (units == 1) and ('mph' in maxspeed):
# self.speed_limit = float(maxspeed[:-4])*1.61
# elif (units == 2) and ('mph' not in maxspeed):
# self.speed_limit = float(maxspeed)/1.61
# else:
# self.speed_limit = float(maxspeed[:-4])
self.capacity = None
self.geometry = None
self.from_node = None
self.to_node = None
class Demand:
def __init__(self,o_zone_id,d_zone_id,value,demand_type):
self.o_zone_id = o_zone_id
self.d_zone_id = d_zone_id
self.value = value
self.demand_type = demand_type
def GetNetwork():
global g_number_of_macro_nodes
global g_number_of_macro_links
print('downloading the target network from osm database')
G = ox.graph_from_bbox(*bbox, network_type='all')
# G = ox.graph_from_place(city, network_type='drive') # G_projected = ox.project_graph(G)
node_attributes_df = ox.graph_to_gdfs(G, edges=False)
# node_attributes_df.to_csv('node_attributes1.csv',index=False)
link_attributes_df = ox.graph_to_gdfs(G, nodes=False)
# link_attributes_df.to_csv('link_attributes1.csv',index=False)
G_proj = ox.project_graph(G)
G2 = ox.consolidate_intersections(G_proj, rebuild_graph=True, tolerance=15, dead_ends=False)
node_attributes_df = ox.graph_to_gdfs(G2, edges=False)
# node_attributes_df.to_csv('node_attributes2.csv',index=False)
link_attributes_df = ox.graph_to_gdfs(G2, nodes=False)
# link_attributes_df.to_csv('link_attributes2.csv',index=False)
print('generating macro nodes')
node_attributes_df = ox.graph_to_gdfs(G, edges=False)
node_df_index_list = node_attributes_df.index
for node_index in node_df_index_list:
node = MacroNode()
node.original_node_id = node_attributes_df.loc[node_index,'osmid']
if new_node_id_starting_from_one:
node.node_id = g_number_of_macro_nodes + 1
g_original_node_id_to_new_node_id_dict[node.original_node_id] = node.node_id
else:
node.node_id = node.original_node_id
node.node_seq_no = g_number_of_macro_nodes
node.x_coord = node_attributes_df.loc[node_index,'x']
node.y_coord = node_attributes_df.loc[node_index,'y']
node_type = node_attributes_df.loc[node_index,'highway']
node.node_type = node_type if isinstance(node_type, str) else ''
node.geometry = node_attributes_df.loc[node_index,'geometry']
g_macro_node_list.append(node)
g_node_id_to_seq_no_dict[node.node_id] = node.node_seq_no
g_number_of_macro_nodes += 1
print('generating macro links')
link_attributes_df = ox.graph_to_gdfs(G, nodes=False)
link_attributes_df['name'] = link_attributes_df.apply(lambda x: x['name'][0] if isinstance(x['name'], list) else x['name'], axis=1)
link_attributes_df['highway'] = link_attributes_df.apply(lambda x: x['highway'][0] if isinstance(x['highway'],list) else x['highway'],axis=1)
link_attributes_df['osmid'] = link_attributes_df.apply(lambda x: x['osmid'][0] if isinstance(x['osmid'],list) else x['osmid'],axis=1)
link_attributes_df['lanes'] = link_attributes_df.apply(lambda x: x['lanes'][0] if isinstance(x['lanes'],list) else x['lanes'],axis=1)
if 'maxspeed' not in link_attributes_df.columns:
link_attributes_df['maxspeed'] = np.nan
link_attributes_df['maxspeed'] = link_attributes_df.apply(lambda x: x['maxspeed'][0] if isinstance(x['maxspeed'],list) else x['maxspeed'],axis=1)
link_df_index_list = link_attributes_df.index
others_link_type_set = set()
for link_index in link_df_index_list:
link = MacroLink()
link.original_link_id = str(link_attributes_df.loc[link_index, 'osmid'])
if new_link_id_starting_from_one:
link.link_id = str(g_number_of_macro_links+1)
else:
link.link_id = link.original_link_id
link.name = link_attributes_df.loc[link_index,'name']
link_type_osm = link_attributes_df.loc[link_index,'highway']
if link_type_osm not in osm_link_type_dict.keys():
link_type = 'others'
if link_type_osm not in others_link_type_set: others_link_type_set.add(link_type_osm)
else:
link_type = osm_link_type_dict[link_type_osm]
link.link_type = link_type
link.link_type_code = link_type_code_dict[link_type]
number_of_lanes = link_attributes_df.loc[link_index,'lanes']
oneway = link_attributes_df.loc[link_index,'oneway']
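        # NaN != NaN, so the self-comparison below checks whether the 'lanes' value is present;
        # two-way links are assumed to split their total lane count evenly per direction.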
if number_of_lanes == number_of_lanes:
link.number_of_lanes = int(number_of_lanes) if oneway else np.ceil(int(number_of_lanes) / 2)
else:
if use_default_value:
link.number_of_lanes = default_number_of_lanes[link.link_type]
max_speed = link_attributes_df.loc[link_index,'maxspeed']
if max_speed == max_speed:
link.speed_limit = float(max_speed[:-4]) if 'mph' in max_speed else float(max_speed)
else:
if use_default_value:
link.speed_limit = default_speed_limit[link.link_type]
if use_default_value:
link.capacity = default_lane_cap[link.link_type] * link.number_of_lanes
original_from_node_id = link_attributes_df.loc[link_index,'u']
original_to_node_id = link_attributes_df.loc[link_index, 'v']
if new_node_id_starting_from_one:
link.from_node_id = g_original_node_id_to_new_node_id_dict[original_from_node_id]
link.to_node_id = g_original_node_id_to_new_node_id_dict[original_to_node_id]
else:
link.from_node_id = original_from_node_id
link.to_node_id = original_to_node_id
link.length = link_attributes_df.loc[link_index,'length'] / 1000*0.6214
link.geometry = link_attributes_df.loc[link_index,'geometry']
if oneway:
g_macro_link_list.append(link)
g_number_of_macro_links += 1
else:
link_r = copy.deepcopy(link)
link.link_id = f'{link.link_id}a'
link_r.link_id = f'{link_r.link_id}b'
link_r.from_node_id, link_r.to_node_id = link.to_node_id, link.from_node_id
link_r.geometry = LineString(list(reversed(list(link.geometry.coords))))
g_macro_link_list.append(link)
g_macro_link_list.append(link_r)
g_number_of_macro_links += 2
print(f' following osm link types are represented by \'others\': {others_link_type_set}')
# def LonLat2Mile(lon1,lat1,lon2,lat2):
# lonrad1 = lon1 * np.pi / 180
# latrad1 = lat1 * np.pi / 180
# lonrad2 = lon2 * np.pi / 180
# latrad2 = lat2 * np.pi / 180
#
# a = latrad1 - latrad2
# b = lonrad1 - lonrad2
# cal = 2 * np.arcsin(np.sqrt((np.sin(a / 2))**2 + np.cos(latrad1) * np.cos(latrad2) * ((np.sin(b / 2)) ** 2))) * 6378.137
# return cal
#
#
# def DemandGeneration():
# if not generate_demand: return
#
# global demand_list
# coordinate_list = []
# number_of_outgoging_lanes_list = []
# number_of_incoming_lanes_list = []
# for i in range(g_number_of_macro_nodes):
# p_node = g_macro_node_list[i]
# if len(p_node.m_incoming_link_list) < 2 and len(p_node.m_outgoing_link_list) < 2:
# p_node.zone_id = g_number_of_zones
# coordinate_list.append([p_node.x,p_node.y])
# g_zone_list.append(Zone(p_node))
# number_of_outgoging_lanes_list.append(g_zone_list[-1].number_of_outgoing_lanes)
# number_of_incoming_lanes_list.append(g_zone_list[-1].number_of_incoming_lanes)
#
# coordinate_array = np.array(coordinate_list)
# number_of_outgoging_lanes_array = np.array(number_of_outgoging_lanes_list)
# number_of_incoming_lanes_array = np.array(number_of_incoming_lanes_list)
#
# demand_list = [['from_zone_id','to_zone_id','number_of_trips_demand_type1']]
# for i in range(g_number_of_zones):
# zone_distance = LonLat2Mile(coordinate_array[i,0],coordinate_array[i,1],coordinate_array[:,0],coordinate_array[:,1])
# demand = zone_distance * number_of_outgoging_lanes_array[i] * number_of_incoming_lanes_array
# for j in range(g_number_of_zones):
# if demand[j] > 0: demand_list.append([i,j,int(np.ceil(demand[j]))])
def generateDemands():
if not generate_demand: return
print('generating demand')
global g_number_of_zones
for link in g_macro_link_list:
from_node = g_macro_node_list[g_node_id_to_seq_no_dict[link.from_node_id]]
link.from_node = from_node
from_node.m_outgoing_link_list.append(link)
if link.link_type in from_node.adjacent_link_type_count_dict.keys():
from_node.adjacent_link_type_count_dict[link.link_type] += 1
else:
from_node.adjacent_link_type_count_dict[link.link_type] = 1
to_node = g_macro_node_list[g_node_id_to_seq_no_dict[link.to_node_id]]
link.to_node = to_node
to_node.m_incoming_link_list.append(link)
if link.link_type in to_node.adjacent_link_type_count_dict.keys():
to_node.adjacent_link_type_count_dict[link.link_type] += 1
else:
to_node.adjacent_link_type_count_dict[link.link_type] = 1
for node in g_macro_node_list:
if 'residential' in node.adjacent_link_type_count_dict.keys():
node.activity_type = 'residential'
else:
max_count_type = ''
max_count = 0
for link_type, count in node.adjacent_link_type_count_dict.items():
if count > max_count:
max_count = count
max_count_type = link_type
node.activity_type = max_count_type
for node in g_macro_node_list:
if (len(node.m_incoming_link_list) == 0) or (len(node.m_outgoing_link_list) == 0):
node.is_boundary = True
continue
if (len(node.m_incoming_link_list) == 1) and (len(node.m_outgoing_link_list) == 1):
ib_link = node.m_incoming_link_list[0]
ob_link = node.m_outgoing_link_list[0]
if ib_link.from_node_id == ob_link.to_node_id:
node.is_boundary = True
for node in g_macro_node_list:
if (node.activity_type == 'residential') or node.is_boundary:
node.zone_id = g_number_of_zones + 1
g_number_of_zones += 1
    # build accessible set
    print('  generating accessible node set (to ensure d_zone is reachable from o_zone)')
accessable_set_dict = {}
for node in g_macro_node_list:
accessable_set_dict[node.node_id] = {node.node_id}
cont_flag = True
while cont_flag:
cont_flag = False
for link in g_macro_link_list:
from_node_id = link.from_node_id
to_node_id = link.to_node_id
from_node_accessable_set = accessable_set_dict[from_node_id]
to_node_accessable_set = accessable_set_dict[to_node_id]
new_from_node_accessable_set = from_node_accessable_set.union(to_node_accessable_set)
if len(from_node_accessable_set) != len(new_from_node_accessable_set):
accessable_set_dict[from_node_id] = new_from_node_accessable_set
cont_flag = True
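    # The loop above keeps unioning each link's downstream reachable set into its upstream node
    # until no set grows any further, i.e. a fixed-point reachability closure over the directed link graph.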
# generate od
print(' generating od flow between valid od pairs')
for node_o in g_macro_node_list:
for node_d in g_macro_node_list:
if node_o is node_d: continue
if node_o.is_boundary and node_d.is_boundary:
demand_type = 'external-external'
elif node_o.is_boundary and node_d.activity_type == 'residential':
demand_type = 'external-residential'
elif node_o.activity_type == 'residential' and node_d.is_boundary:
demand_type = 'residential-external'
elif node_o.activity_type == 'residential' and node_d.activity_type == 'residential':
demand_type = 'residential-residential'
else:
continue
if node_d.node_id in accessable_set_dict[node_o.node_id]:
g_demand_list.append(Demand(node_o.zone_id, node_d.zone_id, 1, demand_type))
def OutputResults():
print('outputting network files')
with open('node.csv', 'w', newline='') as outfile:
writer = csv.writer(outfile)
writer.writerow(['name','node_id','zone_id','ctrl_type','node_type','activity_type','is_boundary','x_coord','y_coord','geometry'])
for i in range(g_number_of_macro_nodes):
p_node = g_macro_node_list[i]
is_boundary = 1 if p_node.is_boundary else 0
line = [p_node.name,p_node.node_id,p_node.zone_id,p_node.control_type, p_node.node_type,p_node.activity_type,
is_boundary,p_node.x_coord,p_node.y_coord,p_node.geometry]
writer.writerow(line)
with open('link.csv', 'w', newline='') as outfile:
writer = csv.writer(outfile)
writer.writerow(['name','link_id','from_node_id','to_node_id','dir_flag','length','lanes','free_speed',
'capacity','link_type_name','link_type','geometry'])
for i in range(g_number_of_macro_links):
p_link = g_macro_link_list[i]
line = [p_link.name,p_link.link_id,p_link.from_node_id, p_link.to_node_id,p_link.direction,p_link.length,
p_link.number_of_lanes,p_link.speed_limit,p_link.capacity,p_link.link_type,p_link.link_type_code,p_link.geometry]
writer.writerow(line)
with open('link_type.csv', 'w', newline='') as outfile:
writer = csv.writer(outfile)
writer.writerow(['link_type','link_type_name'])
for linktype_name, linktype_code in link_type_code_dict.items():
writer.writerow([linktype_code,linktype_name])
if not generate_demand: return
with open('demand.csv', 'w', newline='') as outfile:
writer = csv.writer(outfile)
writer.writerow(['o_zone_id', 'd_zone_id', 'value', 'demand_type'])
number_of_demand_records = len(g_demand_list)
for i in range(number_of_demand_records):
p_demand = g_demand_list[i]
            writer.writerow([p_demand.o_zone_id,p_demand.d_zone_id,p_demand.value,p_demand.demand_type])
if __name__ == '__main__':
GetNetwork() # get openstreetmap network
generateDemands()
OutputResults()
| 42.006818 | 149 | 0.658659 |
c618ecbcc102243f7afaee860970a257ffd2423f
| 3,583 |
py
|
Python
|
Networks/classification.py
|
emedinac/UnderstandingSA
|
a234631e99f2979396fef9e24f54865e63147ef4
|
[
"MIT"
] | null | null | null |
Networks/classification.py
|
emedinac/UnderstandingSA
|
a234631e99f2979396fef9e24f54865e63147ef4
|
[
"MIT"
] | null | null | null |
Networks/classification.py
|
emedinac/UnderstandingSA
|
a234631e99f2979396fef9e24f54865e63147ef4
|
[
"MIT"
] | null | null | null |
import torch, torchvision
import torch.nn as nn
from torch.nn import init
from .Xception import *
from .InceptionV4 import *
from .WideResnet import *
def init_weights(m):
global init_net
inet = init_net.split(',')[0]
dist = init_net.split(',')[1]
if inet=='xavier':
if dist=='uniform':
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight.data)
if m.bias is not None: m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None: m.bias.data.zero_()
elif dist=='gauss':
if isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight.data)
if m.bias is not None: m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None: m.bias.data.zero_()
    elif inet=='kaiming':  # assumed config token for the Kaiming/He initializers applied below
if dist=='uniform':
if isinstance(m, nn.Conv2d):
init.kaiming_uniform_(m.weight.data)
if m.bias is not None: m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None: m.bias.data.zero_()
elif dist=='gauss':
if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight.data)
if m.bias is not None: m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None: m.bias.data.zero_()
def ChooseNet(name, classes=10, pretrained=None):
global init_net
    conf = pretrained  # the `pretrained` argument actually carries the experiment config object
    init_net = conf.init
    if conf:
        pretrained = False  # a config object was supplied, so torchvision pretrained weights are not requested
    else:
        pretrained = conf.pretrained
if name=="InceptionV3": # Only ImageNet input
net = torchvision.models.inception_v3(num_classes=classes, pretrained=pretrained)
elif name=="InceptionV4": # Only ImageNet input
net = inceptionv4(num_classes=classes, pretrained=pretrained)
elif name=="VGG16": # Only ImageNet input
net = torchvision.models.vgg16_bn(pretrained=pretrained)
net.classifier._modules['0'] = nn.Linear(8192, 4096)
net.classifier._modules['6'] = nn.Linear(4096, classes)
elif name=="Resnet18":
net = torchvision.models.resnet18(pretrained=pretrained)
net.fc = nn.Linear(512,classes,bias=True)
elif name=="Resnet50":
net = torchvision.models.resnet50(pretrained=pretrained)
net.fc = nn.Linear(2048,classes,bias=True)
elif name=="Resnet101":
net = torchvision.models.resnet101(pretrained=pretrained)
net.fc = nn.Linear(2048,classes,bias=True)
elif name=="Squeeze11":
net = torchvision.models.squeezenet1_1(pretrained=pretrained)
net.num_classes=classes
net.classifier._modules['1'] = nn.Conv2d(512, classes, kernel_size=(1, 1), stride=(1, 1))
net.classifier._modules['3'] = nn.AvgPool2d(kernel_size=5, stride=1, padding=0)
elif name=="WideResNet101":
# net = torchvision.models.wide_resnet101_2(num_classes=classes, pretrained=pretrained)
        # use the configured fully-connected dropout rate when provided, otherwise disable dropout
        dropFC = conf.dropFC if conf.dropFC else 0.0
net = WideResNet(28, 10, dropout_rate=dropFC, num_classes=classes)
elif name=="Xception":
net = xception(num_classes=classes, pretrained=pretrained)
if not pretrained:
print(name, " , ", init_net )
net.apply(init_weights)
return net
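# Illustrative usage sketch (assumes `conf` is the experiment config object exposing .init, .pretrained and .dropFC):
#   net = ChooseNet("Resnet18", classes=10, pretrained=conf)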
| 42.654762 | 97 | 0.617918 |
526d688bd51719bdf67a3cd07d9676c455169411
| 371 |
py
|
Python
|
code_all/day03/exercise05.py
|
testcg/python
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
[
"MIT"
] | null | null | null |
code_all/day03/exercise05.py
|
testcg/python
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
[
"MIT"
] | null | null | null |
code_all/day03/exercise05.py
|
testcg/python
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
[
"MIT"
] | null | null | null |
"""
练习3:
在终端中输入课程阶段数,显示课程名称
1 显示 Python语言核心编程
2 显示 Python高级软件技术
3 显示 Web 全栈
4 显示 网络爬虫
5 显示 数据分析、人工智能
"""
course = input("请输入课程编号:")
if course == "1":
print("Python语言核心编程")
elif course == "2":
print("Python高级软件技术")
elif course == "3":
print("Web 全栈")
elif course == "4":
print("网络爬虫")
elif course == "5":
print("数据分析、人工智能")
| 17.666667 | 26 | 0.574124 |
32780f512d07f708e8b35c061cc4a0de9ab1f264
| 461 |
py
|
Python
|
tests/test_context/test_requires_context/test_context_utils.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_context/test_requires_context/test_context_utils.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_context/test_requires_context/test_context_utils.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from returns.context import Context, RequiresContext
def test_context_ask():
"""Ensures that ``ask`` method works correctly."""
assert Context[int].ask()(1) == 1
assert Context[str].ask()('a') == 'a'
def test_requires_context_from_value():
"""Ensures that ``from_value`` method works correctly."""
assert RequiresContext.from_value(1)(RequiresContext.empty) == 1
assert RequiresContext.from_value(2)(1) == 2
| 28.8125 | 68 | 0.681128 |
e74ac73438bbf497965b12e13ad8cac1a659cf64
| 399 |
py
|
Python
|
the_pic_hub/wsgi.py
|
asandelarvine/My_Pic_Hub
|
3cf11c01485e5bee75b3b3937525cedc1d55d473
|
[
"MIT"
] | null | null | null |
the_pic_hub/wsgi.py
|
asandelarvine/My_Pic_Hub
|
3cf11c01485e5bee75b3b3937525cedc1d55d473
|
[
"MIT"
] | null | null | null |
the_pic_hub/wsgi.py
|
asandelarvine/My_Pic_Hub
|
3cf11c01485e5bee75b3b3937525cedc1d55d473
|
[
"MIT"
] | null | null | null |
"""
WSGI config for the_pic_hub project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'the_pic_hub.settings')
application = get_wsgi_application()
| 23.470588 | 78 | 0.789474 |
e57e4896e7b3aa05f57af79c1bca37b9761f5455
| 15,782 |
py
|
Python
|
lncrawl/core/crawler.py
|
dragonroad99/lightnovel-crawler
|
eca7a71f217ce7a6b0a54d2e2afb349571871880
|
[
"Apache-2.0"
] | 1 |
2021-09-04T15:22:16.000Z
|
2021-09-04T15:22:16.000Z
|
lncrawl/core/crawler.py
|
dragonroad99/lightnovel-crawler
|
eca7a71f217ce7a6b0a54d2e2afb349571871880
|
[
"Apache-2.0"
] | 14 |
2021-08-28T07:34:38.000Z
|
2022-03-16T22:23:59.000Z
|
lncrawl/core/crawler.py
|
taz85/lightnovel-crawler
|
22d1200f64d3d1dc12b065f061ff3efd262040de
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Crawler application
"""
import itertools
import logging
import random
import re
import ssl
import sys
import unicodedata
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from threading import Semaphore
from typing import Dict, List
from urllib.parse import urlparse
import cloudscraper
from bs4 import BeautifulSoup
from bs4.element import Comment, Tag
from requests import Response, Session
from ..assets.user_agents import user_agents
from ..utils.ssl_no_verify import no_ssl_verification
logger = logging.getLogger(__name__)
LINE_SEP = '<br>'
INVISIBLE_CHARS = [c for c in range(sys.maxunicode) if unicodedata.category(chr(c)) in {'Cf', 'Cc'}]
NONPRINTABLE = itertools.chain(range(0x00, 0x20), range(0x7f, 0xa0), INVISIBLE_CHARS)
NONPRINTABLE_MAPPING = {character: None for character in NONPRINTABLE}
MAX_CONCURRENT_REQUEST_PER_DOMAIN = 15
REQUEST_SEMAPHORES: Dict[str, Semaphore] = {}
def get_domain_semaphore(url):
host = urlparse(url).hostname or url
if host not in REQUEST_SEMAPHORES:
REQUEST_SEMAPHORES[host] = Semaphore(MAX_CONCURRENT_REQUEST_PER_DOMAIN)
return REQUEST_SEMAPHORES[host]
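# Illustrative usage sketch (not called like this elsewhere in the module): requests to the same
# hostname share one semaphore, so at most MAX_CONCURRENT_REQUEST_PER_DOMAIN of them run concurrently.
#   with get_domain_semaphore('https://example.com/some/page'):
#       pass  # perform the HTTP request here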
class Crawler(ABC):
'''Blueprint for creating new crawlers'''
def __init__(self) -> None:
self._destroyed = False
self.executor = ThreadPoolExecutor(max_workers=5)
        # Initialize cloudscraper
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.scraper = cloudscraper.create_scraper(
# debug=True,
ssl_context=ctx,
browser={
'custom': random.choice(user_agents),
#'browser': 'chrome',
#'platform': 'windows',
#'mobile': False
}
)
except Exception as err:
logger.exception('Failed to initialize cloudscraper')
self.scraper = Session()
# end try
# Must resolve these fields inside `read_novel_info`
self.novel_title = ''
self.novel_author = ''
self.novel_cover = None
self.is_rtl = False
# Each item must contain these keys:
# `id` - 1 based index of the volume
# `title` - the volume title (can be ignored)
self.volumes = []
# Each item must contain these keys:
# `id` - 1 based index of the chapter
# `title` - the title name
# `volume` - the volume id of this chapter
# `volume_title` - the volume title (can be ignored)
# `url` - the link where to download the chapter
self.chapters = []
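        # A hypothetical, illustrative entry (values are made up):
        # {'id': 12, 'volume': 2, 'volume_title': 'Volume 2',
        #  'title': 'Chapter 12', 'url': 'https://example.com/novel/chapter-12'}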
# Other stuffs - not necessary to resolve from crawler instance.
self.home_url = ''
self.novel_url = ''
self.last_visited_url = None
# end def
# ------------------------------------------------------------------------- #
# Implement these methods
# ------------------------------------------------------------------------- #
def initialize(self) -> None:
pass
# end def
def login(self, email: str, password: str) -> None:
pass
# end def
def logout(self) -> None:
pass
# end def
def search_novel(self, query) -> List[Dict[str, str]]:
'''Gets a list of results matching the given query'''
return []
# end def
@abstractmethod
def read_novel_info(self) -> None:
        '''Get novel title, author, cover, etc.'''
raise NotImplementedError()
# end def
@abstractmethod
def download_chapter_body(self, chapter) -> str:
'''Download body of a single chapter and return as clean html format.'''
raise NotImplementedError()
# end def
def download_image(self, url) -> bytes:
'''Download image from url'''
logger.info('Downloading image: ' + url)
response = self.get_response(url, headers={
'accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.9'
})
return response.content
# end def
def get_chapter_index_of(self, url) -> int:
        '''Return the index of the chapter with the given url, or 0 if not found'''
url = (url or '').strip().strip('/')
for chapter in self.chapters:
if chapter['url'] == url:
return chapter['id']
# end if
# end for
return 0
# end def
# ------------------------------------------------------------------------- #
# Helper methods to be used
# ------------------------------------------------------------------------- #
def destroy(self) -> None:
self._destroyed = True
self.volumes.clear()
self.chapters.clear()
self.scraper.close()
self.executor.shutdown(False)
# end def
@property
def headers(self) -> dict:
return dict(self.scraper.headers)
# end def
def set_header(self, key: str, value: str) -> None:
self.scraper.headers[key.lower()] = value
# end def
@property
def cookies(self) -> dict:
return {x.name: x.value for x in self.scraper.cookies}
# end def
def absolute_url(self, url, page_url=None) -> str:
url = (url or '').strip()
if len(url) > 1000 or url.startswith('data:'):
return url
# end if
if not page_url:
page_url = self.last_visited_url
# end if
if not url or len(url) == 0:
return url
elif url.startswith('//'):
return self.home_url.split(':')[0] + ':' + url
elif url.find('//') >= 0:
return url
elif url.startswith('/'):
return self.home_url.strip('/') + url
elif page_url:
return page_url.strip('/') + '/' + url
else:
return self.home_url + url
# end if
# end def
def is_relative_url(self, url) -> bool:
page = urlparse(self.novel_url)
url = urlparse(url)
return (page.hostname == url.hostname
and url.path.startswith(page.path))
# end def
def __process_response(self, response: Response) -> Response:
if response.status_code == 403 and response.reason == 'Forbidden':
raise Exception('403 Forbidden! Could not bypass the cloudflare protection.\n'
' If you are running from your own computer, visit the link on your browser and try again later.\n'
                            '  Sometimes, using an `http` link instead of `https` may work.')
response.raise_for_status()
response.encoding = 'utf8'
self.cookies.update({
x.name: x.value
for x in response.cookies
})
return response
def get_response(self, url, **kargs) -> Response:
if self._destroyed:
            raise Exception('Instance is destroyed')
# end if
kargs = kargs or dict()
#kargs.setdefault('verify', False)
#kargs.setdefault('allow_redirects', True)
kargs.setdefault('timeout', 150) # in seconds
headers = kargs.setdefault('headers', {})
headers = {k.lower(): v for k, v in headers.items()}
#headers.setdefault('user-agent', random.choice(user_agents))
with get_domain_semaphore(url):
with no_ssl_verification():
response = self.scraper.get(url, **kargs)
self.last_visited_url = url.strip('/')
return self.__process_response(response)
# end def
def post_response(self, url, data={}, headers={}) -> Response:
if self._destroyed:
            raise Exception('Instance is destroyed')
# end if
headers = {k.lower(): v for k, v in headers.items()}
#headers.setdefault('user-agent', random.choice(user_agents))
headers.setdefault('content-type', 'application/json')
logger.debug('POST url=%s, data=%s, headers=%s', url, data, headers)
with get_domain_semaphore(url):
with no_ssl_verification():
response = self.scraper.post(
url,
data=data,
headers=headers,
# verify=False,
# allow_redirects=True,
)
return self.__process_response(response)
# end def
def submit_form(self, url, data={}, multipart=False, headers={}) -> Response:
'''Submit a form using post request'''
if self._destroyed:
            raise Exception('Instance is destroyed')
# end if
content_type = 'application/x-www-form-urlencoded; charset=UTF-8'
if multipart:
content_type = 'multipart/form-data'
# end if
headers = {k.lower(): v for k, v in headers.items()}
headers.setdefault('content-type', content_type)
response = self.post_response(url, data, headers)
return self.__process_response(response)
# end def
def get_soup(self, *args, **kwargs) -> BeautifulSoup:
parser = kwargs.pop('parser', None)
response = self.get_response(*args, **kwargs)
return self.make_soup(response, parser)
# end def
def make_soup(self, response, parser=None) -> BeautifulSoup:
if isinstance(response, Response):
html = response.content.decode('utf8', 'ignore')
elif isinstance(response, bytes):
html = response.decode('utf8', 'ignore')
elif isinstance(response, str):
html = str(response)
else:
raise Exception('Could not parse response')
# end if
soup = BeautifulSoup(html, parser or 'lxml')
if not soup.find('body'):
raise ConnectionError('HTML document was not loaded properly')
# end if
return soup
# end def
def get_json(self, *args, **kwargs) -> dict:
kwargs = kwargs or dict()
headers = kwargs.setdefault('headers', {})
headers = {k.lower(): v for k, v in headers.items()}
headers.setdefault('accept', 'application/json, text/javascript, */*')
response = self.get_response(*args, **kwargs)
return response.json()
# end def
def post_soup(self, url, data={}, headers={}, parser='lxml') -> BeautifulSoup:
response = self.post_response(url, data, headers)
return self.make_soup(response, parser)
# end def
def post_json(self, url, data={}, headers={}) -> dict:
headers = {k.lower(): v for k, v in headers.items()}
headers.setdefault('accept', 'application/json, text/javascript, */*')
response = self.post_response(url, data, headers)
return response.json()
# end def
def download_cover(self, output_file) -> None:
response = self.get_response(self.novel_cover)
with open(output_file, 'wb') as f:
f.write(response.content)
# end with
# end def
# ------------------------------------------------------------------------- #
blacklist_patterns = []
bad_tags = [
'noscript', 'script', 'style', 'iframe', 'ins', 'header', 'footer',
'button', 'input', 'amp-auto-ads', 'pirate', 'figcaption', 'address',
'tfoot', 'object', 'video', 'audio', 'source', 'nav', 'output', 'select',
'textarea', 'form', 'map',
]
bad_css = [
'.code-block', '.adsbygoogle', '.sharedaddy', '.inline-ad-slot', '.ads-middle',
'.jp-relatedposts', '.ezoic-adpicker-ad', '.ezoic-ad-adaptive', '.ezoic-ad',
'.cb_p6_patreon_button', 'a[href*="patreon.com"]',
]
p_block_tags = [
'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'main', 'aside', 'article', 'div', 'section',
]
unchanged_tags = [
'pre', 'canvas', 'img'
]
plain_text_tags = [
'span', 'a', 'abbr', 'acronym', 'label', 'time',
]
substitutions = {
'"s': "'s",
'“s': "'s",
'”s': "'s",
'&': '&',
'u003c': '<',
'u003e': '>',
'<': '<',
'>': '>',
}
def clean_text(self, text) -> str:
text = str(text).strip()
text = text.translate(NONPRINTABLE_MAPPING)
for k, v in self.substitutions.items():
text = text.replace(k, v)
return text
# end def
def clean_contents(self, div):
if not isinstance(div, Tag):
return div
# end if
if self.bad_css:
for bad in div.select(','.join(self.bad_css)):
bad.extract()
# end if
# end if
for tag in div.find_all(True):
if isinstance(tag, Comment):
tag.extract() # Remove comments
elif tag.name == 'br':
next_tag = getattr(tag, 'next_sibling')
if next_tag and getattr(next_tag, 'name') == 'br':
tag.extract()
# end if
elif tag.name in self.bad_tags:
tag.extract() # Remove bad tags
elif hasattr(tag, 'attrs'):
tag.attrs = {k: v for k, v in tag.attrs.items() if k == 'src'}
# end if
# end for
div.attrs = {}
return div
# end def
def extract_contents(self, tag) -> str:
self.clean_contents(tag)
body = ' '.join(self.__extract_contents(tag))
return '\n'.join([
'<p>' + x + '</p>'
for x in body.split(LINE_SEP)
if not self.__is_in_blacklist(x.strip())
])
# end def
def __extract_contents(self, tag) -> list:
body = []
for elem in tag.contents:
if isinstance(elem, Comment):
continue
if not elem.name:
body.append(self.clean_text(elem))
continue
if elem.name in self.unchanged_tags:
body.append(str(elem))
continue
if elem.name == 'hr':
body.append(LINE_SEP)
# body.append('-' * 8)
# body.append(LINE_SEP)
continue
if elem.name == 'br':
body.append(LINE_SEP)
continue
# if not elem.text.strip():
# continue
is_block = elem.name in self.p_block_tags
is_plain = elem.name in self.plain_text_tags
content = ' '.join(self.__extract_contents(elem))
if is_block:
body.append(LINE_SEP)
# end if
for line in content.split(LINE_SEP):
line = line.strip()
if not line:
continue
# end if
if not (is_plain or is_block):
line = '<%s>%s</%s>' % (elem.name, line, elem.name)
# end if
body.append(line)
body.append(LINE_SEP)
# end if
if body and body[-1] == LINE_SEP and not is_block:
body.pop()
# end if
# end for
return [x.strip() for x in body if x.strip()]
# end def
def __is_in_blacklist(self, text) -> bool:
if not text:
return True
# end if
if not self.blacklist_patterns:
return False
# end if
pattern = getattr(self, '__blacklist__', None)
if not pattern:
pattern = re.compile('|'.join(['(%s)' % p for p in self.blacklist_patterns]))
setattr(self, '__blacklist__', pattern)
# end if
if pattern and pattern.search(text):
return True
return False
# end def
# end class
| 32.810811 | 128 | 0.540806 |
11b0a532e764fd73d29916dda225476ced89be23
| 104 |
py
|
Python
|
src/constants.py
|
will666/wasabi-diagrams
|
42e4bccc689c2b01843e10679a5b653e01266f48
|
[
"MIT"
] | null | null | null |
src/constants.py
|
will666/wasabi-diagrams
|
42e4bccc689c2b01843e10679a5b653e01266f48
|
[
"MIT"
] | null | null | null |
src/constants.py
|
will666/wasabi-diagrams
|
42e4bccc689c2b01843e10679a5b653e01266f48
|
[
"MIT"
] | null | null | null |
GRAPH_ATTR = {"fontsize": "45", "bgcolor": "transparent"}
OUTPUT_PATH = "../data"
OUTPUT_FORMAT = "png"
| 26 | 57 | 0.663462 |
dd427e810f0fc2608fb0877e138cefa6426104a4
| 405 |
py
|
Python
|
v1/venv/src/authentication/wsgi.py
|
AkashSDas/Django-Authentication-API
|
9c9a3f40e337619ddd1cdfd10871932aabee1d97
|
[
"MIT"
] | null | null | null |
v1/venv/src/authentication/wsgi.py
|
AkashSDas/Django-Authentication-API
|
9c9a3f40e337619ddd1cdfd10871932aabee1d97
|
[
"MIT"
] | null | null | null |
v1/venv/src/authentication/wsgi.py
|
AkashSDas/Django-Authentication-API
|
9c9a3f40e337619ddd1cdfd10871932aabee1d97
|
[
"MIT"
] | null | null | null |
"""
WSGI config for authentication project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'authentication.settings')
application = get_wsgi_application()
| 23.823529 | 78 | 0.792593 |
ff8396dbb055f251045e7bab4b498eaceaeb5dd8
| 1,865 |
py
|
Python
|
open_humans/management/commands/stats.py
|
danamlewis/open-humans
|
9b08310cf151f49032b66ddd005bbd47d466cc4e
|
[
"MIT"
] | 57 |
2016-09-01T21:55:52.000Z
|
2022-03-27T22:15:32.000Z
|
open_humans/management/commands/stats.py
|
danamlewis/open-humans
|
9b08310cf151f49032b66ddd005bbd47d466cc4e
|
[
"MIT"
] | 464 |
2015-03-23T18:08:28.000Z
|
2016-08-25T04:57:36.000Z
|
open_humans/management/commands/stats.py
|
danamlewis/open-humans
|
9b08310cf151f49032b66ddd005bbd47d466cc4e
|
[
"MIT"
] | 25 |
2017-01-24T16:23:27.000Z
|
2021-11-07T01:51:42.000Z
|
# -*- coding: utf-8 -*-
import arrow
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from termcolor import colored
from open_humans.models import Member
UserModel = get_user_model()
class Command(BaseCommand):
"""
    Show statistics on recently joined users.
"""
help = "Statistics on the last day(s) of users"
args = ""
def add_arguments(self, parser):
parser.add_argument(
"--days", nargs="?", type=int, default=1, help="the number of days to show"
)
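    # Example invocation (illustrative): `./manage.py stats --days 7` lists users who joined in the last 7 days.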
def handle(self, *args, **options):
day_offset = options["days"] - 1
end = arrow.now().span("day")[1]
start = end.replace(days=-day_offset).span("day")[0]
users = (
UserModel.objects.all()
.filter(date_joined__range=[start.datetime, end.datetime])
.order_by("date_joined")
)
for user in users:
self.stdout.write(
"{} ({})".format(
user.username, arrow.get(user.date_joined).format("YYYY-MM-DD")
)
)
try:
for key, connection in list(user.member.connections.items()):
suffix = "no key data"
data = getattr(user, key).get_retrieval_params()
if key == "pgp" and "huID" in data:
suffix = data["huID"]
if key == "runkeeper" and "access_token" in data:
suffix = "access token present"
self.stdout.write(
" {}: {} {}".format(
connection["verbose_name"], colored("✔", "green"), suffix
)
)
except Member.DoesNotExist:
pass
self.stdout.write("")
| 27.426471 | 87 | 0.504558 |
45ae533a8c85dee0449dd8756ce23d7d0adf9d7c
| 688 |
py
|
Python
|
setup.py
|
nmoisseeva/cwipp
|
da5f13f236e9f0739088562abf2ab12beaac80ee
|
[
"MIT"
] | null | null | null |
setup.py
|
nmoisseeva/cwipp
|
da5f13f236e9f0739088562abf2ab12beaac80ee
|
[
"MIT"
] | null | null | null |
setup.py
|
nmoisseeva/cwipp
|
da5f13f236e9f0739088562abf2ab12beaac80ee
|
[
"MIT"
] | 1 |
2021-06-25T02:04:11.000Z
|
2021-06-25T02:04:11.000Z
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="cwipp",
version="0.0.1",
author="Nadya Moisseeva",
author_email="[email protected]",
description="Plume-rise parameterization package for wildfire smoke",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nmoisseeva/cwipp/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 29.913043 | 73 | 0.670058 |
cb2cc2c169c3a8c6d4a7c82c64e406b8f28be765
| 1,076 |
py
|
Python
|
zabbixwechat/checkdbnoresolved.py
|
ansemz/zabbix-wechat
|
870f3676e9ce569eae01656653669b244ef2b180
|
[
"Apache-2.0"
] | 81 |
2017-03-19T13:54:44.000Z
|
2022-01-13T08:36:44.000Z
|
zabbixwechat/checkdbnoresolved.py
|
tony163/zabbixwechat
|
d0d187e490ebb2b563a417c450db4fe21e7817ea
|
[
"Apache-2.0"
] | 2 |
2017-04-12T09:33:07.000Z
|
2019-04-24T11:20:54.000Z
|
zabbixwechat/checkdbnoresolved.py
|
tony163/zabbixwechat
|
d0d187e490ebb2b563a417c450db4fe21e7817ea
|
[
"Apache-2.0"
] | 42 |
2017-03-19T14:00:39.000Z
|
2021-12-26T04:52:38.000Z
|
# coding: utf-8
from zabbixwechat.common import *
from django.shortcuts import render
from zabbix_wechat_db.models import ALARM_INFO
import os
import time
def checkdbnoresolved():
callgettext()
now = int(time.time())
hosts= ALARM_INFO.objects.filter(
CONFIRM_TIME="",
RESOLVE_TIME="",
SEVERITY__gt=1,
ALARM_TIME__gt=now -
86400,
ALARM_TIME__lt=now -
600).values_list('HOST_GROUP').distinct()
if len(hosts)!=0:
for i in hosts:
num = ALARM_INFO.objects.filter(
CONFIRM_TIME="",
RESOLVE_TIME="",
SEVERITY__gt=1,
HOST_GROUP=i[0],
ALARM_TIME__gt=now - 86400,
ALARM_TIME__lt=now -600).count()
data = _("There are {0} alarms in regin {1} has not been comfirmed").format(num,i[0])
agentid=findagentid(i[0])
content = [{"title": "{0}".format(data)}]
toparty = findgroupid(i[0])
senddatanews(content,toparty,agentid)
print ("OK")
| 30.742857 | 98 | 0.571561 |
c90895f15617dda411cdd86fff69b66b448eba45
| 23,050 |
py
|
Python
|
sanitize/sanitize_utils.py
|
lawrluor/matchstats
|
0c8cd08403d5fa2772b3d5d9391a804866d15dce
|
[
"BSD-3-Clause"
] | 6 |
2015-06-22T16:51:03.000Z
|
2017-12-05T22:18:39.000Z
|
sanitize/sanitize_utils.py
|
lawrluor/matchstats
|
0c8cd08403d5fa2772b3d5d9391a804866d15dce
|
[
"BSD-3-Clause"
] | 1 |
2021-06-01T21:44:48.000Z
|
2021-06-01T21:44:48.000Z
|
sanitize/sanitize_utils.py
|
lawrluor/matchstats
|
0c8cd08403d5fa2772b3d5d9391a804866d15dce
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from collections import defaultdict
# Raw Regular expression list representing top players, to be processed by add_prefixes
player_raw_regex_dict = {
'Global' : [
['(mang[o0]$)', '(c9mang[o0]$)'],
['(armada$)', '(\[a\]rmada$)'],
['(ppmd$)', '(dr\. pp$)', '(dr\. peepee$)', '(dr pp$)', '(dr peepee$)', '(doctor pp$)', '(doctor peepee$)'],
['(mew2king$)', '(m2k$)'],
['(hungrybox$)', '(hbox$)'],
['(leffen$)', '(l3ff3n$)'],
['(axe$)'],
['(hax$)', '(hax\$$)'],
['(westballz$)'],
['(colbol$)'],
['(fly amanita$)'],
['(lucky$)'],
['(pewpewu$)', '(ppu$)', '(pewpewyou$)'],
['(shroomed$)'],
['(silentwolf$)', '(silent wolf$)'],
['(plup$)'],
['(fiction$)'],
['(s2j$)', '(smoke2jointz$)'],
['(ice$)'],
['(sfat$)'],
['(zhu$)'],
['(kirbykaze$)', '(kk$)'],
['(nintendude$)'],
['(macd$)'],
['(amsa$)'],
['(chillindude$)', '(chillindude829$)', '(chillin$)'],
['(javi$)'],
['(kels$)'],
['(wizzrobe$)', '(wizzy$)'],
['(the moon$)', '(la luna$)', '(moon$)'],
['(eddy mexico$)'],
['(chu dat$)', '(chudat$)'],
['(bladewise$)'],
['(abate$)'],
['(zer[o0]$)'],
['(larry lurr$)', '(larrylurr$)', '(DEHF$)'],
['(hugs$)', '(hugs86$)'],
['(duck$)'],
['(dj nintendo$)', '(djn$)'],
['(kalamazhu$)', '(kalamazhoo$)', '(kzhu$)'],
['(lord$)'],
['(cactuar$)', '(cactus$)'],
['(weon-x$)', '(weon x$)', '(weonx$)'],
['(darkrain$)'],
['(kage$)', '(kage the warrior$)'],
['(zanguzen$)'],
['(silentspect[er][re]$)', '(silent spect[er][re]$)'],
['(koreandj$)', '(korean dj$)', '(kdj$)'],
['(swiftbass$)', '(swift$)', '(gibson zero$)', '(gibsonzero$)', '(phantom renegade$)'],
['(z[o0]s[o0]$)'],
['(raynex$)'],
['(darkatma$)', '(atma$)'],
['(porkchops$)'],
['(ib$)', '(i\.b\.$)'],
['(darc$)'],
['(swedish delight$)', '(swedish$)'],
['(okamibw$)', '(okami bw$)', '(okami$)'],
['(ken$)', '(sephiroth ken$)', '(sephirothken$)'],
['(vanz$)'],
['(excel zer[o0]$)', '(excelzero$)'],
['(darrell$)', '(darell$)', '(darrel$)'],
['(sl[o0]x$)', '(sl[o0]x\)$)'],
['(beer man$)', '(beer master$)', '(lambchops$)'],
['(s[o0]ft$)'],
['(fuzzyness$)'],
['(taf[o0]kints$)', '(taf[o0]$)'],
['(lil fumi$)', '(santi$)', '(santiago$)'],
['(dart$)', '(dart!$)'],
['(blea gelo$)', '(bleagelo$)'],
['(redd$)'],
['(hanky panky$)', '(hankypanky$)'],
['(remen$)'],
['(str[1i]cn[1iy]n[e3]$)', '(str[1i]c9$)', '(str[1i]c n[1iy]n[e3]$)', '(str[1i]c 9$)'],
['(cyrain$)'],
['(homemadewaffles$)', '(hmw$)', '(yung waff$)'],
['(pikachad$)', '(pika chad$)'],
['(ren[o0]$)'],
['(gahtzu$)', '(ghatzu$)'],
['(doh$)', '(darkness of heart$)'],
['(eggm$)'],
['(arc$)'],
['(t[o0]pe$)'],
['(drugged fox$)', '(druggedfox$)'],
['(gravy$)'],
['(tai$)'],
['(lucien$)'],
['(lord hdl$)', '(lordhdl$)'],
['(trail$)'],
['(scar$)'],
['(laudandus$)', '(laud[au][au]ndus$)', '(laduandus$)'],
['(t[o0]ph$)'],
['(alex19$)'],
['(c[o0]nn[o0]rthekid$)', '(c[o0]nn[o0]r$)', '(c[o0]nn[o0]r the kid$)'],
['([bj]izzarro flame$)', '([bj]izzarroflame$)', '([bj]izaro flame$)', '([bj]izzaro flame$)', '([bj]izarro flame$)'],
['(hyprid$)'],
['(a[zs]u[sz]a$)'],
['(m[o0]j[o0]$)'],
['(milkman$)', '(milk man$)'],
['(frootloop$)'],
['(esam$)'],
# MIOM 2013 Rank Begins
['(wobbles$)', '(wobbles the phoenix$)'],
['(unknown522$)', '(unknown 522$)', '(ryan ford$)'],
['(taj$)'],
['(overtriforce$)', '(over$)', '(over triforce$)'],
['(dashizwiz$)', '(shiz$)', '(da shiz wiz$)'],
['(vwins$)'],
['(th[o0]rn$)'],
['(lovage$)'],
['(jman$)'],
['(gucci$)'],
['(blunted_object10$)', '(blunted_object$)', '(blunted object$)'],
['(bam$)'],
['(sung666$)', '(sung 666$)', '(sung$)'],
['(eggz$)'],
['(strawhat dahean$)', '(strawhat$)'],
['(vish$)'],
['(sion$)'],
['(phil$)'],
['(bob\$$)'],
['(kounotori$)'],
['(stab$)', '(victor abdul latif$)', '(stabbedbyahippie$)', '(stabbedbyanipple$)'],
['(g\$$)'],
['(vist$)'],
['(pkmvodka$)', '(pkm vodka$)'],
['(prince abu$)', '(princeabu$)'],
['(sauc3$)'],
['(n[o0]ne$)'],
['(amsah$)'],
['(professor pro$)', '(prof pro$)', '(professorpro$)'],
],
'New England' : [
['(sl[o0]x$)'],
['(koreandj$)', '(korean dj$)', '(kdj$)'],
['(swiftbass$)', '(swift$)', '(gibson zero$)', '(gibsonzero$)', '(phantom renegade$)'],
['(z[o0]s[o0]$)'],
['(th[o0]rn$)'],
['(crush$)'],
['(mafia$)', '(maf$)', '(irish mafia$)', '(slimjim$)', '(slim jim$)'],
['(mdz$)', '(mattdotzeb$)', '(matt dot zeb$)', '(kizzuredux$)'],
['(klap[s$]$)'],
['(tian$)'],
['(squible$)', '(squibble$)'],
['(kyupuff$)', '(kyu puff$)', '(plop$)', '(buffglutes92$)', '(the pleaup$)'],
['(rime$)', '(rl$)'],
['(mr lemon$)', '(mr\. lemon$)'],
['(kaiju$)'],
['(dudutsai$)', '(dudustsai$)', '(dudustai$)', '(tsai$)', '(stb$)'],
['(sora$)'],
['(hart$)'],
['(mr tuesday$)', '(mr\. tuesday$)', '(mr\.tuesday$)'],
['(bigvegetabluntz$)', '(bvb$)'],
['(me[tl][tl]wing$)'],
['(b[o0]lt$)', '(b[o0]wn$)', '(b[o0]lt\$\$\.\.)'],
['(cheezpuff$)'],
['(r2dliu$)'],
['(kaza[am]m$)', '(kaza[am]mtheman$)'],
['(trademark$)'],
['(yedi$)'],
['(bugatti$)', '(mr bugatti$)', '(mr\. bugatti$)'],
['(ryucloud$)', '(ryu cloud$)'],
['(mizu$)'],
['(batsox$)', '(batsox the realest luigi$)'],
['(bonfire10$)', '(bonfire$)'],
['(trilok$)'],
['(kunai$)', '(wx$)', '(wxia$)'],
['(arc$)', '(arcnatural$)', '(arc natural$)'],
['(flexed$)'],
['(spiff$)'],
['(me[tl][tl]wing$)'],
['(vudoo$)'],
['(tichinde925$)', '(tichinde$)', '(tichinde 925$)', '(master of setups$)'],
['(bugatti$)', '(colinsfuckboi$)', '(tpains producer$)'],
['(makari$)'],
['(young bone[sz] vill[ia][ia]n$)', '(yung bone[sz] vill[ia][ia]n$)', '(yungbone[sz]villain)', '(youngbone[sz]villain)'],
['(thechocolatelava$)', '(middleeastballz$)'],
['(tonic$)'],
['(broth chiler$)', '(broth chiller$)'],
['(hea7$)', '(areallyshittysheik$)'],
['(torsional strain$)', '(torsionalstrain$)'],
['(bonk$)', '(bonk cushy$)', '(bonkcushy$)', '(notable bonk$)'],
['(shkshk$)', '(shk shk)'],
['(snoww[ie]ner$)', '(snow w[ie]ner$)'],
['(stoc$)', '(stoc\$\$$)'],
['(wind$)'],
['(swissmiss$)', '(swiss miss$)'],
['(hackey$)', '(ghettowarmachine420$)'],
['(heropon$)', '(2dank$)'],
['(dis$)'],
['(edwin dexter$)', '(edwindexter$)'],
['(mizuki$)'],
['(corona$)'],
['(spooky ghost$)', '(spookyghost$)'],
['(spell$)'],
['(maso$)'],
['(jlo$)'],
['(coldo$)'],
['(nfreak$)'],
['(hazard$)'],
['(solar$)'],
['(pyro$)'],
['(bluntmaster$)', '(blunt master$)', '(s2b$)'],
['(para$)'],
['(racer$)', '(cashbags fatstackington$)', '(racer\$$)', '(racer.money$)', '(mr\. melon$)', '(mr melon$)'],
['(seaghost$)', '(sea ghost$)'],
['(fang$)'],
['(null$)'],
['(gtowntom$)', '(gtown tom$)', '(gtown_tom$)', '(gtown-tom$)', '(g-town tom$)'],
['(barbie$)'],
['(red rice$)', '(redrice$)'],
['(doom$)'],
['(darc$)'],
['(rarik$)'],
['(guti$)'],
['(poobanans$)'],
['(zila$)'],
['(corona$)'],
['(uboa$)', '(greyface$)'],
['(lint$)'],
['(razz$)'],
['(blazingsparky$)', '(blazing sparky$)', '(blazing spark$)', '(blazingspark$)'],
['(zeo$)'],
['(connor$)', '(connor s$)'],
['(dazwa$)'],
['(mrryanness$)', '(mr ryan ness$)', '(ryanness$)', '(mr ryan s$)'],
['(bus$)'],
['(eric green$)', '(ericgreen$)'],
['(glory swole$)'],
['(neji$)'],
['(ironchief$)', '(iron chief$)'],
['(eatboxman$)'],
['(damp$)'],
['(asl$)'],
['(neft$)'],
['(shminkledorf$)'],
['(termina bay$)'],
['(kalvar$)'],
['(kevin m$)', '(kevinm$)'],
['(fury$)'],
['(twisty$)'],
['(clue$)'],
['(xpilot$)', '(x pilot$)'],
['(-1$)'],
['(donnie cash$)', '(donniecash$)', '(don$)'],
['(infinite numbers$)', '(numbers$)', '(infinitenumbers$)'],
['(blahyay$)'],
['(ses$)'],
['(charlie harper$)', '(charlieharper$)'],
['(refried jeans$)', '(refriedjeans$)'],
['(stickman$)', '(tree$)'],
['(lalo$)'],
['(refresh$)'],
['(arty$)'],
['(the yak$)', '(yak$)'],
['(quality steve$)', '(qualitysteve$)'],
['(veggietales$)', '(veggie tales$)'],
['(oyo$)'],
['(yat$)'],
['(boy$)'],
['(seabass$)'],
['(tartox$)'],
['(khan$)'],
['(matat[o0]$)'],
['(jfm$)'],
['(ct$)'],
['(beetle$)'],
['(captain crunch$)', '(captaincrunch$)'],
['(zealous 5000$)', '(zealous5000$)'],
['(thunderpaste$)', '(thunder paste$)'],
['(joyboy$)', '(joy boy$)'],
],
'NorCal' : [
['(shroomed$)'],
['(pewpewu$)', '(ppu$)', '(pewpewyou$)'],
['(sfat$)'],
['(silentspect[er][re]$)', '(silent spect[er][re]$)'],
['(darrell$)', '(darell$)', '(darrel$)'],
['(homemadewaffles$)', '(hmw$)', '(yung waff$)'],
['(lucien$)'],
['(scar$)'],
['(laudandus$)'],
['(t[o0]ph$)'],
['([bj]izzarro flame$)', '([bj]izzarroflame$)', '([bj]izaro flame$)', '([bj]izzaro flame$)', '([bj]izarro flame$)'],
['(hyprid$)'],
['(a[zs]u[sz]a$)'],
['(phil$)']
],
'SoCal' : [
['(mang[o0]$)', '(c9mang[o0]$)'],
['(lucky$)'],
['(westballz$)'],
['(fly amanita$)'],
['(fiction$)'],
['(s2j$)', '(smoke2jointz$)'],
['(macd$)'],
['(eddy mexico$)'],
['(larry lurr$)', '(larrylurr$)', '(DEHF$)'],
['(hugs$)', '(hugs86$)'],
['(okamibw$)', '(okami bw$)', '(okami$)'],
['(ken$)', '(sephiroth ken$)', '(sephirothken$)', '(liquidken$)'],
['(taf[o0]kints$)', '(taf[o0]$)'],
['(lil fumi$)', '(santi$)', '(santiago$)'],
['(ren[o0]$)'],
['(alex19$)'],
['(c[o0]nn[o0]rthekid$)', '(c[o0]nn[o0]r$)', '(c[o0]nn[o0]r the kid$)'],
['([bj]izzarro flame$)', '([bj]izzarroflame$)', '([bj]izaro flame$)', '([bj]izzaro flame$)', '([bj]izarro flame$)'],
['(hyprid$)'],
['(lovage$)'],
['(sung666$)', '(sung 666$)', '(sung$)', '(sung475$)', '(sung 475$)'],
['(stab$)', '(victor abdul latif$)', '(stabbedbyahippie$)', '(stabbedbyanipple$)', '(matt$)'],
['(mikehaze$)', '(mike haze$)'],
['(kira$)'],
['(rofl$)'],
['(j666$)', '(j devil$)', '(jdevil$)'],
['(reason$)'],
['(a rookie$)', '(arookie$)'],
['(squid$)'],
['(jace$)'],
['(koopatroopa$)', '(koopatroopa895$)', '(koopa troopa$)', '(koopa troopa 895$)'],
['(captain faceroll$)', '(captainfaceroll$)', '(faceroll$)'],
['(psychomidget$)', '(psycho midget$)'],
['(jpegimage$)', '(jpeg image$)'],
['(sherigami$)', '(sherigam$)'],
['(mevan$)'],
['(coolhat$)', '(cool hat$)'],
['(peligro$)'],
['(dunk$)'],
['(dendypretendy$)', '(dendy$)', '(dendy pretendy)'],
['(khepri$)'],
['(mixx$)', '(diya$)'],
['(sacasumoto$)'],
['(null$)'],
['(zeo$)'],
['(tonic$)'],
['(sora$)']
],
'North Carolina' : [
['(l[o0]zr$)'],
['(jwilli$)'],
['(ts3d$)'],
['(cope$)', '(ke$ha$)', '(sgt\. thunderfist md$)'],
['(wharve$)', '(warve$)'],
['(mining elf$)', '(elf$)'],
['(dembo$)', '(discoprof$)', '(d\'embaux$)', '(discovery professor dembo$)', '(d\'embeaux$)'],
['(tenbutts$)', '(ten butts$)'],
['(quetpie$)', '(andrew que$)', '(que t pie$)', '(quetpie forever$)'],
['(loudpackmatt$)', '(n$)', '(loud$)', '(loudpackmatt$)'],
['(tinkle$)', '(tinkl$)'],
['(catfish joe$)', '(joey bluntz$)'],
['(banjo$)', '(banjo d\. fingers$)'],
['(kchain\$$)', '(dr\. fingerdicks$)', '(dr\. fd$)'],
['(t raw$)', '(traw$)'],
['(@the_priceisnice$)', '(alanp$)'],
['(arundo$)', '(arumdo$)'],
['(caleb$)', '(oak town$)', '(not caleb$)', '(caliber$)'],
['(madz$)', '(maddie$)'],
['(niq$)'],
['(geezer$)'],
['(ezvega$)'],
['(salscat$)', '(salsacat$)', '(salsa cat$)'],
['(kun\$$)'],
['(byrd$)'],
['(\):)', '(:\()'],
['(cloudsquall$)']
]
}
# Sanitized tags representing global top players.
sanitized_tags_dict = {
'Global' : [
'Mango',
'Armada',
'PPMD',
'Mew2King',
'Hungrybox',
'Leffen',
'Axe',
'Hax',
'Westballz',
'Colbol',
'Fly Amanita',
'Lucky',
'PewPewU',
'Shroomed',
'Silentwolf',
'Plup',
'Fiction',
'S2J',
'Ice',
'SFAT',
'Zhu',
'Kirbykaze',
'Nintendude',
'MacD',
'aMSa',
'Chillindude',
'Javi',
'Kels',
'Wizzrobe',
'The Moon',
'Eddy Mexico',
'ChuDat',
'Bladewise',
'Abate',
'Zero',
'Larry Lurr',
'HugS',
'Duck',
'DJ Nintendo',
'Kalamazhu',
'Lord',
'Cactuar',
'Weon-X',
'Darkrain',
'Kage',
'Zanguzen',
'SilentSpectre',
'KoreanDJ',
'Swift',
'Zoso',
'Raynex',
'Darkatma',
'Porkchops',
'I.B.',
'Darc',
'Swedish Delight',
'OkamiBW',
'Ken',
'Vanz',
'Excel Zero',
'Darrell',
'Slox',
'Lambchops',
'S0ft',
'Fuzzyness',
'Tafokints',
'Santi',
'Dart',
'Blea Gelo',
'Redd',
'Hanky Panky',
'Remen',
'Stricnyn3',
'Cyrain',
'HomeMadeWaffles',
'Pikachad',
'Reno',
'Gahtzu',
'DOH',
'Eggm',
'Arc',
'Tope',
'Druggedfox',
'Gravy',
'Tai',
'Lucien',
'Lord HDL',
'Trail',
'Scar',
'Laudandus',
'Toph',
'Alex19',
'Connor',
'Bizzarro Flame',
'Hyprid',
'Azusa',
'Mojo',
'Milkman',
'Frootloop',
'ESAM',
# 2013 MIOM Rank List Begins
'Wobbles',
'Unknown522',
'Taj',
'Overtriforce',
'DaShizWiz',
'Vwins',
'Thorn',
'Lovage',
'Jman',
'Gucci',
'Blunted_object10',
'Bam',
'Sung',
'Eggz',
'Strawhat Dahean',
'Vish',
'Sion',
'Phil',
'Bob$',
'Kounotori',
'Matt (Stab)',
'G$',
'Vist',
'Pkmvodka',
'Prince Abu',
'Sauc3',
'N0ne',
'Amsah',
'Professor Pro'
],
'New England' : [
'Slox',
'KoreanDJ',
'Swift',
'Zoso',
'Thorn',
'Crush',
'Mafia',
'MattDotZeb',
'Klap$',
'Tian',
'Squible',
'Kyu Puff',
'Rime',
'Mr. Lemon',
'Kaiju',
'Dudutsai',
'Sora [New England]',
'Hart',
'Mr. Tuesday',
'BVB',
'Metlwing',
'Bolt',
'Cheezpuff',
'R2DLiu',
'Kazamm',
'Trademark',
'Yedi',
'Bugatti',
'RyuCloud',
'MIZU',
'BatSox',
'Bonfire10',
'Trilok',
'Kunai',
'Arc',
'Flexed',
'Spiff',
'Metlwing',
'Vudoo',
'Tichinde',
'Bugatti',
'Makari',
'Yung Bones Villain',
'TheChocolateLava',
'Tonic [New England]',
'Broth Chiler',
'Hea7',
'Torsional Strain',
'Bonk',
'Shk Shk',
'Snowweiner',
'STOC',
'Wind',
'Swissmiss',
'GhettoWarMachine420',
'Heropon',
'Dis',
'Edwin Dexter',
'Mizuki',
'Corona',
'Spooky Ghost',
'Spell',
'Maso',
'JLo',
'Coldo',
'NFreak',
'Hazard',
'Solar',
'Pyro',
'S2B',
'Para',
'Racer',
'Seaghost',
'Fang',
'Null [New England]',
'Gtown_Tom',
'Barbie',
'Red Rice',
'Doom',
'Darc',
'Rarik',
'Guti',
'Poobanans',
'Zila',
'Corona',
'Uboa',
'Lint',
'Razz',
'BlazingSparky',
'Zeo [New England]',
'Connor [New England]',
'Dazwa',
'MrRyanNess',
'Bus',
'Eric Green',
'Glory Swole',
'Neji',
'IronChief',
'EatBoxMan',
'Damp',
'ASL',
'Neft',
'Shminkledorf',
'Termina Bay',
'Kalvar',
'Kevin M',
'Fury',
'Twisty',
'Clue',
'XPilot',
'-1',
'Donnie Cash',
'Infinite Numbers',
'Blahyay',
'Ses',
'Charlie Harper',
'Refried Jeans',
'Tree',
'Lalo',
'Refresh',
'Arty',
'The Yak',
'Quality Steve',
'Veggietales',
'oYo',
'Yat',
'Boy',
'Seabass',
'Tartox',
'Khan',
'Matat0',
'JFM',
'CT',
'Beetle',
'Captain Crunch',
'Zealous5000',
'ThunderPaste',
'JoyBoy',
],
'NorCal' : [
'Shroomed',
'PewPewU',
'SFAT',
'SilentSpectre',
'Darrell',
'HomeMadeWaffles',
'Lucien',
'Scar',
'Laudandus',
'Toph',
'Bizzarro Flame',
'Hyprid',
'Azusa',
'Phil'
],
'SoCal' : [
'Mango',
'Lucky',
'Westballz',
'Fly Amanita',
'Fiction',
'S2J',
'MacD',
'Eddy Mexico',
'Larry Lurr',
'HugS',
'OkamiBW',
'Ken',
'Tafokints',
'Santiago',
'Reno',
'Alex19',
'Connor [SoCal]',
'Bizzarro Flame',
'Hyprid',
'Lovage',
'Sung',
'Matt (Stab)',
'Mike Haze',
'Kira',
'Rofl',
'J666',
'Reason',
'A Rookie',
'Squid',
'Jace',
'KoopaTroopa',
'Captain Faceroll',
'PsychoMidget',
'JPeGImage',
'Sherigami',
'Mevan',
'Coolhat',
'Peligro',
'Dunk',
'DendyPretendy',
'Khepri',
'Mixx',
'SacaSuMoto',
'Null [SoCal]',
'Zeo [SoCal]',
'Tonic [SoCal]',
'Sora [SoCal]'
],
'North Carolina' : [
'LoZR',
'Jwilli',
'TS3D',
'Ke$ha',
'Wharve',
'Mining Elf',
'Dembo',
'tenbutts',
'QueTPie',
'Loudpackmatt',
'Tinkle',
'Catfish Joe',
'Banjo',
'KChain$',
'T Raw',
'@The_PriceisNICE',
'Arundo',
'Caleb [North Carolina]',
'Madz',
'Niq',
'Geezer',
'EZVega',
'Salsacat',
'Kun$',
'Byrd',
'):',
'Cloudsquall'
]
}
# Wrapper for regular expression compilation for mapping.
def compile_case_i_re(string):
return re.compile(string, re.IGNORECASE)
# Make sure sanitized_list and regex_list are the same size or
# you might get an index out of bounds error.
# Params:
# tag - String containing tag to check.
# regex_list - Compiled regular expressions to check against the tag, a list of lists
# sanitized_list - Sanitized versions of the tag.
def sanitize_tag(tag, regex_list, sanitized_list):
for i in range(len(regex_list)):
if regex_list[i].match(tag):
return sanitized_list[i]
return tag
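# Illustrative example (hypothetical tags; the full lists are built further below):
#   sanitize_tag('C9Mango', full_regex_list, full_sanitized_list) -> 'Mango'
#   sanitize_tag('UnknownTag42', full_regex_list, full_sanitized_list) -> 'UnknownTag42' (unchanged)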
# Identical to sanitize_tag, but returns list of matching regex expressions instead of returning the first one
def sanitize_tag_multiple(tag, regex_list, sanitized_list):
tag_list = []
for i in range(len(regex_list)):
if regex_list[i].match(tag):
tag_list.append(sanitized_list[i])
if len(tag_list) > 0:
return tag_list
else:
# if no matches, return the original tag searched
return tag
# Takes one entry of player_raw_regex_dict[region] at a time (applied via map), i.e. each call receives the list of regex alternatives for a single player.
def add_prefixes(regex_list):
wildcard = '.*'
sep = '[|_.`\' ]'
# suffix = '$)'
prefix = wildcard + sep
prefix_list = []
inner_list = []
for regex in regex_list:
# 4 combinations
inner_list.append(regex)
# inner_list.append(regex + '\)' + suffix)
inner_list.append(prefix + regex)
# inner_list.append(prefix + regex + ')' + suffix)
joined_regex = '|'.join(inner_list)
prefix_list.append(joined_regex)
return joined_regex
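# Illustrative example: add_prefixes(['(mang[o0]$)', '(c9mang[o0]$)']) returns a single alternation
# such as "(mang[o0]$)|.*[|_.`' ](mang[o0]$)|(c9mang[o0]$)|.*[|_.`' ](c9mang[o0]$)",
# so sponsor-prefixed tags like "C9 | Mango" still resolve to the bare tag.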
# if region==None or region==Global:
# prefixed_player_regex_list = map(add_prefixes, player_raw_regex_dict['Global$)'])
# player_regex_list =
# else:
# if region in Regex_list_dict:
# prefixed_player_regex_list = map(add_prefixes, player_raw_regex_dict['Global$)'])
# player_regex_list = map(compile_case_i_re, player_raw_regex_dict['Global$)'])
# Convert all lists in player_raw_regex_dict to a version with regular expression prefixes wildcard and sep added and suffix, then compile both list to all lowercase tags
player_regex_dict = defaultdict(str)
for region_name in player_raw_regex_dict:
# print "-----REGION_NAME", region_name
player_regex_dict[region_name] = map(add_prefixes, player_raw_regex_dict[region_name])
# print "-----PREFIXED_REGEX_DICT", player_regex_dict[region_name]
player_regex_dict[region_name] = map(compile_case_i_re, player_regex_dict[region_name])
# print "-----LOWERCASE", player_regex_dict
# print '\n'
# print "----- PLAYER_REGEX_DICT", player_regex_dict
# Combine region-separated values of player_regex_dict for a truly global list
# Combine region-separated values of sanitized_tags_dict for a truly comprehensive list
full_regex_list = []
full_sanitized_list = []
for region in player_regex_dict:
full_regex_list += player_regex_dict[region]
full_sanitized_list += sanitized_tags_dict[region]
# print "-----FULL_REGEX_LIST", full_regex_list
# print "-----FULL_SANITIZED_LIST", full_sanitized_list
# Wrapper for sanitize_tag.
def check_and_sanitize_tag(tag, *args): #region is optional parameter
# if region is included in parameter, use region list
if len(args)==1 and args[0] is not None:
region_name = args[0]
# DEBUG: print "region name", region_name, type(region_name). Make sure region_name is str and not Region object
if region_name in player_raw_regex_dict and region_name in sanitized_tags_dict:
return sanitize_tag(tag, player_regex_dict[region_name], sanitized_tags_dict[region_name])
elif len(args)==0 or args[0] is None:
region_name = "Global"
return sanitize_tag(tag, full_regex_list, full_sanitized_list)
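# Illustrative examples (hypothetical raw tags):
#   check_and_sanitize_tag('NEB | Slox', 'New England') -> 'Slox'
#   check_and_sanitize_tag('c9mang0') -> 'Mango' (no region given, so the combined Global lists are used)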
# Identical to check_and_sanitize_tag, but returns list of all matches
def check_and_sanitize_tag_multiple(tag, *args):
# if region is included in parameter, use region list
if len(args)==1 and args[0] is not None:
region_name = args[0]
if region_name in player_raw_regex_dict and region_name in sanitized_tags_dict:
return sanitize_tag_multiple(tag, player_regex_dict[region_name], sanitized_tags_dict[region_name])
elif len(args)==0 or args[0] is None:
region_name = "Global"
return sanitize_tag_multiple(tag, full_regex_list, full_sanitized_list)
# Function that checks the lengths of the raw regex dict and sanitized tags dict, and prints each index together for comparison
def debug_regex_lists(*args):
# if region is included in parameter, use region list
if len(args)==1 and args[0] is not None:
region_name = args[0]
print "Regex length: ", len(player_raw_regex_dict[region_name])
print "Sanitized length: ", len(sanitized_tags_dict[region_name])
for i in range(len(player_regex_dict[region_name])):
print player_raw_regex_dict[region_name][i], sanitized_tags_dict[region_name][i]
elif len(args)==0 or args[0] is None:
region_name = "Global"
print "Regex length: ", len(player_raw_regex_dict['Global'])
print "Sanitized length: ", len(sanitized_tags_dict['Global'])
for i in range(len(player_regex_dict['Global'])):
print player_raw_regex_dict['Global'][i], sanitized_tags_dict['Global'][i]
| 26.524741 | 176 | 0.499696 |
0d637b5484d27c8d71aac12a32802f93df3395c5
| 43,131 |
py
|
Python
|
azurelinuxagent/common/osutil/default.py
|
matkin-msft/jit_walinuxagent
|
ccd52807ab2c43548a4ef13d86ababeb7e3f8c1f
|
[
"Apache-2.0"
] | null | null | null |
azurelinuxagent/common/osutil/default.py
|
matkin-msft/jit_walinuxagent
|
ccd52807ab2c43548a4ef13d86ababeb7e3f8c1f
|
[
"Apache-2.0"
] | null | null | null |
azurelinuxagent/common/osutil/default.py
|
matkin-msft/jit_walinuxagent
|
ccd52807ab2c43548a4ef13d86ababeb7e3f8c1f
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import array
import base64
import datetime
import fcntl
import glob
import multiprocessing
import os
import platform
import pwd
import re
import shutil
import socket
import struct
import sys
import time
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.exception import OSUtilError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.utils.cryptutil import CryptUtil
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
__RULES_FILES__ = [ "/lib/udev/rules.d/75-persistent-net-generator.rules",
"/etc/udev/rules.d/70-persistent-net.rules" ]
"""
Define distro-specific behavior. The OSUtil class defines default behavior
for all distros. Each concrete distro class can override the default behavior
if needed.
"""
IPTABLES_VERSION_PATTERN = re.compile("^[^\d\.]*([\d\.]+).*$")
IPTABLES_VERSION = "iptables --version"
IPTABLES_LOCKING_VERSION = FlexibleVersion('1.4.21')
FIREWALL_ACCEPT = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m owner --uid-owner {3} -j ACCEPT"
# Note:
# -- Initially "flight" the change to ACCEPT packets and develop a metric baseline
# A subsequent release will convert the ACCEPT to DROP
# FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT"
FIREWALL_DROP = "iptables {0} -t security -{1} OUTPUT -d {2} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP"
FIREWALL_LIST = "iptables {0} -t security -L -nxv"
FIREWALL_PACKETS = "iptables {0} -t security -L OUTPUT --zero OUTPUT -nxv"
FIREWALL_FLUSH = "iptables {0} -t security --flush"
# Precisely delete the rules created by the agent.
# this rule was used <= 2.2.25. This rule helped to validate our change, and determine impact.
FIREWALL_DELETE_CONNTRACK_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j ACCEPT"
FIREWALL_DELETE_OWNER_ACCEPT = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m owner --uid-owner {2} -j ACCEPT"
FIREWALL_DELETE_CONNTRACK_DROP = "iptables {0} -t security -D OUTPUT -d {1} -p tcp -m conntrack --ctstate INVALID,NEW -j DROP"
PACKET_PATTERN = "^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$"
_enable_firewall = True
DMIDECODE_CMD = 'dmidecode --string system-uuid'
PRODUCT_ID_FILE = '/sys/class/dmi/id/product_uuid'
UUID_PATTERN = re.compile(
r'^\s*[A-F0-9]{8}(?:\-[A-F0-9]{4}){3}\-[A-F0-9]{12}\s*$',
re.IGNORECASE)
IOCTL_SIOCGIFCONF = 0x8912
IOCTL_SIOCGIFFLAGS = 0x8913
IOCTL_SIOCGIFHWADDR = 0x8927
IFNAMSIZ = 16
class DefaultOSUtil(object):
def __init__(self):
self.agent_conf_file_path = '/etc/waagent.conf'
self.selinux = None
self.disable_route_warning = False
def get_firewall_dropped_packets(self, dst_ip=None):
# If a previous attempt failed, do not retry
global _enable_firewall
if not _enable_firewall:
return 0
try:
wait = self.get_firewall_will_wait()
rc, output = shellutil.run_get_output(FIREWALL_PACKETS.format(wait), log_cmd=False)
if rc == 3:
# Transient error that we ignore. This code fires every loop
# of the daemon (60m), so we will get the value eventually.
return 0
if rc != 0:
return -1
pattern = re.compile(PACKET_PATTERN.format(dst_ip))
for line in output.split('\n'):
m = pattern.match(line)
if m is not None:
return int(m.group(1))
return 0
except Exception as e:
_enable_firewall = False
logger.warn("Unable to retrieve firewall packets dropped"
"{0}".format(ustr(e)))
return -1
def get_firewall_will_wait(self):
# Determine if iptables will serialize access
rc, output = shellutil.run_get_output(IPTABLES_VERSION)
if rc != 0:
msg = "Unable to determine version of iptables"
logger.warn(msg)
raise Exception(msg)
m = IPTABLES_VERSION_PATTERN.match(output)
if m is None:
msg = "iptables did not return version information"
logger.warn(msg)
raise Exception(msg)
wait = "-w" \
if FlexibleVersion(m.group(1)) >= IPTABLES_LOCKING_VERSION \
else ""
return wait
def _delete_rule(self, rule):
"""
Continually execute the delete operation until the return
code is non-zero or the limit has been reached.
"""
for i in range(1, 100):
rc = shellutil.run(rule, chk_err=False)
if rc == 1:
return
elif rc == 2:
raise Exception("invalid firewall deletion rule '{0}'".format(rule))
def remove_firewall(self, dst_ip=None, uid=None):
# If a previous attempt failed, do not retry
global _enable_firewall
if not _enable_firewall:
return False
try:
if dst_ip is None or uid is None:
msg = "Missing arguments to enable_firewall"
logger.warn(msg)
raise Exception(msg)
wait = self.get_firewall_will_wait()
# This rule was <= 2.2.25 only, and may still exist on some VMs. Until 2.2.25
# has aged out, keep this cleanup in place.
self._delete_rule(FIREWALL_DELETE_CONNTRACK_ACCEPT.format(wait, dst_ip))
self._delete_rule(FIREWALL_DELETE_OWNER_ACCEPT.format(wait, dst_ip, uid))
self._delete_rule(FIREWALL_DELETE_CONNTRACK_DROP.format(wait, dst_ip))
return True
except Exception as e:
_enable_firewall = False
logger.info("Unable to remove firewall -- "
"no further attempts will be made: "
"{0}".format(ustr(e)))
return False
def enable_firewall(self, dst_ip=None, uid=None):
# If a previous attempt failed, do not retry
global _enable_firewall
if not _enable_firewall:
return False
try:
if dst_ip is None or uid is None:
msg = "Missing arguments to enable_firewall"
logger.warn(msg)
raise Exception(msg)
wait = self.get_firewall_will_wait()
# If the DROP rule exists, make no changes
drop_rule = FIREWALL_DROP.format(wait, "C", dst_ip)
rc = shellutil.run(drop_rule, chk_err=False)
if rc == 0:
logger.verbose("Firewall appears established")
return True
elif rc == 2:
self.remove_firewall(dst_ip, uid)
msg = "please upgrade iptables to a version that supports the -C option"
logger.warn(msg)
raise Exception(msg)
# Otherwise, append both rules
accept_rule = FIREWALL_ACCEPT.format(wait, "A", dst_ip, uid)
drop_rule = FIREWALL_DROP.format(wait, "A", dst_ip)
if shellutil.run(accept_rule) != 0:
msg = "Unable to add ACCEPT firewall rule '{0}'".format(
accept_rule)
logger.warn(msg)
raise Exception(msg)
if shellutil.run(drop_rule) != 0:
msg = "Unable to add DROP firewall rule '{0}'".format(
drop_rule)
logger.warn(msg)
raise Exception(msg)
logger.info("Successfully added Azure fabric firewall rules")
rc, output = shellutil.run_get_output(FIREWALL_LIST.format(wait))
if rc == 0:
logger.info("Firewall rules:\n{0}".format(output))
else:
logger.warn("Listing firewall rules failed: {0}".format(output))
return True
except Exception as e:
_enable_firewall = False
logger.info("Unable to establish firewall -- "
"no further attempts will be made: "
"{0}".format(ustr(e)))
return False
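    # Editor's illustrative note (not part of the original module): with, for
    # example, dst_ip='168.63.129.16' and uid=0 on an iptables that supports
    # '-w', the two rules appended above expand to roughly:
    #   iptables -w -t security -A OUTPUT -d 168.63.129.16 -p tcp -m owner --uid-owner 0 -j ACCEPT
    #   iptables -w -t security -A OUTPUT -d 168.63.129.16 -p tcp -m conntrack --ctstate INVALID,NEW -j DROP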
def _correct_instance_id(self, id):
'''
        Azure stores the instance ID with an incorrect byte ordering for the
        first three parts. For example, the ID returned by the metadata service:
D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8
will be found as:
544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8
This code corrects the byte order such that it is consistent with
that returned by the metadata service.
'''
if not UUID_PATTERN.match(id):
return id
parts = id.split('-')
return '-'.join([
textutil.swap_hexstring(parts[0], width=2),
textutil.swap_hexstring(parts[1], width=2),
textutil.swap_hexstring(parts[2], width=2),
parts[3],
parts[4]
])
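    # Editor's illustrative sketch (not part of the original module), assuming
    # textutil.swap_hexstring(s, width=2) reverses a hex string in 2-character
    # (one byte) chunks:
    #   swap = lambda s: ''.join(reversed([s[i:i + 2] for i in range(0, len(s), 2)]))
    #   swap('544CDFD0'), swap('CB4E'), swap('4B4A')   # -> ('D0DF4C54', '4ECB', '4A4B')
    # which turns '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8' back into the
    # metadata-service form 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'.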
def is_current_instance_id(self, id_that):
'''
Compare two instance IDs for equality, but allow that some IDs
may have been persisted using the incorrect byte ordering.
'''
id_this = self.get_instance_id()
return id_that == id_this or \
id_that == self._correct_instance_id(id_this)
def get_agent_conf_file_path(self):
return self.agent_conf_file_path
def get_instance_id(self):
'''
        Azure records a UUID as the instance ID.
        First check /sys/class/dmi/id/product_uuid.
        If that is missing, extract it from dmidecode.
        If nothing works (for old VMs), return the empty string.
'''
if os.path.isfile(PRODUCT_ID_FILE):
s = fileutil.read_file(PRODUCT_ID_FILE).strip()
else:
rc, s = shellutil.run_get_output(DMIDECODE_CMD)
if rc != 0 or UUID_PATTERN.match(s) is None:
return ""
return self._correct_instance_id(s.strip())
def get_userentry(self, username):
try:
return pwd.getpwnam(username)
except KeyError:
return None
def is_sys_user(self, username):
"""
        Check whether the user is a system user.
        If resetting system users is allowed in conf, return False.
        Otherwise, check whether the UID is less than UID_MIN.
"""
if conf.get_allow_reset_sys_user():
return False
userentry = self.get_userentry(username)
uidmin = None
try:
uidmin_def = fileutil.get_line_startingwith("UID_MIN",
"/etc/login.defs")
if uidmin_def is not None:
uidmin = int(uidmin_def.split()[1])
except IOError as e:
pass
if uidmin == None:
uidmin = 100
if userentry != None and userentry[2] < uidmin:
return True
else:
return False
def useradd(self, username, expiration=None):
"""
Create user account with 'username'
"""
userentry = self.get_userentry(username)
if userentry is not None:
logger.info("User {0} already exists, skip useradd", username)
return
if expiration is not None:
cmd = "useradd -m {0} -e {1}".format(username, expiration)
else:
cmd = "useradd -m {0}".format(username)
retcode, out = shellutil.run_get_output(cmd)
if retcode != 0:
raise OSUtilError(("Failed to create user account:{0}, "
"retcode:{1}, "
"output:{2}").format(username, retcode, out))
def chpasswd(self, username, password, crypt_id=6, salt_len=10):
if self.is_sys_user(username):
raise OSUtilError(("User {0} is a system user, "
"will not set password.").format(username))
passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len)
cmd = "usermod -p '{0}' {1}".format(passwd_hash, username)
ret, output = shellutil.run_get_output(cmd, log_cmd=False)
if ret != 0:
raise OSUtilError(("Failed to set password for {0}: {1}"
"").format(username, output))
def conf_sudoer(self, username, nopasswd=False, remove=False):
sudoers_dir = conf.get_sudoers_dir()
sudoers_wagent = os.path.join(sudoers_dir, 'waagent')
if not remove:
# for older distros create sudoers.d
if not os.path.isdir(sudoers_dir):
sudoers_file = os.path.join(sudoers_dir, '../sudoers')
# create the sudoers.d directory
os.mkdir(sudoers_dir)
# add the include of sudoers.d to the /etc/sudoers
sudoers = '\n#includedir ' + sudoers_dir + '\n'
fileutil.append_file(sudoers_file, sudoers)
sudoer = None
if nopasswd:
sudoer = "{0} ALL=(ALL) NOPASSWD: ALL".format(username)
else:
sudoer = "{0} ALL=(ALL) ALL".format(username)
if not os.path.isfile(sudoers_wagent) or \
fileutil.findstr_in_file(sudoers_wagent, sudoer) is False:
fileutil.append_file(sudoers_wagent, "{0}\n".format(sudoer))
fileutil.chmod(sudoers_wagent, 0o440)
else:
# remove user from sudoers
if os.path.isfile(sudoers_wagent):
try:
content = fileutil.read_file(sudoers_wagent)
sudoers = content.split("\n")
sudoers = [x for x in sudoers if username not in x]
fileutil.write_file(sudoers_wagent, "\n".join(sudoers))
except IOError as e:
raise OSUtilError("Failed to remove sudoer: {0}".format(e))
def del_root_password(self):
try:
passwd_file_path = conf.get_passwd_file_path()
passwd_content = fileutil.read_file(passwd_file_path)
passwd = passwd_content.split('\n')
new_passwd = [x for x in passwd if not x.startswith("root:")]
new_passwd.insert(0, "root:*LOCK*:14600::::::")
fileutil.write_file(passwd_file_path, "\n".join(new_passwd))
except IOError as e:
raise OSUtilError("Failed to delete root password:{0}".format(e))
def _norm_path(self, filepath):
home = conf.get_home_dir()
# Expand HOME variable if present in path
path = os.path.normpath(filepath.replace("$HOME", home))
return path
def deploy_ssh_keypair(self, username, keypair):
"""
Deploy id_rsa and id_rsa.pub
"""
path, thumbprint = keypair
path = self._norm_path(path)
dir_path = os.path.dirname(path)
fileutil.mkdir(dir_path, mode=0o700, owner=username)
lib_dir = conf.get_lib_dir()
prv_path = os.path.join(lib_dir, thumbprint + '.prv')
if not os.path.isfile(prv_path):
raise OSUtilError("Can't find {0}.prv".format(thumbprint))
shutil.copyfile(prv_path, path)
pub_path = path + '.pub'
crytputil = CryptUtil(conf.get_openssl_cmd())
pub = crytputil.get_pubkey_from_prv(prv_path)
fileutil.write_file(pub_path, pub)
self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0')
self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0')
os.chmod(path, 0o644)
os.chmod(pub_path, 0o600)
def openssl_to_openssh(self, input_file, output_file):
cryptutil = CryptUtil(conf.get_openssl_cmd())
cryptutil.crt_to_ssh(input_file, output_file)
def deploy_ssh_pubkey(self, username, pubkey):
"""
Deploy authorized_key
"""
path, thumbprint, value = pubkey
if path is None:
raise OSUtilError("Public key path is None")
crytputil = CryptUtil(conf.get_openssl_cmd())
path = self._norm_path(path)
dir_path = os.path.dirname(path)
fileutil.mkdir(dir_path, mode=0o700, owner=username)
if value is not None:
if not value.startswith("ssh-"):
raise OSUtilError("Bad public key: {0}".format(value))
fileutil.write_file(path, value)
elif thumbprint is not None:
lib_dir = conf.get_lib_dir()
crt_path = os.path.join(lib_dir, thumbprint + '.crt')
if not os.path.isfile(crt_path):
raise OSUtilError("Can't find {0}.crt".format(thumbprint))
pub_path = os.path.join(lib_dir, thumbprint + '.pub')
pub = crytputil.get_pubkey_from_crt(crt_path)
fileutil.write_file(pub_path, pub)
self.set_selinux_context(pub_path,
'unconfined_u:object_r:ssh_home_t:s0')
self.openssl_to_openssh(pub_path, path)
fileutil.chmod(pub_path, 0o600)
else:
raise OSUtilError("SSH public key Fingerprint and Value are None")
self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0')
fileutil.chowner(path, username)
fileutil.chmod(path, 0o644)
def is_selinux_system(self):
"""
        Checks and sets self.selinux = True if SELinux is available on the system.
"""
if self.selinux == None:
if shellutil.run("which getenforce", chk_err=False) == 0:
self.selinux = True
else:
self.selinux = False
return self.selinux
def is_selinux_enforcing(self):
"""
Calls shell command 'getenforce' and returns True if 'Enforcing'.
"""
if self.is_selinux_system():
output = shellutil.run_get_output("getenforce")[1]
return output.startswith("Enforcing")
else:
return False
def set_selinux_context(self, path, con):
"""
Calls shell 'chcon' with 'path' and 'con' context.
Returns exit result.
"""
if self.is_selinux_system():
if not os.path.exists(path):
logger.error("Path does not exist: {0}".format(path))
return 1
return shellutil.run('chcon ' + con + ' ' + path)
def conf_sshd(self, disable_password):
option = "no" if disable_password else "yes"
conf_file_path = conf.get_sshd_conf_file_path()
conf_file = fileutil.read_file(conf_file_path).split("\n")
textutil.set_ssh_config(conf_file, "PasswordAuthentication", option)
textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", option)
textutil.set_ssh_config(conf_file, "ClientAliveInterval", str(conf.get_ssh_client_alive_interval()))
fileutil.write_file(conf_file_path, "\n".join(conf_file))
logger.info("{0} SSH password-based authentication methods."
.format("Disabled" if disable_password else "Enabled"))
logger.info("Configured SSH client probing to keep connections alive.")
def get_dvd_device(self, dev_dir='/dev'):
pattern = r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9])'
device_list = os.listdir(dev_dir)
for dvd in [re.match(pattern, dev) for dev in device_list]:
if dvd is not None:
return "/dev/{0}".format(dvd.group(0))
inner_detail = "The following devices were found, but none matched " \
"the pattern [{0}]: {1}\n".format(pattern, device_list)
raise OSUtilError(msg="Failed to get dvd device from {0}".format(dev_dir),
inner=inner_detail)
def mount_dvd(self,
max_retry=6,
chk_err=True,
dvd_device=None,
mount_point=None,
sleep_time=5):
if dvd_device is None:
dvd_device = self.get_dvd_device()
if mount_point is None:
mount_point = conf.get_dvd_mount_point()
mount_list = shellutil.run_get_output("mount")[1]
existing = self.get_mount_point(mount_list, dvd_device)
if existing is not None:
# already mounted
logger.info("{0} is already mounted at {1}", dvd_device, existing)
return
if not os.path.isdir(mount_point):
os.makedirs(mount_point)
err = ''
for retry in range(1, max_retry):
return_code, err = self.mount(dvd_device,
mount_point,
option="-o ro -t udf,iso9660",
chk_err=False)
if return_code == 0:
logger.info("Successfully mounted dvd")
return
else:
logger.warn(
"Mounting dvd failed [retry {0}/{1}, sleeping {2} sec]",
retry,
max_retry - 1,
sleep_time)
if retry < max_retry:
time.sleep(sleep_time)
if chk_err:
raise OSUtilError("Failed to mount dvd device", inner=err)
def umount_dvd(self, chk_err=True, mount_point=None):
if mount_point is None:
mount_point = conf.get_dvd_mount_point()
return_code = self.umount(mount_point, chk_err=chk_err)
if chk_err and return_code != 0:
raise OSUtilError("Failed to unmount dvd device at {0}",
mount_point)
def eject_dvd(self, chk_err=True):
dvd = self.get_dvd_device()
retcode = shellutil.run("eject {0}".format(dvd))
if chk_err and retcode != 0:
raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode))
def try_load_atapiix_mod(self):
try:
self.load_atapiix_mod()
except Exception as e:
logger.warn("Could not load ATAPI driver: {0}".format(e))
def load_atapiix_mod(self):
if self.is_atapiix_mod_loaded():
return
ret, kern_version = shellutil.run_get_output("uname -r")
if ret != 0:
raise Exception("Failed to call uname -r")
mod_path = os.path.join('/lib/modules',
kern_version.strip('\n'),
'kernel/drivers/ata/ata_piix.ko')
if not os.path.isfile(mod_path):
raise Exception("Can't find module file:{0}".format(mod_path))
ret, output = shellutil.run_get_output("insmod " + mod_path)
if ret != 0:
raise Exception("Error calling insmod for ATAPI CD-ROM driver")
if not self.is_atapiix_mod_loaded(max_retry=3):
raise Exception("Failed to load ATAPI CD-ROM driver")
def is_atapiix_mod_loaded(self, max_retry=1):
for retry in range(0, max_retry):
ret = shellutil.run("lsmod | grep ata_piix", chk_err=False)
if ret == 0:
logger.info("Module driver for ATAPI CD-ROM is already present.")
return True
if retry < max_retry - 1:
time.sleep(1)
return False
def mount(self, dvd, mount_point, option="", chk_err=True):
cmd = "mount {0} {1} {2}".format(option, dvd, mount_point)
retcode, err = shellutil.run_get_output(cmd, chk_err)
if retcode != 0:
detail = "[{0}] returned {1}: {2}".format(cmd, retcode, err)
err = detail
return retcode, err
def umount(self, mount_point, chk_err=True):
return shellutil.run("umount {0}".format(mount_point), chk_err=chk_err)
def allow_dhcp_broadcast(self):
# Open DHCP port if iptables is enabled.
        # We suppress error logging on error.
shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT",
chk_err=False)
shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT",
chk_err=False)
def remove_rules_files(self, rules_files=__RULES_FILES__):
lib_dir = conf.get_lib_dir()
for src in rules_files:
file_name = fileutil.base_name(src)
dest = os.path.join(lib_dir, file_name)
if os.path.isfile(dest):
os.remove(dest)
if os.path.isfile(src):
logger.warn("Move rules file {0} to {1}", file_name, dest)
shutil.move(src, dest)
def restore_rules_files(self, rules_files=__RULES_FILES__):
lib_dir = conf.get_lib_dir()
for dest in rules_files:
filename = fileutil.base_name(dest)
src = os.path.join(lib_dir, filename)
if os.path.isfile(dest):
continue
if os.path.isfile(src):
logger.warn("Move rules file {0} to {1}", filename, dest)
shutil.move(src, dest)
def get_mac_addr(self):
"""
        Convenience function; returns the MAC address bound to the
        first non-loopback interface.
"""
ifname=''
while len(ifname) < 2 :
ifname=self.get_first_if()[0]
addr = self.get_if_mac(ifname)
return textutil.hexstr_to_bytearray(addr)
def get_if_mac(self, ifname):
"""
        Return the MAC address bound to the given interface.
"""
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
param = struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1'))
info = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFHWADDR, param)
sock.close()
return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]])
@staticmethod
def _get_struct_ifconf_size():
"""
Return the sizeof struct ifinfo. On 64-bit platforms the size is 40 bytes;
on 32-bit platforms the size is 32 bytes.
"""
python_arc = platform.architecture()[0]
struct_size = 32 if python_arc == '32bit' else 40
return struct_size
def _get_all_interfaces(self):
"""
Return a dictionary mapping from interface name to IPv4 address.
Interfaces without a name are ignored.
"""
expected=16 # how many devices should I expect...
struct_size = DefaultOSUtil._get_struct_ifconf_size()
array_size = expected * struct_size
buff = array.array('B', b'\0' * array_size)
param = struct.pack('iL', array_size, buff.buffer_info()[0])
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ret = fcntl.ioctl(sock.fileno(), IOCTL_SIOCGIFCONF, param)
retsize = (struct.unpack('iL', ret)[0])
sock.close()
if retsize == array_size:
logger.warn(('SIOCGIFCONF returned more than {0} up '
'network interfaces.'), expected)
ifconf_buff = buff.tostring()
ifaces = {}
for i in range(0, array_size, struct_size):
iface = ifconf_buff[i:i+IFNAMSIZ].split(b'\0', 1)[0]
if len(iface) > 0:
iface_name = iface.decode('latin-1')
if iface_name not in ifaces:
ifaces[iface_name] = socket.inet_ntoa(ifconf_buff[i+20:i+24])
return ifaces
def get_first_if(self):
"""
        Return the interface name and IPv4 address of the "primary" interface
        or, failing that, of any active non-loopback interface.
"""
primary = self.get_primary_interface()
ifaces = self._get_all_interfaces()
if primary in ifaces:
return primary, ifaces[primary]
logger.warn(('Primary interface {0} not found in ifconf list'), primary)
for iface_name in ifaces.keys():
if not self.is_loopback(iface_name):
if not self.disable_route_warning:
logger.info("Choosing non-primary {0}".format(iface_name))
return iface_name, ifaces[iface_name]
msg = 'No non-loopback interface found in ifconf list'
logger.warn(msg)
raise Exception(msg)
def get_primary_interface(self):
"""
Get the name of the primary interface, which is the one with the
default route attached to it; if there are multiple default routes,
the primary has the lowest Metric.
:return: the interface which has the default route
"""
# from linux/route.h
RTF_GATEWAY = 0x02
DEFAULT_DEST = "00000000"
hdr_iface = "Iface"
hdr_dest = "Destination"
hdr_flags = "Flags"
hdr_metric = "Metric"
idx_iface = -1
idx_dest = -1
idx_flags = -1
idx_metric = -1
primary = None
primary_metric = None
if not self.disable_route_warning:
logger.info("Examine /proc/net/route for primary interface")
with open('/proc/net/route') as routing_table:
idx = 0
for header in filter(lambda h: len(h) > 0, routing_table.readline().strip(" \n").split("\t")):
if header == hdr_iface:
idx_iface = idx
elif header == hdr_dest:
idx_dest = idx
elif header == hdr_flags:
idx_flags = idx
elif header == hdr_metric:
idx_metric = idx
idx = idx + 1
for entry in routing_table.readlines():
route = entry.strip(" \n").split("\t")
if route[idx_dest] == DEFAULT_DEST and int(route[idx_flags]) & RTF_GATEWAY == RTF_GATEWAY:
metric = int(route[idx_metric])
iface = route[idx_iface]
if primary is None or metric < primary_metric:
primary = iface
primary_metric = metric
if primary is None:
primary = ''
if not self.disable_route_warning:
with open('/proc/net/route') as routing_table_fh:
routing_table_text = routing_table_fh.read()
logger.warn('Could not determine primary interface, '
'please ensure /proc/net/route is correct')
logger.warn('Contents of /proc/net/route:\n{0}'.format(routing_table_text))
logger.warn('Primary interface examination will retry silently')
self.disable_route_warning = True
else:
logger.info('Primary interface is [{0}]'.format(primary))
self.disable_route_warning = False
return primary
def is_primary_interface(self, ifname):
"""
Indicate whether the specified interface is the primary.
:param ifname: the name of the interface - eth0, lo, etc.
:return: True if this interface binds the default route
"""
return self.get_primary_interface() == ifname
def is_loopback(self, ifname):
"""
Determine if a named interface is loopback.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ifname_buff = ifname + ('\0'*256)
result = fcntl.ioctl(s.fileno(), IOCTL_SIOCGIFFLAGS, ifname_buff)
flags, = struct.unpack('H', result[16:18])
isloopback = flags & 8 == 8
if not self.disable_route_warning:
logger.info('interface [{0}] has flags [{1}], '
'is loopback [{2}]'.format(ifname, flags, isloopback))
s.close()
return isloopback
def get_dhcp_lease_endpoint(self):
"""
OS specific, this should return the decoded endpoint of
the wireserver from option 245 in the dhcp leases file
if it exists on disk.
:return: The endpoint if available, or None
"""
return None
@staticmethod
def get_endpoint_from_leases_path(pathglob):
"""
Try to discover and decode the wireserver endpoint in the
specified dhcp leases path.
:param pathglob: The path containing dhcp lease files
:return: The endpoint if available, otherwise None
"""
endpoint = None
HEADER_LEASE = "lease"
HEADER_OPTION = "option unknown-245"
HEADER_DNS = "option domain-name-servers"
HEADER_EXPIRE = "expire"
FOOTER_LEASE = "}"
FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S"
logger.info("looking for leases in path [{0}]".format(pathglob))
for lease_file in glob.glob(pathglob):
leases = open(lease_file).read()
if HEADER_OPTION in leases:
cached_endpoint = None
has_option_245 = False
expired = True # assume expired
for line in leases.splitlines():
if line.startswith(HEADER_LEASE):
cached_endpoint = None
has_option_245 = False
expired = True
elif HEADER_DNS in line:
cached_endpoint = line.replace(HEADER_DNS, '').strip(" ;")
elif HEADER_OPTION in line:
has_option_245 = True
elif HEADER_EXPIRE in line:
if "never" in line:
expired = False
else:
try:
expire_string = line.split(" ", 4)[-1].strip(";")
expire_date = datetime.datetime.strptime(expire_string, FORMAT_DATETIME)
if expire_date > datetime.datetime.utcnow():
expired = False
except:
logger.error("could not parse expiry token '{0}'".format(line))
elif FOOTER_LEASE in line:
logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format(
cached_endpoint, has_option_245, expired))
if not expired and cached_endpoint is not None and has_option_245:
endpoint = cached_endpoint
logger.info("found endpoint [{0}]".format(endpoint))
# we want to return the last valid entry, so
# keep searching
if endpoint is not None:
logger.info("cached endpoint found [{0}]".format(endpoint))
else:
logger.info("cached endpoint not found")
return endpoint
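    # Editor's illustrative note (not part of the original module): a dhclient
    # lease entry that the parser above would accept looks roughly like
    #   lease {
    #     interface "eth0";
    #     option unknown-245 a8:3f:81:10;
    #     option domain-name-servers 168.63.129.16;
    #     expire 4 2024/01/01 00:00:00;
    #   }
    # yielding the endpoint '168.63.129.16' (taken from the domain-name-servers
    # line) as long as the expiry is in the future or is 'never'.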
def is_missing_default_route(self):
routes = shellutil.run_get_output("route -n")[1]
for route in routes.split("\n"):
if route.startswith("0.0.0.0 ") or route.startswith("default "):
return False
return True
def get_if_name(self):
return self.get_first_if()[0]
def get_ip4_addr(self):
return self.get_first_if()[1]
def set_route_for_dhcp_broadcast(self, ifname):
return shellutil.run("route add 255.255.255.255 dev {0}".format(ifname),
chk_err=False)
def remove_route_for_dhcp_broadcast(self, ifname):
shellutil.run("route del 255.255.255.255 dev {0}".format(ifname),
chk_err=False)
def is_dhcp_enabled(self):
return False
def stop_dhcp_service(self):
pass
def start_dhcp_service(self):
pass
def start_network(self):
pass
def start_agent_service(self):
pass
def stop_agent_service(self):
pass
def register_agent_service(self):
pass
def unregister_agent_service(self):
pass
def restart_ssh_service(self):
pass
def route_add(self, net, mask, gateway):
"""
Add specified route using /sbin/route add -net.
"""
cmd = ("/sbin/route add -net "
"{0} netmask {1} gw {2}").format(net, mask, gateway)
return shellutil.run(cmd, chk_err=False)
def get_dhcp_pid(self):
ret = shellutil.run_get_output("pidof dhclient", chk_err=False)
return ret[1] if ret[0] == 0 else None
def set_hostname(self, hostname):
fileutil.write_file('/etc/hostname', hostname)
shellutil.run("hostname {0}".format(hostname), chk_err=False)
def set_dhcp_hostname(self, hostname):
autosend = r'^[^#]*?send\s*host-name.*?(<hostname>|gethostname[(,)])'
dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf', '/etc/dhclient.conf']
for conf_file in dhclient_files:
if not os.path.isfile(conf_file):
continue
if fileutil.findre_in_file(conf_file, autosend):
#Return if auto send host-name is configured
return
fileutil.update_conf_file(conf_file,
'send host-name',
'send host-name "{0}";'.format(hostname))
def restart_if(self, ifname, retries=3, wait=5):
retry_limit=retries+1
for attempt in range(1, retry_limit):
return_code=shellutil.run("ifdown {0} && ifup {0}".format(ifname))
if return_code == 0:
return
logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code))
if attempt < retry_limit:
logger.info("retrying in {0} seconds".format(wait))
time.sleep(wait)
else:
logger.warn("exceeded restart retries")
def publish_hostname(self, hostname):
self.set_dhcp_hostname(hostname)
self.set_hostname_record(hostname)
ifname = self.get_if_name()
self.restart_if(ifname)
def set_scsi_disks_timeout(self, timeout):
for dev in os.listdir("/sys/block"):
if dev.startswith('sd'):
self.set_block_device_timeout(dev, timeout)
def set_block_device_timeout(self, dev, timeout):
if dev is not None and timeout is not None:
file_path = "/sys/block/{0}/device/timeout".format(dev)
content = fileutil.read_file(file_path)
original = content.splitlines()[0].rstrip()
if original != timeout:
fileutil.write_file(file_path, timeout)
logger.info("Set block dev timeout: {0} with timeout: {1}",
dev, timeout)
def get_mount_point(self, mountlist, device):
"""
Example of mountlist:
/dev/sda1 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs
(rw,rootcontext="system_u:object_r:tmpfs_t:s0")
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/sdb1 on /mnt/resource type ext4 (rw)
"""
if (mountlist and device):
for entry in mountlist.split('\n'):
if(re.search(device, entry)):
tokens = entry.split()
#Return the 3rd column of this line
return tokens[2] if len(tokens) > 2 else None
return None
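    # Editor's illustrative note (not part of the original module): with the
    # sample mountlist shown in the docstring above,
    #   get_mount_point(mountlist, '/dev/sdb')
    # matches the '/dev/sdb1 on /mnt/resource ...' entry and returns
    # '/mnt/resource' (the third whitespace-separated token of that line).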
def device_for_ide_port(self, port_id):
"""
Return device name attached to ide port 'n'.
"""
if port_id > 3:
return None
g0 = "00000000"
if port_id > 1:
g0 = "00000001"
port_id = port_id - 2
device = None
path = "/sys/bus/vmbus/devices/"
if os.path.exists(path):
for vmbus in os.listdir(path):
deviceid = fileutil.read_file(os.path.join(path, vmbus, "device_id"))
guid = deviceid.lstrip('{').split('-')
if guid[0] == g0 and guid[1] == "000" + ustr(port_id):
for root, dirs, files in os.walk(path + vmbus):
if root.endswith("/block"):
device = dirs[0]
break
else : #older distros
for d in dirs:
if ':' in d and "block" == d.split(':')[0]:
device = d.split(':')[1]
break
break
return device
def set_hostname_record(self, hostname):
fileutil.write_file(conf.get_published_hostname(), contents=hostname)
def get_hostname_record(self):
hostname_record = conf.get_published_hostname()
if not os.path.exists(hostname_record):
# this file is created at provisioning time with agents >= 2.2.3
hostname = socket.gethostname()
logger.info('Hostname record does not exist, '
'creating [{0}] with hostname [{1}]',
hostname_record,
hostname)
self.set_hostname_record(hostname)
record = fileutil.read_file(hostname_record)
return record
def del_account(self, username):
if self.is_sys_user(username):
logger.error("{0} is a system user. Will not delete it.", username)
shellutil.run("> /var/run/utmp")
shellutil.run("userdel -f -r " + username)
self.conf_sudoer(username, remove=True)
def decode_customdata(self, data):
return base64.b64decode(data).decode('utf-8')
def get_total_mem(self):
# Get total memory in bytes and divide by 1024**2 to get the value in MB.
return os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024**2)
def get_processor_cores(self):
return multiprocessing.cpu_count()
def check_pid_alive(self, pid):
return pid is not None and os.path.isdir(os.path.join('/proc', pid))
@property
def is_64bit(self):
return sys.maxsize > 2**32
| 39.138838 | 130 | 0.576569 |
428397a0733952980885a9fd8f40e0f0d40d646a
| 1,733 |
py
|
Python
|
code/gpu.py
|
michaelhball/ml_tidbits
|
55b77fded5f31cd280f043c8aa792a07ca572170
|
[
"MIT"
] | 1 |
2021-04-15T19:42:51.000Z
|
2021-04-15T19:42:51.000Z
|
code/gpu.py
|
michaelhball/ml_toolshed
|
55b77fded5f31cd280f043c8aa792a07ca572170
|
[
"MIT"
] | null | null | null |
code/gpu.py
|
michaelhball/ml_toolshed
|
55b77fded5f31cd280f043c8aa792a07ca572170
|
[
"MIT"
] | null | null | null |
import GPUtil
import os
import tensorflow as tf
def configure_gpu_tf():
""" This is an example for how to customise the search for a GPU for a specific job depending on
hardware/organisational requirements. In this case, we have a machine with two GPUs on which we want to support
three simultaneous GPU jobs (& unlimited CPU). """
try:
# locate available devices & set required environment variables
available_device_ids = GPUtil.getFirstAvailable(order='first', maxLoad=0.7, maxMemory=0.7, attempts=1, interval=10)
available_device_id = available_device_ids[0]
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(available_device_id)
print(f"\n GPU Found! running on GPU:{available_device_id}\n")
# set GPU configuration (use all GPU memory if device 0, else use <50% of memory)
tf.debugging.set_log_device_placement(False)
physical_gpu = tf.config.experimental.list_physical_devices('GPU')[0]
if available_device_id == 0:
tf.config.experimental.set_memory_growth(physical_gpu, True)
else:
tf.config.experimental.set_virtual_device_configuration(
physical_gpu,
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4500)]
)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
assert len(logical_gpus) == 1, "error creating virtual GPU to fractionally use memory"
# if we can't find a GPU, or they are all busy, default to using CPU
except RuntimeError:
print("\n No GPUs available... running on CPU\n")
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
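# --- Editor's illustrative usage sketch (assumed, not part of the original file) ---
# configure_gpu_tf() must run before TensorFlow allocates any device state, so a
# typical entry point might look like this:
if __name__ == '__main__':
    configure_gpu_tf()
    # After configuration, at most one logical GPU should be visible to TF.
    print(tf.config.experimental.list_logical_devices('GPU'))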
| 46.837838 | 123 | 0.691287 |
a7b89dbaa1a7be602a5376ba2673a804d362b6df
| 26,495 |
py
|
Python
|
cupy_setup_build.py
|
weareno1/cupy
|
ac52cce00b69d97b5d99bd1f91caed720b32b2d3
|
[
"MIT"
] | null | null | null |
cupy_setup_build.py
|
weareno1/cupy
|
ac52cce00b69d97b5d99bd1f91caed720b32b2d3
|
[
"MIT"
] | null | null | null |
cupy_setup_build.py
|
weareno1/cupy
|
ac52cce00b69d97b5d99bd1f91caed720b32b2d3
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
from distutils import ccompiler
from distutils import errors
from distutils import msvccompiler
from distutils import sysconfig
from distutils import unixccompiler
import glob
import os
from os import path
import shutil
import sys
import pkg_resources
import setuptools
from setuptools.command import build_ext
from setuptools.command import sdist
from install import build
from install.build import PLATFORM_DARWIN
from install.build import PLATFORM_LINUX
from install.build import PLATFORM_WIN32
required_cython_version = pkg_resources.parse_version('0.28.0')
ignore_cython_versions = [
]
MODULES = [
{
'name': 'cuda',
'file': [
'cupy.core._dtype',
'cupy.core._kernel',
'cupy.core._routines_indexing',
'cupy.core._routines_logic',
'cupy.core._routines_manipulation',
'cupy.core._routines_math',
'cupy.core._routines_sorting',
'cupy.core._routines_statistics',
'cupy.core._scalar',
'cupy.core.core',
'cupy.core.dlpack',
'cupy.core.flags',
'cupy.core.internal',
'cupy.core.fusion',
'cupy.core.raw',
'cupy.cuda.cublas',
'cupy.cuda.cufft',
'cupy.cuda.curand',
'cupy.cuda.cusparse',
'cupy.cuda.device',
'cupy.cuda.driver',
'cupy.cuda.memory',
'cupy.cuda.memory_hook',
'cupy.cuda.nvrtc',
'cupy.cuda.pinned_memory',
'cupy.cuda.profiler',
'cupy.cuda.function',
'cupy.cuda.stream',
'cupy.cuda.runtime',
'cupy.util',
],
'include': [
'cublas_v2.h',
'cuda.h',
'cuda_profiler_api.h',
'cuda_runtime.h',
'cufft.h',
'curand.h',
'cusparse.h',
'nvrtc.h',
],
'libraries': [
'cublas',
'cuda',
'cudart',
'cufft',
'curand',
'cusparse',
'nvrtc',
],
'check_method': build.check_cuda_version,
'version_method': build.get_cuda_version,
},
{
'name': 'cudnn',
'file': [
'cupy.cuda.cudnn',
'cupy.cudnn',
],
'include': [
'cudnn.h',
],
'libraries': [
'cudnn',
],
'check_method': build.check_cudnn_version,
'version_method': build.get_cudnn_version,
},
{
'name': 'nccl',
'file': [
'cupy.cuda.nccl',
],
'include': [
'nccl.h',
],
'libraries': [
'nccl',
],
'check_method': build.check_nccl_version,
'version_method': build.get_nccl_version,
},
{
'name': 'cusolver',
'file': [
'cupy.cuda.cusolver',
],
'include': [
'cusolverDn.h',
],
'libraries': [
'cusolver',
],
'check_method': build.check_cuda_version,
},
{
'name': 'nvtx',
'file': [
'cupy.cuda.nvtx',
],
'include': [
'nvToolsExt.h',
],
'libraries': [
'nvToolsExt' if not PLATFORM_WIN32 else 'nvToolsExt64_1',
],
'check_method': build.check_nvtx,
},
{
# The value of the key 'file' is a list that contains extension names
        # or tuples of an extension name and a list of other source files
# required to build the extension such as .cpp files and .cu files.
#
# <extension name> | (<extension name>, a list of <other source>)
#
# The extension name is also interpreted as the name of the Cython
# source file required to build the extension with appending '.pyx'
# file extension.
'name': 'thrust',
'file': [
('cupy.cuda.thrust', ['cupy/cuda/cupy_thrust.cu']),
],
'include': [
'thrust/device_ptr.h',
'thrust/sequence.h',
'thrust/sort.h',
],
'libraries': [
'cudart',
],
'check_method': build.check_cuda_version,
}
]
def ensure_module_file(file):
if isinstance(file, tuple):
return file
else:
return file, []
def module_extension_name(file):
return ensure_module_file(file)[0]
def module_extension_sources(file, use_cython, no_cuda):
pyx, others = ensure_module_file(file)
base = path.join(*pyx.split('.'))
if use_cython:
pyx = base + '.pyx'
if not os.path.exists(pyx):
use_cython = False
print(
'NOTICE: Skipping cythonize as {} does not exist.'.format(pyx))
if not use_cython:
pyx = base + '.cpp'
# If CUDA SDK is not available, remove CUDA C files from extension sources
# and use stubs defined in header files.
if no_cuda:
others1 = []
for source in others:
base, ext = os.path.splitext(source)
if ext == '.cu':
continue
others1.append(source)
others = others1
return [pyx] + others
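# Editor's illustrative note (not part of the original file): for the thrust
# entry in MODULES, on a POSIX system
#   module_extension_sources(('cupy.cuda.thrust', ['cupy/cuda/cupy_thrust.cu']),
#                            use_cython=True, no_cuda=False)
# returns ['cupy/cuda/thrust.pyx', 'cupy/cuda/cupy_thrust.cu'] (assuming the
# .pyx file exists), while no_cuda=True drops the .cu source.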
def check_readthedocs_environment():
return os.environ.get('READTHEDOCS', None) == 'True'
def check_library(compiler, includes=(), libraries=(),
include_dirs=(), library_dirs=(), define_macros=None):
source = ''.join(['#include <%s>\n' % header for header in includes])
source += 'int main(int argc, char* argv[]) {return 0;}'
try:
        # We need to try to build a shared library because distutils
        # uses different options to build an executable and a shared library.
        # In particular, when a user builds an executable, distutils does not
        # use the LDFLAGS environment variable.
build.build_shlib(compiler, source, libraries,
include_dirs, library_dirs, define_macros)
except Exception as e:
print(e)
sys.stdout.flush()
return False
return True
def preconfigure_modules(compiler, settings):
"""Returns a list of modules buildable in given environment and settings.
For each module in MODULES list, this function checks if the module
can be built in the current environment and reports it.
Returns a list of module names available.
"""
nvcc_path = build.get_nvcc_path()
summary = [
'',
'************************************************************',
'* CuPy Configuration Summary *',
'************************************************************',
'',
'Build Environment:',
' Include directories: {}'.format(str(settings['include_dirs'])),
' Library directories: {}'.format(str(settings['library_dirs'])),
' nvcc command : {}'.format(
nvcc_path if nvcc_path else '(not found)'),
'',
'Environment Variables:',
]
for key in ['CFLAGS', 'LDFLAGS', 'LIBRARY_PATH',
'CUDA_PATH', 'NVTOOLSEXT_PATH', 'NVCC']:
summary += [' {:<16}: {}'.format(key, os.environ.get(key, '(none)'))]
summary += [
'',
'Modules:',
]
ret = []
for module in MODULES:
installed = False
status = 'No'
errmsg = []
print('')
print('-------- Configuring Module: {} --------'.format(
module['name']))
sys.stdout.flush()
if not check_library(compiler,
includes=module['include'],
include_dirs=settings['include_dirs'],
define_macros=settings['define_macros']):
errmsg = ['Include files not found: %s' % module['include'],
'Check your CFLAGS environment variable.']
elif not check_library(compiler,
libraries=module['libraries'],
library_dirs=settings['library_dirs'],
define_macros=settings['define_macros']):
errmsg = ['Cannot link libraries: %s' % module['libraries'],
'Check your LDFLAGS environment variable.']
elif ('check_method' in module and
not module['check_method'](compiler, settings)):
# Fail on per-library condition check (version requirements etc.)
installed = True
errmsg = ['The library is installed but not supported.']
elif module['name'] == 'thrust' and nvcc_path is None:
installed = True
errmsg = ['nvcc command could not be found in PATH.',
'Check your PATH environment variable.']
else:
installed = True
status = 'Yes'
ret.append(module['name'])
if installed and 'version_method' in module:
status += ' (version {})'.format(module['version_method'](True))
summary += [
' {:<10}: {}'.format(module['name'], status)
]
# If error message exists...
if len(errmsg) != 0:
summary += [' -> {}'.format(m) for m in errmsg]
# Skip checking other modules when CUDA is unavailable.
if module['name'] == 'cuda':
break
if len(ret) != len(MODULES):
if 'cuda' in ret:
lines = [
'WARNING: Some modules could not be configured.',
'CuPy will be installed without these modules.',
]
else:
lines = [
'ERROR: CUDA could not be found on your system.',
]
summary += [
'',
] + lines + [
'Please refer to the Installation Guide for details:',
'https://docs-cupy.chainer.org/en/stable/install.html',
'',
]
summary += [
'************************************************************',
'',
]
print('\n'.join(summary))
return ret
def _rpath_base():
if PLATFORM_LINUX:
return '$ORIGIN'
elif PLATFORM_DARWIN:
return '@loader_path'
else:
raise Exception('not supported on this platform')
def make_extensions(options, compiler, use_cython):
"""Produce a list of Extension instances which passed to cythonize()."""
no_cuda = options['no_cuda']
settings = build.get_compiler_setting()
include_dirs = settings['include_dirs']
settings['include_dirs'] = [
x for x in include_dirs if path.exists(x)]
settings['library_dirs'] = [
x for x in settings['library_dirs'] if path.exists(x)]
    # Adjust rpath to use CUDA libraries in `cupy/_lib/*.so` from CuPy.
use_wheel_libs_rpath = (
0 < len(options['wheel_libs']) and not PLATFORM_WIN32)
# This is a workaround for Anaconda.
# Anaconda installs libstdc++ from GCC 4.8 and it is not compatible
# with GCC 5's new ABI.
settings['define_macros'].append(('_GLIBCXX_USE_CXX11_ABI', '0'))
# In the environment with CUDA 7.5 on Ubuntu 16.04, gcc5.3 does not
# automatically deal with memcpy because string.h header file has
# been changed. This is a workaround for that environment.
# See details in the below discussions:
# https://github.com/BVLC/caffe/issues/4046
# https://groups.google.com/forum/#!topic/theano-users/3ihQYiTRG4E
settings['define_macros'].append(('_FORCE_INLINES', '1'))
if options['linetrace']:
settings['define_macros'].append(('CYTHON_TRACE', '1'))
settings['define_macros'].append(('CYTHON_TRACE_NOGIL', '1'))
if no_cuda:
settings['define_macros'].append(('CUPY_NO_CUDA', '1'))
available_modules = []
if no_cuda:
available_modules = [m['name'] for m in MODULES]
else:
available_modules = preconfigure_modules(compiler, settings)
if 'cuda' not in available_modules:
raise Exception('Your CUDA environment is invalid. '
'Please check above error log.')
ret = []
for module in MODULES:
if module['name'] not in available_modules:
continue
s = settings.copy()
if not no_cuda:
s['libraries'] = module['libraries']
compile_args = s.setdefault('extra_compile_args', [])
link_args = s.setdefault('extra_link_args', [])
if module['name'] == 'cusolver':
compile_args = s.setdefault('extra_compile_args', [])
link_args = s.setdefault('extra_link_args', [])
# openmp is required for cusolver
if compiler.compiler_type == 'unix' and not PLATFORM_DARWIN:
# In mac environment, openmp is not required.
compile_args.append('-fopenmp')
link_args.append('-fopenmp')
elif compiler.compiler_type == 'msvc':
compile_args.append('/openmp')
if (PLATFORM_LINUX and s['library_dirs']) or PLATFORM_DARWIN:
ldflag = '-Wl,'
if PLATFORM_LINUX:
ldflag += '--disable-new-dtags,'
ldflag += ','.join('-rpath,' + p
for p in s['library_dirs'])
args = s.setdefault('extra_link_args', [])
args.append(ldflag)
if PLATFORM_DARWIN:
                # -rpath is only supported when targeting Mac OS X 10.5 or
# later
args.append('-mmacosx-version-min=10.5')
for f in module['file']:
name = module_extension_name(f)
rpath = []
if not options['no_rpath']:
# Add library directories (e.g., `/usr/local/cuda/lib64`) to
# RPATH.
rpath += s['library_dirs']
if use_wheel_libs_rpath:
# Add `cupy/_lib` (where shared libraries included in wheels
# reside) to RPATH.
# The path is resolved relative to the module, e.g., use
# `$ORIGIN/_lib` for `cupy/cudnn.so` and `$ORIGIN/../_lib` for
# `cupy/cuda/cudnn.so`.
depth = name.count('.') - 1
rpath.append('{}{}/_lib'.format(_rpath_base(), '/..' * depth))
if not PLATFORM_WIN32 and not PLATFORM_LINUX:
s['runtime_library_dirs'] = rpath
sources = module_extension_sources(f, use_cython, no_cuda)
extension = setuptools.Extension(name, sources, **s)
ret.append(extension)
return ret
def parse_args():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'--cupy-package-name', type=str, default='cupy',
help='alternate package name')
parser.add_argument(
'--cupy-long-description', type=str, default=None,
help='path to the long description file')
parser.add_argument(
'--cupy-wheel-lib', type=str, action='append', default=[],
help='shared library to copy into the wheel '
'(can be specified for multiple times)')
parser.add_argument(
'--cupy-no-rpath', action='store_true', default=False,
help='disable adding default library directories to RPATH')
parser.add_argument(
'--cupy-profile', action='store_true', default=False,
help='enable profiling for Cython code')
parser.add_argument(
'--cupy-coverage', action='store_true', default=False,
help='enable coverage for Cython code')
parser.add_argument(
'--cupy-no-cuda', action='store_true', default=False,
help='build CuPy with stub header file')
opts, sys.argv = parser.parse_known_args(sys.argv)
arg_options = {
'package_name': opts.cupy_package_name,
'long_description': opts.cupy_long_description,
'wheel_libs': opts.cupy_wheel_lib, # list
'no_rpath': opts.cupy_no_rpath,
'profile': opts.cupy_profile,
'linetrace': opts.cupy_coverage,
'annotate': opts.cupy_coverage,
'no_cuda': opts.cupy_no_cuda,
}
if check_readthedocs_environment():
arg_options['no_cuda'] = True
return arg_options
cupy_setup_options = parse_args()
print('Options:', cupy_setup_options)
def get_package_name():
return cupy_setup_options['package_name']
def get_long_description():
path = cupy_setup_options['long_description']
if path is None:
return None
with open(path) as f:
return f.read()
def prepare_wheel_libs():
"""Prepare shared libraries for wheels.
On Windows, DLLs will be placed under `cupy/cuda`.
    On other platforms, shared libraries are placed under `cupy/_lib` and
RUNPATH will be set to this directory later.
"""
libdirname = None
if PLATFORM_WIN32:
libdirname = 'cuda'
# Clean up existing libraries.
libfiles = glob.glob('cupy/{}/*.dll'.format(libdirname))
for libfile in libfiles:
print("Removing file: {}".format(libfile))
os.remove(libfile)
else:
libdirname = '_lib'
# Clean up the library directory.
libdir = 'cupy/{}'.format(libdirname)
if os.path.exists(libdir):
print("Removing directory: {}".format(libdir))
shutil.rmtree(libdir)
os.mkdir(libdir)
# Copy specified libraries to the library directory.
libs = []
for lib in cupy_setup_options['wheel_libs']:
# Note: symlink is resolved by shutil.copy2.
print("Copying library for wheel: {}".format(lib))
libname = path.basename(lib)
libpath = 'cupy/{}/{}'.format(libdirname, libname)
shutil.copy2(lib, libpath)
libs.append('{}/{}'.format(libdirname, libname))
return libs
try:
import Cython
import Cython.Build
cython_version = pkg_resources.parse_version(Cython.__version__)
cython_available = (
cython_version >= required_cython_version and
cython_version not in ignore_cython_versions)
except ImportError:
cython_available = False
def cythonize(extensions, arg_options):
directive_keys = ('linetrace', 'profile')
directives = {key: arg_options[key] for key in directive_keys}
# Embed signatures for Sphinx documentation.
directives['embedsignature'] = True
cythonize_option_keys = ('annotate',)
cythonize_options = {key: arg_options[key]
for key in cythonize_option_keys}
return Cython.Build.cythonize(
extensions, verbose=True, language_level=3,
compiler_directives=directives, **cythonize_options)
def check_extensions(extensions):
for x in extensions:
for f in x.sources:
if not path.isfile(f):
raise RuntimeError('''\
Missing file: {}
Please install Cython {} or later. Please also check the version of Cython.
See https://docs-cupy.chainer.org/en/stable/install.html for details.
'''.format(f, required_cython_version))
def get_ext_modules(use_cython=False):
arg_options = cupy_setup_options
# We need to call get_config_vars to initialize _config_vars in distutils
# see #1849
sysconfig.get_config_vars()
compiler = ccompiler.new_compiler()
sysconfig.customize_compiler(compiler)
extensions = make_extensions(arg_options, compiler, use_cython)
return extensions
def _nvcc_gencode_options(cuda_version):
"""Returns NVCC GPU code generation options."""
# The arch_list specifies virtual architectures, such as 'compute_61', and
# real architectures, such as 'sm_61', for which the CUDA input files are
# to be compiled.
#
# The syntax of an entry of the list is
#
# entry ::= virtual_arch | (virtual_arch, real_arch)
#
# where virtual_arch is a string which means a virtual architecture and
# real_arch is a string which means a real architecture.
#
# If a virtual architecture is supplied, NVCC generates a PTX code for the
# virtual architecture. If a pair of a virtual architecture and a real
# architecture is supplied, NVCC generates a PTX code for the virtual
# architecture as well as a cubin code for the real architecture.
#
# For example, making NVCC generate a PTX code for 'compute_60' virtual
# architecture, the arch_list has an entry of 'compute_60'.
#
# arch_list = ['compute_60']
#
# For another, making NVCC generate a PTX code for 'compute_61' virtual
# architecture and a cubin code for 'sm_61' real architecture, the
# arch_list has an entry of ('compute_61', 'sm_61').
#
# arch_list = [('compute_61', 'sm_61')]
arch_list = ['compute_30',
'compute_50']
if cuda_version >= 9000:
arch_list += [('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
'compute_70']
elif cuda_version >= 8000:
arch_list += [('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
'compute_60']
options = []
for arch in arch_list:
if type(arch) is tuple:
virtual_arch, real_arch = arch
options.append('--generate-code=arch={},code={}'.format(
virtual_arch, real_arch))
else:
options.append('--generate-code=arch={},code={}'.format(
arch, arch))
if sys.argv == ['setup.py', 'develop']:
return []
else:
return options
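# Editor's illustrative note (not part of the original file): with a CUDA 9.x
# toolkit the helper above returns flags of the form
#   --generate-code=arch=compute_30,code=compute_30   (PTX only)
#   --generate-code=arch=compute_60,code=sm_60        (PTX + cubin)
#   ...
#   --generate-code=arch=compute_70,code=compute_70   (PTX only)
# i.e. bare virtual archs get forward-compatible PTX, while (virtual, real)
# pairs also get a cubin for that real architecture.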
class _UnixCCompiler(unixccompiler.UnixCCompiler):
src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
src_extensions.append('.cu')
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
# For sources other than CUDA C ones, just call the super class method.
if os.path.splitext(src)[1] != '.cu':
return unixccompiler.UnixCCompiler._compile(
self, obj, src, ext, cc_args, extra_postargs, pp_opts)
# For CUDA C source files, compile them with NVCC.
_compiler_so = self.compiler_so
try:
nvcc_path = build.get_nvcc_path()
base_opts = build.get_compiler_base_options()
self.set_executable('compiler_so', nvcc_path)
cuda_version = build.get_cuda_version()
postargs = _nvcc_gencode_options(cuda_version) + [
'-O2', '--compiler-options="-fPIC"']
print('NVCC options:', postargs)
return unixccompiler.UnixCCompiler._compile(
self, obj, src, ext, base_opts + cc_args, postargs, pp_opts)
finally:
self.compiler_so = _compiler_so
class _MSVCCompiler(msvccompiler.MSVCCompiler):
_cu_extensions = ['.cu']
src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
src_extensions.extend(_cu_extensions)
def _compile_cu(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
# Compile CUDA C files, mainly derived from UnixCCompiler._compile().
macros, objects, extra_postargs, pp_opts, _build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compiler_so = build.get_nvcc_path()
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
cuda_version = build.get_cuda_version()
postargs = _nvcc_gencode_options(cuda_version) + ['-O2']
postargs += ['-Xcompiler', '/MD']
print('NVCC options:', postargs)
for obj in objects:
try:
src, ext = _build[obj]
except KeyError:
continue
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] + postargs)
except errors.DistutilsExecError as e:
raise errors.CompileError(str(e))
return objects
def compile(self, sources, **kwargs):
# Split CUDA C sources and others.
cu_sources = []
other_sources = []
for source in sources:
if os.path.splitext(source)[1] == '.cu':
cu_sources.append(source)
else:
other_sources.append(source)
# Compile source files other than CUDA C ones.
other_objects = msvccompiler.MSVCCompiler.compile(
self, other_sources, **kwargs)
# Compile CUDA C sources.
cu_objects = self._compile_cu(cu_sources, **kwargs)
# Return compiled object filenames.
return other_objects + cu_objects
class sdist_with_cython(sdist.sdist):
"""Custom `sdist` command with cyhonizing."""
def __init__(self, *args, **kwargs):
if not cython_available:
raise RuntimeError('Cython is required to make sdist.')
ext_modules = get_ext_modules(True) # get .pyx modules
cythonize(ext_modules, cupy_setup_options)
sdist.sdist.__init__(self, *args, **kwargs)
class custom_build_ext(build_ext.build_ext):
"""Custom `build_ext` command to include CUDA C source files."""
def run(self):
if build.get_nvcc_path() is not None:
def wrap_new_compiler(func):
def _wrap_new_compiler(*args, **kwargs):
try:
return func(*args, **kwargs)
except errors.DistutilsPlatformError:
if not PLATFORM_WIN32:
CCompiler = _UnixCCompiler
else:
CCompiler = _MSVCCompiler
return CCompiler(
None, kwargs['dry_run'], kwargs['force'])
return _wrap_new_compiler
ccompiler.new_compiler = wrap_new_compiler(ccompiler.new_compiler)
# Intentionally causes DistutilsPlatformError in
# ccompiler.new_compiler() function to hook.
self.compiler = 'nvidia'
if cython_available:
ext_modules = get_ext_modules(True) # get .pyx modules
cythonize(ext_modules, cupy_setup_options)
check_extensions(self.extensions)
build_ext.build_ext.run(self)
| 33.708651 | 79 | 0.577316 |
918f436b82db51a2ad8adbde7cb609067acc167e
| 408 |
py
|
Python
|
tests/data/error_settings6.py
|
voyagegroup/apns-proxy-server
|
5858d1b33d37b9333ca153cd92f091bad9537455
|
[
"BSD-2-Clause"
] | 16 |
2015-01-20T22:54:43.000Z
|
2021-07-07T03:33:04.000Z
|
tests/data/error_settings6.py
|
voyagegroup/apns-proxy-server
|
5858d1b33d37b9333ca153cd92f091bad9537455
|
[
"BSD-2-Clause"
] | null | null | null |
tests/data/error_settings6.py
|
voyagegroup/apns-proxy-server
|
5858d1b33d37b9333ca153cd92f091bad9537455
|
[
"BSD-2-Clause"
] | 6 |
2015-01-22T05:00:36.000Z
|
2022-03-03T15:20:00.000Z
|
# -*- coding: utf-8 -*-
import logging
LOG_LEVEL = logging.INFO
# Port on which to listen for client connections
BIND_PORT_FOR_ENTRY = 5556
# Port used for PUSH-PULL
BIND_PORT_FOR_PULL = 5557
# Number of worker threads per application
THREAD_NUMS_PER_APPLICATION = 5
# APNs settings for each application
APPLICATIONS = [
{
"application_id": "14",
"name": "My App1",
"sandbox": False,
"cert_file": "sample.cert",
"key_file": "no_file.key"
}
]
| 16.32 | 35 | 0.632353 |
fe98c361232dc55b23f29468636e5c8643126622
| 1,230 |
py
|
Python
|
runtests.py
|
agronick/django-vueformgenerator
|
6c59be1d06118327af08cc0e6260d234f12f715e
|
[
"MIT"
] | 7 |
2016-10-20T11:49:01.000Z
|
2020-06-17T22:12:59.000Z
|
runtests.py
|
agronick/django-vueformgenerator
|
6c59be1d06118327af08cc0e6260d234f12f715e
|
[
"MIT"
] | 2 |
2019-03-19T15:41:43.000Z
|
2019-03-29T17:39:44.000Z
|
runtests.py
|
agronick/django-vueformgenerator
|
6c59be1d06118327af08cc0e6260d234f12f715e
|
[
"MIT"
] | 5 |
2016-12-01T15:41:15.000Z
|
2019-10-06T02:00:20.000Z
|
import sys
try:
from django.conf import settings
from django.test.utils import get_runner
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="django_vueformgenerator.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"django_vueformgenerator",
"tests",
],
SITE_ID=1,
MIDDLEWARE_CLASSES=(),
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
except ImportError:
import traceback
traceback.print_exc()
msg = "To fix this error, run: pip install -r requirements_test.txt"
raise ImportError(msg)
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(bool(failures))
if __name__ == '__main__':
run_tests(*sys.argv[1:])
| 21.206897 | 72 | 0.580488 |
af0ee2a83a6cc55a3b75bc4ab5fe8d3bd90852c2
| 4,056 |
py
|
Python
|
pysnmp-with-texts/Brcm-BASPTrap-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8 |
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/Brcm-BASPTrap-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4 |
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/Brcm-BASPTrap-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module Brcm-BASPTrap-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Brcm-BASPTrap-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:42:48 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, TimeTicks, ObjectIdentity, Unsigned32, MibIdentifier, iso, enterprises, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, Counter32, Counter64, NotificationType, Bits, Gauge32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "TimeTicks", "ObjectIdentity", "Unsigned32", "MibIdentifier", "iso", "enterprises", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "Counter32", "Counter64", "NotificationType", "Bits", "Gauge32", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
broadcom = MibIdentifier((1, 3, 6, 1, 4, 1, 4413))
enet = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1))
basp = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 2))
baspConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 2, 1))
baspStat = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 2, 2))
baspTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 2, 3))
trapAdapterName = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 2, 3, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapAdapterName.setStatus('mandatory')
if mibBuilder.loadTexts: trapAdapterName.setDescription('The adapter name.')
trapTeamName = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 2, 3, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapTeamName.setStatus('mandatory')
if mibBuilder.loadTexts: trapTeamName.setDescription('The team name.')
trapCauseDirection = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 2, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("adapterActive", 1), ("adapterInactive", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapCauseDirection.setStatus('mandatory')
if mibBuilder.loadTexts: trapCauseDirection.setDescription('Event caused by failover condition.')
trapAdapterActivityCause = MibScalar((1, 3, 6, 1, 4, 1, 4413, 1, 2, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("linkChange", 2), ("adapterEnabledOrDisabled", 3), ("adapterAddedOrRemoved", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapAdapterActivityCause.setStatus('mandatory')
if mibBuilder.loadTexts: trapAdapterActivityCause.setDescription('Additional explanation for the Event trapCauseDirection.')
failoverEvent = NotificationType((1, 3, 6, 1, 4, 1, 4413, 1, 2, 3) + (0,1)).setObjects(("Brcm-BASPTrap-MIB", "trapAdapterName"), ("Brcm-BASPTrap-MIB", "trapTeamName"), ("Brcm-BASPTrap-MIB", "trapCauseDirection"), ("Brcm-BASPTrap-MIB", "trapAdapterActivityCause"))
if mibBuilder.loadTexts: failoverEvent.setDescription('This trap is generated to indicate that an adapter Fail-Over event has occurred')
mibBuilder.exportSymbols("Brcm-BASPTrap-MIB", trapAdapterName=trapAdapterName, failoverEvent=failoverEvent, basp=basp, enet=enet, baspConfig=baspConfig, trapCauseDirection=trapCauseDirection, baspStat=baspStat, baspTrap=baspTrap, trapAdapterActivityCause=trapAdapterActivityCause, trapTeamName=trapTeamName, broadcom=broadcom)
| 115.885714 | 543 | 0.767012 |
040c1b0134e369a150b9ffb06884e625f0a4ef0f
| 2,035 |
py
|
Python
|
databricks/conftest.py
|
mercileesb/koalas
|
685176c512f31166f0e472aa0f461d0f1449fb0c
|
[
"Apache-2.0"
] | 1 |
2021-01-17T18:26:33.000Z
|
2021-01-17T18:26:33.000Z
|
databricks/conftest.py
|
mercileesb/koalas
|
685176c512f31166f0e472aa0f461d0f1449fb0c
|
[
"Apache-2.0"
] | null | null | null |
databricks/conftest.py
|
mercileesb/koalas
|
685176c512f31166f0e472aa0f461d0f1449fb0c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy
import tempfile
import atexit
import shutil
import uuid
from distutils.version import LooseVersion
from pyspark import __version__
from databricks import koalas
from databricks.koalas import utils
# Initialize Spark session that should be used in doctests or unittests.
# Delta requires Spark 2.4.2+. See
# https://github.com/delta-io/delta#compatibility-with-apache-spark-versions.
if LooseVersion(__version__) >= LooseVersion("3.0.0"):
session = utils.default_session({"spark.jars.packages": "io.delta:delta-core_2.12:0.1.0"})
elif LooseVersion(__version__) >= LooseVersion("2.4.2"):
session = utils.default_session({"spark.jars.packages": "io.delta:delta-core_2.11:0.1.0"})
else:
session = utils.default_session()
@pytest.fixture(autouse=True)
def add_ks(doctest_namespace):
doctest_namespace['ks'] = koalas
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace['np'] = numpy
@pytest.fixture(autouse=True)
def add_path(doctest_namespace):
path = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(path, ignore_errors=True))
doctest_namespace['path'] = path
@pytest.fixture(autouse=True)
def add_db(doctest_namespace):
db_name = str(uuid.uuid4()).replace("-", "")
session.sql("CREATE DATABASE %s" % db_name)
atexit.register(lambda: session.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name))
doctest_namespace['db'] = db_name
| 31.796875 | 94 | 0.749877 |
ceb5140de2fed287a4806326e253f7212cae60eb
| 4,835 |
py
|
Python
|
estimate.py
|
brunojacobs/ulsdpb
|
7beff2e5f086d352258cd128430ec16ebfde7d53
|
[
"MIT"
] | 5 |
2020-11-14T09:59:03.000Z
|
2021-06-10T14:27:40.000Z
|
estimate.py
|
tanetpongc/ulsdpb
|
7beff2e5f086d352258cd128430ec16ebfde7d53
|
[
"MIT"
] | null | null | null |
estimate.py
|
tanetpongc/ulsdpb
|
7beff2e5f086d352258cd128430ec16ebfde7d53
|
[
"MIT"
] | 1 |
2021-04-28T10:44:04.000Z
|
2021-04-28T10:44:04.000Z
|
# Standard library modules
from collections import namedtuple
import argparse
import os
# External modules
import numpy as np
# Own modules
import model.data
import model.fixed
import model.initialization
import model.optimization
import model.prior
import model.state
import settings
# Numpy settings
np.seterr(divide='raise', over='raise', under='ignore', invalid='raise')
# Get user arguments
parser = argparse.ArgumentParser()
parser.add_argument('-MODEL', type=str)
parser.add_argument('-M', type=int)
parser.add_argument('-N_ITER', type=int)
parser.add_argument('-N_SAVE_PER', type=int)
parser_args = parser.parse_args()
MODEL = parser_args.MODEL
M = parser_args.M
N_ITER = parser_args.N_ITER
N_SAVE_PER = parser_args.N_SAVE_PER
assert MODEL in ['FULL', 'NO_VAR', 'LDA_X'], \
'Valid options for MODEL argument are FULL, NO_VAR, or LDA_X'
assert M >= 2, \
'M should be an integer larger than or equal to 2'
# Process user arguments
EMULATE_LDA_X = None
NO_DYNAMICS = None
NO_REGRESSORS = None
if MODEL == 'FULL':
EMULATE_LDA_X = False
NO_DYNAMICS = False
NO_REGRESSORS = False
print('Complete ULSDPB-model')
elif MODEL == 'NO_VAR':
EMULATE_LDA_X = False
NO_DYNAMICS = True
NO_REGRESSORS = False
print('ULSDPB-model without VAR(1) effects')
elif MODEL == 'LDA_X':
EMULATE_LDA_X = True
NO_DYNAMICS = False
NO_REGRESSORS = False
print('ULSDPB-model restricted to LDA-X')
# Subfolder in the output folder that is M-specific
M_OUTPUT_FOLDER = os.path.join(settings.OUTPUT_FOLDER, 'M' + str(M))
if not os.path.exists(M_OUTPUT_FOLDER):
os.makedirs(M_OUTPUT_FOLDER)
# Define location for the .csv file with the initial C_JM matrix
INIT_C_JM_FILE = os.path.join(M_OUTPUT_FOLDER, settings.INIT_C_JM_FILENAME)
# Subfolder in the M-specific output folder that is model-specific
MODEL_OUTPUT_FOLDER = os.path.join(M_OUTPUT_FOLDER, MODEL)
if not os.path.exists(MODEL_OUTPUT_FOLDER):
os.makedirs(MODEL_OUTPUT_FOLDER)
# Create namedtuple with the VI settings
SettingsVI = namedtuple(
typename='SettingsVI',
field_names=settings.VI,
)
vi_settings = SettingsVI(**settings.VI) # noqa
# Create namedtuple with the other settings
SettingsMisc = namedtuple(
typename='SettingsMisc',
field_names=settings.MISC,
)
misc_settings = SettingsMisc(**settings.MISC) # noqa
# Load the (y_fused_ibn, x, w)-data
y_fused_ibn = np.loadtxt(settings.Y_CSV, dtype=int, delimiter=',')
x = np.loadtxt(settings.X_CSV, dtype=float, delimiter=',')
w = np.asfortranarray(np.loadtxt(settings.W_CSV, dtype=float, delimiter=','))
# Load the C_JM matrix with pseudo-counts from the LDA solution
initial_c_jm = np.loadtxt(INIT_C_JM_FILE, dtype=float, delimiter=',')
# Create a dataset, based on the (y_fused_ibn, x, w)-data
data = model.data.create_dataset(
emulate_lda_x=EMULATE_LDA_X,
y_fused_ibn=y_fused_ibn,
x=x,
w=w,
)
# Define fixed parameter values, based on the optimization settings
is_fixed, fixed_values = model.fixed.create_fixed(
emulate_lda_x=EMULATE_LDA_X,
no_dynamics=NO_DYNAMICS,
no_regressors=NO_REGRESSORS,
dim_i=data.dim_i,
dim_x=data.dim_x,
dim_w=data.dim_w,
M=M,
)
# Define the prior parameter values, as specified in model.elbo
prior = model.prior.create_prior(
is_fixed=is_fixed,
dim_j=data.dim_j,
dim_x=data.dim_x,
dim_w=data.dim_w,
M=M,
)
# Initialize the variational parameters
initial_state_stub = model.initialization.create_stub_initialization(
init_ss_mu_q_alpha_ib=settings.INIT_SS_MU_Q_ALPHA_IB,
init_ss_log_sigma_q_alpha_ib=settings.INIT_SS_LOG_SIGMA_Q_ALPHA_IB,
c_jm=initial_c_jm,
prior=prior,
is_fixed=is_fixed,
data=data,
M=M,
)
# Compute the corresponding variational expectations
q = model.state.create_state(
state_stub=initial_state_stub,
data=data,
prior=prior,
is_fixed=is_fixed,
fixed_values=fixed_values,
M=M,
)
np.savez_compressed(
file=os.path.join(MODEL_OUTPUT_FOLDER, 'data.npz'),
**data._asdict(),
)
np.savez_compressed(
file=os.path.join(MODEL_OUTPUT_FOLDER, 'prior.npz'),
**prior._asdict(),
)
np.savez_compressed(
file=os.path.join(MODEL_OUTPUT_FOLDER, 'initial_state.npz'),
**q._asdict(),
)
np.savez_compressed(
file=os.path.join(MODEL_OUTPUT_FOLDER, 'vi_settings.npz'),
**vi_settings._asdict(),
)
np.savez_compressed(
file=os.path.join(MODEL_OUTPUT_FOLDER, 'misc_settings.npz'),
**misc_settings._asdict(),
)
# Model estimation using variational inference
q, elbo_dict = model.optimization.routine(
q=q,
data=data,
prior=prior,
is_fixed=is_fixed,
fixed_values=fixed_values,
M=M,
model_output_folder=MODEL_OUTPUT_FOLDER,
n_iter=N_ITER,
n_save_per=N_SAVE_PER,
misc_settings=misc_settings,
vi_settings=vi_settings,
)
| 25.582011 | 77 | 0.739193 |
607757f90638850817a54f4ab190e200ae131cad
| 4,841 |
py
|
Python
|
virtual/lib/python3.6/site-packages/pip/_vendor/__init__.py
|
Lornakimani62/IP4-_One-Minute-Pitch
|
cabd4c9a2fc726f37fc98c5de561f3d86c6c66f9
|
[
"MIT"
] | 548 |
2020-09-20T10:31:50.000Z
|
2022-03-31T06:18:04.000Z
|
virtual/lib/python3.6/site-packages/pip/_vendor/__init__.py
|
Lornakimani62/IP4-_One-Minute-Pitch
|
cabd4c9a2fc726f37fc98c5de561f3d86c6c66f9
|
[
"MIT"
] | 24 |
2020-06-23T13:59:30.000Z
|
2022-03-08T23:26:34.000Z
|
virtual/lib/python3.6/site-packages/pip/_vendor/__init__.py
|
Lornakimani62/IP4-_One-Minute-Pitch
|
cabd4c9a2fc726f37fc98c5de561f3d86c6c66f9
|
[
"MIT"
] | 216 |
2020-09-20T12:57:26.000Z
|
2022-03-20T14:07:04.000Z
|
"""
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
import glob
import os.path
import sys
# Downstream redistributors which have debundled our dependencies should also
# patch this value to be true. This will trigger the additional patching
# to cause things like "six" to be available as pip.
DEBUNDLED = False
# By default, look in this directory for a bunch of .whl files which we will
# add to the beginning of sys.path before attempting to import anything. This
# is done to support downstream re-distributors like Debian and Fedora who
# wish to create their own Wheels for our dependencies to aid in debundling.
WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
# Define a small helper function to alias our vendored modules to the real ones
# if the vendored ones do not exist. The idea for this was taken from
# https://github.com/kennethreitz/requests/pull/2567.
def vendored(modulename):
vendored_name = "{0}.{1}".format(__name__, modulename)
try:
__import__(vendored_name, globals(), locals(), level=0)
except ImportError:
try:
__import__(modulename, globals(), locals(), level=0)
except ImportError:
# We can just silently allow import failures to pass here. If we
# got to this point it means that ``import pip._vendor.whatever``
# failed and so did ``import whatever``. Since we're importing this
# upfront in an attempt to alias imports, not erroring here will
# just mean we get a regular import error whenever pip *actually*
# tries to import one of these modules to use it, which actually
# gives us a better error message than we would have otherwise
# gotten.
pass
else:
sys.modules[vendored_name] = sys.modules[modulename]
base, head = vendored_name.rsplit(".", 1)
setattr(sys.modules[base], head, sys.modules[modulename])
# If we're operating in a debundled setup, then we want to go ahead and trigger
# the aliasing of our vendored libraries as well as looking for wheels to add
# to our sys.path. This will cause all of this code to be a no-op typically
# however downstream redistributors can enable it in a consistent way across
# all platforms.
if DEBUNDLED:
# Actually look inside of WHEEL_DIR to find .whl files and add them to the
# front of our sys.path.
sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
# Actually alias all of our vendored dependencies.
vendored("cachecontrol")
vendored("colorama")
vendored("distlib")
vendored("distro")
vendored("html5lib")
vendored("lockfile")
vendored("six")
vendored("six.moves")
vendored("six.moves.urllib")
vendored("six.moves.urllib.parse")
vendored("packaging")
vendored("packaging.version")
vendored("packaging.specifiers")
vendored("pkg_resources")
vendored("progress")
vendored("pytoml")
vendored("retrying")
vendored("requests")
vendored("requests.packages")
vendored("requests.packages.urllib3")
vendored("requests.packages.urllib3._collections")
vendored("requests.packages.urllib3.connection")
vendored("requests.packages.urllib3.connectionpool")
vendored("requests.packages.urllib3.contrib")
vendored("requests.packages.urllib3.contrib.ntlmpool")
vendored("requests.packages.urllib3.contrib.pyopenssl")
vendored("requests.packages.urllib3.exceptions")
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
vendored("requests.packages.urllib3.packages")
vendored("requests.packages.urllib3.packages.ordered_dict")
vendored("requests.packages.urllib3.packages.six")
vendored("requests.packages.urllib3.packages.ssl_match_hostname")
vendored("requests.packages.urllib3.packages.ssl_match_hostname."
"_implementation")
vendored("requests.packages.urllib3.poolmanager")
vendored("requests.packages.urllib3.request")
vendored("requests.packages.urllib3.response")
vendored("requests.packages.urllib3.util")
vendored("requests.packages.urllib3.util.connection")
vendored("requests.packages.urllib3.util.request")
vendored("requests.packages.urllib3.util.response")
vendored("requests.packages.urllib3.util.retry")
vendored("requests.packages.urllib3.util.ssl_")
vendored("requests.packages.urllib3.util.timeout")
vendored("requests.packages.urllib3.util.url")
| 44.009091 | 80 | 0.705639 |
bdf100e62f868bd64d5c5ddaa4ddc94899ceb31d
| 7,824 |
py
|
Python
|
mesh_writer.py
|
jasperges/blenderseed
|
971cfa0303a76684aaa6b896571b31e9b6b84955
|
[
"MIT"
] | null | null | null |
mesh_writer.py
|
jasperges/blenderseed
|
971cfa0303a76684aaa6b896571b31e9b6b84955
|
[
"MIT"
] | null | null | null |
mesh_writer.py
|
jasperges/blenderseed
|
971cfa0303a76684aaa6b896571b31e9b6b84955
|
[
"MIT"
] | null | null | null |
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2017 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
import os
from . import util
#--------------------------------------------------------------------------------------------------
# Write a mesh object to disk in Wavefront OBJ format.
#--------------------------------------------------------------------------------------------------
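# Quantization helpers: scale coordinates so that nearly identical values
# produce the same dictionary key when de-duplicating texture coordinates.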
def get_array2_key(v):
a = int(v[0] * 1000000)
b = int(v[1] * 1000000)
return a, b
def get_vector2_key(v):
w = v * 1000000
return int(w.x), int(w.y)
def get_vector3_key(v):
w = v * 1000000
return w.x, w.y, w.z
def write_mesh_to_disk(ob, scene, mesh, filepath):
mesh_parts = []
verts = []
verts_n = []
verts_t = []
faces_w = []
try:
obj_file = open(filepath, "w", encoding="utf8")
fw = obj_file.write
except:
util.asUpdate("Cannot create file %s. Check directory permissions." % filepath)
return
vertices = mesh.vertices
faces = mesh.tessfaces
uvtex = mesh.tessface_uv_textures
uvset = uvtex.active.data if uvtex else None
# Sort the faces by material.
sorted_faces = [(index, face) for index, face in enumerate(faces)]
sorted_faces.sort(key=lambda item: item[1].material_index)
# Write vertices.
for vertex in vertices:
v = vertex.co
verts.append("v %.15f %.15f %.15f\n" % (v.x, v.y, v.z))
fw(('').join(verts))
# Deduplicate and write normals.
normal_indices = {}
vertex_normal_indices = {}
face_normal_indices = {}
current_normal_index = 0
for face_index, face in sorted_faces:
if face.use_smooth:
for vertex_index in face.vertices:
vn = vertices[vertex_index].normal
vn_key = (vn.x, vn.y, vn.z)
if vn_key in normal_indices:
vertex_normal_indices[vertex_index] = normal_indices[vn_key]
else:
verts_n.append("vn %.15f %.15f %.15f\n" % (vn.x, vn.y, vn.z))
normal_indices[vn_key] = current_normal_index
vertex_normal_indices[vertex_index] = current_normal_index
current_normal_index += 1
else:
vn = face.normal
vn_key = (vn.x, vn.y, vn.z)
if vn_key in normal_indices:
face_normal_indices[face_index] = normal_indices[vn_key]
else:
verts_n.append("vn %.15f %.15f %.15f\n" % (vn.x, vn.y, vn.z))
normal_indices[vn_key] = current_normal_index
face_normal_indices[face_index] = current_normal_index
current_normal_index += 1
fw(('').join(verts_n))
# Deduplicate and write texture coordinates.
if uvset:
vt_indices = {}
vertex_texcoord_indices = {}
current_vt_index = 0
for face_index, face in sorted_faces:
assert len(uvset[face_index].uv) == len(face.vertices)
for vt_index, vt in enumerate(uvset[face_index].uv):
vertex_index = face.vertices[vt_index]
vt_key = get_array2_key(vt)
if vt_key in vt_indices:
vertex_texcoord_indices[face_index, vertex_index] = vt_indices[vt_key]
else:
verts_t.append("vt %.15f %.15f\n" % (vt[0], vt[1]))
vt_indices[vt_key] = current_vt_index
vertex_texcoord_indices[face_index, vertex_index] = current_vt_index
current_vt_index += 1
fw(('').join(verts_t))
# Write faces.
current_material_index = -1
for face_index, face in sorted_faces:
if current_material_index != face.material_index:
current_material_index = face.material_index
mesh_name = "part_%d" % current_material_index
mesh_parts.append((current_material_index, mesh_name))
faces_w.append("o {0}\n".format(mesh_name))
line = "f"
if uvset and len(uvset[face_index].uv) > 0:
if face.use_smooth:
for vertex_index in face.vertices:
texcoord_index = vertex_texcoord_indices[face_index, vertex_index]
normal_index = vertex_normal_indices[vertex_index]
line += " %d/%d/%d" % (vertex_index + 1, texcoord_index + 1, normal_index + 1)
else:
normal_index = face_normal_indices[face_index]
for vertex_index in face.vertices:
texcoord_index = vertex_texcoord_indices[face_index, vertex_index]
line += " %d/%d/%d" % (vertex_index + 1, texcoord_index + 1, normal_index + 1)
else:
if face.use_smooth:
for vertex_index in face.vertices:
normal_index = vertex_normal_indices[vertex_index]
line += " %d//%d" % (vertex_index + 1, normal_index + 1)
else:
normal_index = face_normal_indices[face_index]
for vertex_index in face.vertices:
line += " %d//%d" % (vertex_index + 1, normal_index + 1)
faces_w.append(line + "\n")
fw(('').join(faces_w))
obj_file.close()
return mesh_parts
# End of write_mesh_to_disk
def write_curves_to_disk(ob, scene, psys, filepath):
"""
Write curves object to file.
"""
with open(filepath, "w") as curves_file:
fw = curves_file.write
psys.set_resolution(scene, ob, 'RENDER')
steps = 2 ** psys.settings.render_step
# Write the number of hairs to the file
num_curves = len(psys.particles) if len(psys.child_particles) == 0 else len(psys.child_particles)
fw("%d\n" % num_curves)
# Write the number of points per hair to the file
fw("%d\n" % steps)
root_size = psys.settings.appleseed.root_size * psys.settings.appleseed.scaling
tip_size = psys.settings.appleseed.tip_size * psys.settings.appleseed.scaling
radius_decrement = util.calc_decrement(root_size, tip_size, steps)
for p in range(0, num_curves):
p_radius = root_size
for step in range(0, steps):
# A hack for now, to keep the points at max of 4
if step == 4:
break
co = psys.co_hair(ob, p, step)
radius = p_radius
fw("%.6f %.6f %.6f %.4f " % (co.x, co.y, co.z, radius))
p_radius -= radius_decrement
fw("\n")
psys.set_resolution(scene, ob, 'PREVIEW')
return
| 37.980583 | 105 | 0.595475 |
8c84819fca28bbd3c288793c49d66b29db01d0c4
| 2,018 |
py
|
Python
|
python/v2.1/get_creative_fields.py
|
falbassini/Samples
|
f7112c07fc8cbe583648f47f07dda54f769ca019
|
[
"Apache-2.0"
] | null | null | null |
python/v2.1/get_creative_fields.py
|
falbassini/Samples
|
f7112c07fc8cbe583648f47f07dda54f769ca019
|
[
"Apache-2.0"
] | null | null | null |
python/v2.1/get_creative_fields.py
|
falbassini/Samples
|
f7112c07fc8cbe583648f47f07dda54f769ca019
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example lists all creative fields."""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to get creative fields for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
try:
# Construct the request.
request = service.creativeFields().list(profileId=profile_id)
while True:
# Execute request and print response.
response = request.execute()
for field in response['creativeFields']:
print ('Found creative field with ID %s and name "%s".'
% (field['id'], field['name']))
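      # Fetch the next page while results and a next-page token remain.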
if response['creativeFields'] and response['nextPageToken']:
request = service.creativeFields().list_next(request, response)
else:
break
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| 31.046154 | 77 | 0.708622 |
08c5162e09a73008ad632eac6a75924df95519bf
| 7,818 |
py
|
Python
|
sleap/gui/dialogs/delete.py
|
preeti98/sleap
|
203c3a03c0c54f8dab242611d9a8d24595e98081
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
sleap/gui/dialogs/delete.py
|
preeti98/sleap
|
203c3a03c0c54f8dab242611d9a8d24595e98081
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
sleap/gui/dialogs/delete.py
|
preeti98/sleap
|
203c3a03c0c54f8dab242611d9a8d24595e98081
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""
Dialog for deleting various subsets of instances in dataset.
"""
from sleap import LabeledFrame, Instance
from sleap.gui.dialogs import formbuilder
from PySide2 import QtCore, QtWidgets
from typing import List, Text, Tuple
class DeleteDialog(QtWidgets.QDialog):
"""
Dialog for deleting various subsets of instances in dataset.
Args:
context: The `CommandContext` from which this dialog is being
shown. The context provides both a `labels` (`Labels`) and a
`state` (`GuiState`).
"""
# NOTE: use type by name (rather than importing CommandContext) to avoid
# circular includes.
def __init__(
self, context: "CommandContext", *args, **kwargs,
):
super(DeleteDialog, self).__init__(*args, **kwargs)
self.context = context
# Layout for main form and buttons
self.form_widget = self._make_form_widget()
buttons_layout_widget = self._make_button_widget()
# Layout for entire dialog
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.form_widget)
layout.addWidget(buttons_layout_widget)
self.setLayout(layout)
self.accepted.connect(self.delete)
def _make_form_widget(self):
self.tracks = self.context.labels.tracks
widget = QtWidgets.QGroupBox()
layout = QtWidgets.QFormLayout()
self.instance_type_menu = formbuilder.FieldComboWidget()
self.frames_menu = formbuilder.FieldComboWidget()
self.tracks_menu = formbuilder.FieldComboWidget()
instance_type_options = [
"predicted instances",
"user instances",
"all instances",
]
frame_options = [
"current frame",
"current video",
]
if len(self.context.labels.videos) > 1:
frame_options.append("all videos")
if self.context.state["has_frame_range"]:
frame_options.extend(
["selected clip", "current video except for selected clip"]
)
if self.tracks:
track_options = [
"any track identity (including none)",
"no track identity set",
]
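            # Remember how many generic entries precede the real tracks so a
            # menu index can be mapped back to a Track in get_selected_track().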
self._track_idx_offset = len(track_options)
track_options.extend([track.name for track in self.tracks])
else:
self._track_idx_offset = 0
track_options = []
self.instance_type_menu.set_options(instance_type_options)
self.frames_menu.set_options(frame_options)
self.tracks_menu.set_options(track_options)
layout.addRow("Delete", self.instance_type_menu)
layout.addRow("in", self.frames_menu)
if self.tracks:
layout.addRow("with", self.tracks_menu)
widget.setLayout(layout)
return widget
def _make_button_widget(self):
# Layout for buttons
buttons = QtWidgets.QDialogButtonBox()
self.cancel_button = buttons.addButton(QtWidgets.QDialogButtonBox.Cancel)
self.delete_button = buttons.addButton(
"Delete", QtWidgets.QDialogButtonBox.AcceptRole
)
buttons_layout = QtWidgets.QHBoxLayout()
buttons_layout.addWidget(buttons, alignment=QtCore.Qt.AlignTop)
buttons_layout_widget = QtWidgets.QWidget()
buttons_layout_widget.setLayout(buttons_layout)
# Connect actions for buttons
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
return buttons_layout_widget
def get_selected_track(self):
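        """Return the Track matching the tracks menu selection, or None."""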
track_menu_idx = self.tracks_menu.currentIndex()
track_idx = track_menu_idx - self._track_idx_offset
if 0 <= track_idx < len(self.tracks):
return self.tracks[track_idx]
return None
def get_frames_instances(
self, instance_type_value: Text, frames_value: Text, tracks_value: Text
) -> List[Tuple[LabeledFrame, Instance]]:
"""Get list of instances based on drop-down menu options selected."""
def inst_condition(inst):
if instance_type_value.startswith("predicted"):
if not hasattr(inst, "score"):
return False
elif instance_type_value.startswith("user"):
if hasattr(inst, "score"):
return False
if tracks_value.startswith("any"):
# print("match any track")
pass
elif tracks_value.startswith("no"):
# print("match None track")
if inst.track is not None:
return False
else:
track_to_match = self.get_selected_track()
if track_to_match:
if inst.track != track_to_match:
return False
return True
labels = self.context.labels
lf_list = []
if frames_value == "current frame":
lf_list = labels.find(
video=self.context.state["video"],
frame_idx=self.context.state["frame_idx"],
)
elif frames_value == "current video":
lf_list = labels.find(video=self.context.state["video"],)
elif frames_value == "all videos":
lf_list = labels.labeled_frames
elif frames_value == "selected clip":
clip_range = range(*self.context.state["frame_range"])
print(clip_range)
lf_list = labels.find(
video=self.context.state["video"], frame_idx=clip_range
)
elif frames_value == "current video except for selected clip":
clip_range = range(*self.context.state["frame_range"])
lf_list = [
lf
for lf in labels.labeled_frames
if (
lf.video != self.context.state["video"]
or lf.frame_idx not in clip_range
)
]
else:
raise ValueError(f"Invalid frames_value: {frames_value}")
lf_inst_list = [
(lf, inst) for lf in lf_list for inst in lf if inst_condition(inst)
]
return lf_inst_list
def delete(self):
instance_type_value = self.instance_type_menu.value()
frames_value = self.frames_menu.value()
tracks_value = self.tracks_menu.value()
lf_inst_list = self.get_frames_instances(
instance_type_value=instance_type_value,
frames_value=frames_value,
tracks_value=tracks_value,
)
# print(len(lf_inst_list))
# print(instance_type_value)
# print(frames_value)
# print(tracks_value)
self._delete(lf_inst_list)
def _delete(self, lf_inst_list: List[Tuple[LabeledFrame, Instance]]):
# Delete the instances
for lf, inst in lf_inst_list:
self.context.labels.remove_instance(lf, inst, in_transaction=True)
if not lf.instances:
self.context.labels.remove(lf)
# Update caches since we skipped doing this after each deletion
self.context.labels.update_cache()
# Log update
self.context.changestack_push("delete instances")
if __name__ == "__main__":
app = QtWidgets.QApplication([])
from sleap import Labels
from sleap.gui.commands import CommandContext
labels = Labels.load_file(
"tests/data/json_format_v2/centered_pair_predictions.json"
)
context = CommandContext.from_labels(labels)
context.state["frame_idx"] = 123
context.state["video"] = labels.videos[0]
context.state["has_frame_range"] = True
context.state["frame_range"] = (10, 20)
win = DeleteDialog(context=context)
win.show()
app.exec_()
| 32.17284 | 81 | 0.610003 |
73d5951630a93780aacd2a640a2670e9fcadff7e
| 2,200 |
py
|
Python
|
app/views.py
|
rafalbielech/Django-Home-application
|
cff6429e33a91336cb377fe01e86ff274ca56f43
|
[
"MIT"
] | null | null | null |
app/views.py
|
rafalbielech/Django-Home-application
|
cff6429e33a91336cb377fe01e86ff274ca56f43
|
[
"MIT"
] | null | null | null |
app/views.py
|
rafalbielech/Django-Home-application
|
cff6429e33a91336cb377fe01e86ff274ca56f43
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django import template
from django.conf import settings
import json
@login_required(login_url="/login/")
def index(request):
"""
cameras --> used to populate template
    camera_ip --> used to populate the alias-to-IP map
access_tokens --> used to populate tokens
"""
return render(
request,
"app-page/home.html",
{
"rtsp_cameras": settings.CONFIG.get("local", {}).get("rtsp_camera", []),
"cameras": settings.CONFIG.get("local", {}).get("network_info", []),
"camera_ip": json.dumps(settings.CONFIG.get("local", {}).get("network_info", [])),
"access_tokens": json.dumps(settings.CONFIG.get("tokens", {})),
},
)
@login_required(login_url="/login/")
def parameter_inspection(request):
"""
    camera_ip --> used to populate the alias-to-IP map
access_tokens --> used to populate tokens
"""
return render(
request,
"app-page/param_stats.html",
{
"cameras": settings.CONFIG.get("local", {}).get("network_info", []),
"camera_ip": json.dumps(settings.CONFIG.get("local", {}).get("network_info", [])),
"access_tokens": json.dumps(settings.CONFIG.get("tokens", {})),
},
)
@login_required(login_url="/login/")
def pages(request):
context = {}
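    # Render the template named by the last URL segment; fall back to the
    # 404/500 error pages if it is missing or fails to render.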
try:
load_template = request.path.split("/")[-1]
context["segment"] = load_template
html_template = loader.get_template("template_examples/{}".format(load_template))
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template("error/page-404.html")
return HttpResponse(html_template.render(context, request))
except:
html_template = loader.get_template("error/page-500.html")
return HttpResponse(html_template.render(context, request))
| 33.846154 | 94 | 0.650909 |
e28e98c87126f82d4fcb0ea3fa2c0973cce581a6
| 46,338 |
py
|
Python
|
youtuatools/options.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 47 |
2021-01-02T07:44:50.000Z
|
2022-02-28T22:02:13.000Z
|
youtuatools/options.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 4 |
2021-02-07T03:35:13.000Z
|
2021-10-31T19:23:53.000Z
|
youtuatools/options.py
|
Pagasis/YouTua
|
edb44b2065a7224f8b26aaf76166bf7287901567
|
[
"MIT"
] | 8 |
2021-01-03T05:44:39.000Z
|
2021-11-01T05:46:32.000Z
|
from __future__ import unicode_literals
import os.path
import optparse
import re
import sys
from .downloader.external import list_external_downloaders
from .compat import (
compat_expanduser,
compat_get_terminal_size,
compat_getenv,
compat_kwargs,
compat_shlex_split,
)
from .utils import (
preferredencoding,
write_string,
)
from .version import __version__
def _hide_login_info(opts):
PRIVATE_OPTS = set(
[
"-p",
"--password",
"-u",
"--username",
"--video-password",
"--ap-password",
"--ap-username",
]
)
eqre = re.compile(
"^(?P<key>" + ("|".join(re.escape(po) for po in PRIVATE_OPTS)) + ")=.+$"
)
def _scrub_eq(o):
m = eqre.match(o)
if m:
return m.group("key") + "=PRIVATE"
else:
return o
opts = list(map(_scrub_eq, opts))
for idx, opt in enumerate(opts):
if opt in PRIVATE_OPTS and idx + 1 < len(opts):
opts[idx + 1] = "PRIVATE"
return opts
def parseOpts(overrideArguments=None):
def _readOptions(filename_bytes, default=[]):
try:
optionf = open(filename_bytes)
except IOError:
return default # silently skip if file is not present
try:
# FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
contents = optionf.read()
if sys.version_info < (3,):
contents = contents.decode(preferredencoding())
res = compat_shlex_split(contents, comments=True)
finally:
optionf.close()
return res
def _readUserConf():
xdg_config_home = compat_getenv("XDG_CONFIG_HOME")
if xdg_config_home:
userConfFile = os.path.join(xdg_config_home, "youtube-dl", "config")
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(xdg_config_home, "youtube-dl.conf")
else:
userConfFile = os.path.join(
compat_expanduser("~"), ".config", "youtube-dl", "config"
)
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(
compat_expanduser("~"), ".config", "youtube-dl.conf"
)
userConf = _readOptions(userConfFile, None)
if userConf is None:
appdata_dir = compat_getenv("appdata")
if appdata_dir:
userConf = _readOptions(
os.path.join(appdata_dir, "youtube-dl", "config"), default=None
)
if userConf is None:
userConf = _readOptions(
os.path.join(appdata_dir, "youtube-dl", "config.txt"),
default=None,
)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser("~"), "youtube-dl.conf"), default=None
)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser("~"), "youtube-dl.conf.txt"),
default=None,
)
if userConf is None:
userConf = []
return userConf
def _format_option_string(option):
"""('-o', '--option') -> -o, --format METAVAR"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ", ")
if option.takes_value():
opts.append(" %s" % option.metavar)
return "".join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(","))
# No need to wrap help messages if we're on a wide console
columns = compat_get_terminal_size().columns
max_width = columns if columns else 80
max_help_position = 80
fmt = optparse.IndentedHelpFormatter(
width=max_width, max_help_position=max_help_position
)
fmt.format_option_strings = _format_option_string
kw = {
"version": __version__,
"formatter": fmt,
"usage": "%prog [OPTIONS] URL [URL...]",
"conflict_handler": "resolve",
}
parser = optparse.OptionParser(**compat_kwargs(kw))
general = optparse.OptionGroup(parser, "General Options")
general.add_option(
"-h", "--help", action="help", help="Print this help text and exit"
)
general.add_option(
"--version", action="version", help="Print program version and exit"
)
general.add_option(
"-U",
"--update",
action="store_true",
dest="update_self",
help="Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)",
)
general.add_option(
"-i",
"--ignore-errors",
action="store_true",
dest="ignoreerrors",
default=False,
help="Continue on download errors, for example to skip unavailable videos in a playlist",
)
general.add_option(
"--abort-on-error",
action="store_false",
dest="ignoreerrors",
help="Abort downloading of further videos (in the playlist or the command line) if an error occurs",
)
general.add_option(
"--dump-user-agent",
action="store_true",
dest="dump_user_agent",
default=False,
help="Display the current browser identification",
)
general.add_option(
"--list-extractors",
action="store_true",
dest="list_extractors",
default=False,
help="List all supported extractors",
)
general.add_option(
"--extractor-descriptions",
action="store_true",
dest="list_extractor_descriptions",
default=False,
help="Output descriptions of all supported extractors",
)
general.add_option(
"--force-generic-extractor",
action="store_true",
dest="force_generic_extractor",
default=False,
help="Force extraction to use the generic extractor",
)
general.add_option(
"--default-search",
dest="default_search",
metavar="PREFIX",
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.',
)
general.add_option(
"--ignore-config",
action="store_true",
help="Do not read configuration files. "
"When given in the global configuration file /etc/youtube-dl.conf: "
"Do not read the user configuration in ~/.config/youtube-dl/config "
"(%APPDATA%/youtube-dl/config.txt on Windows)",
)
general.add_option(
"--config-location",
dest="config_location",
metavar="PATH",
help="Location of the configuration file; either the path to the config or its containing directory.",
)
general.add_option(
"--flat-playlist",
action="store_const",
dest="extract_flat",
const="in_playlist",
default=False,
help="Do not extract the videos of a playlist, only list them.",
)
general.add_option(
"--mark-watched",
action="store_true",
dest="mark_watched",
default=False,
help="Mark videos watched (YouTube only)",
)
general.add_option(
"--no-mark-watched",
action="store_false",
dest="mark_watched",
default=False,
help="Do not mark videos watched (YouTube only)",
)
general.add_option(
"--no-color",
"--no-colors",
action="store_true",
dest="no_color",
default=False,
help="Do not emit color codes in output",
)
network = optparse.OptionGroup(parser, "Network Options")
network.add_option(
"--proxy",
dest="proxy",
default=None,
metavar="URL",
help="Use the specified HTTP/HTTPS/SOCKS proxy. To enable "
"SOCKS proxy, specify a proper scheme. For example "
'socks5://127.0.0.1:1080/. Pass in an empty string (--proxy "") '
"for direct connection",
)
network.add_option(
"--socket-timeout",
dest="socket_timeout",
type=float,
default=None,
metavar="SECONDS",
help="Time to wait before giving up, in seconds",
)
network.add_option(
"--source-address",
metavar="IP",
dest="source_address",
default=None,
help="Client-side IP address to bind to",
)
network.add_option(
"-4",
"--force-ipv4",
action="store_const",
const="0.0.0.0",
dest="source_address",
help="Make all connections via IPv4",
)
network.add_option(
"-6",
"--force-ipv6",
action="store_const",
const="::",
dest="source_address",
help="Make all connections via IPv6",
)
geo = optparse.OptionGroup(parser, "Geo Restriction")
geo.add_option(
"--geo-verification-proxy",
dest="geo_verification_proxy",
default=None,
metavar="URL",
help="Use this proxy to verify the IP address for some geo-restricted sites. "
"The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading.",
)
geo.add_option(
"--cn-verification-proxy",
dest="cn_verification_proxy",
default=None,
metavar="URL",
help=optparse.SUPPRESS_HELP,
)
geo.add_option(
"--geo-bypass",
action="store_true",
dest="geo_bypass",
default=True,
help="Bypass geographic restriction via faking X-Forwarded-For HTTP header",
)
geo.add_option(
"--no-geo-bypass",
action="store_false",
dest="geo_bypass",
default=True,
help="Do not bypass geographic restriction via faking X-Forwarded-For HTTP header",
)
geo.add_option(
"--geo-bypass-country",
metavar="CODE",
dest="geo_bypass_country",
default=None,
help="Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code",
)
geo.add_option(
"--geo-bypass-ip-block",
metavar="IP_BLOCK",
dest="geo_bypass_ip_block",
default=None,
help="Force bypass geographic restriction with explicitly provided IP block in CIDR notation",
)
selection = optparse.OptionGroup(parser, "Video Selection")
selection.add_option(
"--playlist-start",
dest="playliststart",
metavar="NUMBER",
default=1,
type=int,
help="Playlist video to start at (default is %default)",
)
selection.add_option(
"--playlist-end",
dest="playlistend",
metavar="NUMBER",
default=None,
type=int,
help="Playlist video to end at (default is last)",
)
selection.add_option(
"--playlist-items",
dest="playlist_items",
metavar="ITEM_SPEC",
default=None,
help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.',
)
selection.add_option(
"--match-title",
dest="matchtitle",
metavar="REGEX",
help="Download only matching titles (regex or caseless sub-string)",
)
selection.add_option(
"--reject-title",
dest="rejecttitle",
metavar="REGEX",
help="Skip download for matching titles (regex or caseless sub-string)",
)
selection.add_option(
"--max-downloads",
dest="max_downloads",
metavar="NUMBER",
type=int,
default=None,
help="Abort after downloading NUMBER files",
)
selection.add_option(
"--min-filesize",
metavar="SIZE",
dest="min_filesize",
default=None,
help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)",
)
selection.add_option(
"--max-filesize",
metavar="SIZE",
dest="max_filesize",
default=None,
help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)",
)
selection.add_option(
"--date",
metavar="DATE",
dest="date",
default=None,
help="Download only videos uploaded in this date",
)
selection.add_option(
"--datebefore",
metavar="DATE",
dest="datebefore",
default=None,
help="Download only videos uploaded on or before this date (i.e. inclusive)",
)
selection.add_option(
"--dateafter",
metavar="DATE",
dest="dateafter",
default=None,
help="Download only videos uploaded on or after this date (i.e. inclusive)",
)
selection.add_option(
"--min-views",
metavar="COUNT",
dest="min_views",
default=None,
type=int,
help="Do not download any videos with less than COUNT views",
)
selection.add_option(
"--max-views",
metavar="COUNT",
dest="max_views",
default=None,
type=int,
help="Do not download any videos with more than COUNT views",
)
selection.add_option(
"--match-filter",
metavar="FILTER",
dest="match_filter",
default=None,
help=(
"Generic video filter. "
'Specify any key (see the "OUTPUT TEMPLATE" for a list of available keys) to '
"match if the key is present, "
"!key to check if the key is not present, "
'key > NUMBER (like "comment_count > 12", also works with '
">=, <, <=, !=, =) to compare against a number, "
"key = 'LITERAL' (like \"uploader = 'Mike Smith'\", also works with !=) "
"to match against a string literal "
"and & to require multiple matches. "
"Values which are not known are excluded unless you "
"put a question mark (?) after the operator. "
"For example, to only match videos that have been liked more than "
"100 times and disliked less than 50 times (or the dislike "
"functionality is not available at the given service), but who "
"also have a description, use --match-filter "
'"like_count > 100 & dislike_count <? 50 & description" .'
),
)
selection.add_option(
"--no-playlist",
action="store_true",
dest="noplaylist",
default=False,
help="Download only the video, if the URL refers to a video and a playlist.",
)
selection.add_option(
"--yes-playlist",
action="store_false",
dest="noplaylist",
default=False,
help="Download the playlist, if the URL refers to a video and a playlist.",
)
selection.add_option(
"--age-limit",
metavar="YEARS",
dest="age_limit",
default=None,
type=int,
help="Download only videos suitable for the given age",
)
selection.add_option(
"--download-archive",
metavar="FILE",
dest="download_archive",
help="Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.",
)
selection.add_option(
"--include-ads",
dest="include_ads",
action="store_true",
help="Download advertisements as well (experimental)",
)
authentication = optparse.OptionGroup(parser, "Authentication Options")
authentication.add_option(
"-u",
"--username",
dest="username",
metavar="USERNAME",
help="Login with this account ID",
)
authentication.add_option(
"-p",
"--password",
dest="password",
metavar="PASSWORD",
help="Account password. If this option is left out, youtube-dl will ask interactively.",
)
authentication.add_option(
"-2",
"--twofactor",
dest="twofactor",
metavar="TWOFACTOR",
help="Two-factor authentication code",
)
authentication.add_option(
"-n",
"--netrc",
action="store_true",
dest="usenetrc",
default=False,
help="Use .netrc authentication data",
)
authentication.add_option(
"--video-password",
dest="videopassword",
metavar="PASSWORD",
help="Video password (vimeo, youku)",
)
adobe_pass = optparse.OptionGroup(parser, "Adobe Pass Options")
adobe_pass.add_option(
"--ap-mso",
dest="ap_mso",
metavar="MSO",
help="Adobe Pass multiple-system operator (TV provider) identifier, use --ap-list-mso for a list of available MSOs",
)
adobe_pass.add_option(
"--ap-username",
dest="ap_username",
metavar="USERNAME",
help="Multiple-system operator account login",
)
adobe_pass.add_option(
"--ap-password",
dest="ap_password",
metavar="PASSWORD",
help="Multiple-system operator account password. If this option is left out, youtube-dl will ask interactively.",
)
adobe_pass.add_option(
"--ap-list-mso",
action="store_true",
dest="ap_list_mso",
default=False,
help="List all supported multiple-system operators",
)
video_format = optparse.OptionGroup(parser, "Video Format Options")
video_format.add_option(
"-f",
"--format",
action="store",
dest="format",
metavar="FORMAT",
default=None,
help='Video format code, see the "FORMAT SELECTION" for all the info',
)
video_format.add_option(
"--all-formats",
action="store_const",
dest="format",
const="all",
help="Download all available video formats",
)
video_format.add_option(
"--prefer-free-formats",
action="store_true",
dest="prefer_free_formats",
default=False,
help="Prefer free video formats unless a specific one is requested",
)
video_format.add_option(
"-F",
"--list-formats",
action="store_true",
dest="listformats",
help="List all available formats of requested videos",
)
video_format.add_option(
"--youtube-include-dash-manifest",
action="store_true",
dest="youtube_include_dash_manifest",
default=True,
help=optparse.SUPPRESS_HELP,
)
video_format.add_option(
"--youtube-skip-dash-manifest",
action="store_false",
dest="youtube_include_dash_manifest",
help="Do not download the DASH manifests and related data on YouTube videos",
)
video_format.add_option(
"--merge-output-format",
action="store",
dest="merge_output_format",
metavar="FORMAT",
default=None,
help=(
"If a merge is required (e.g. bestvideo+bestaudio), "
"output to given container format. One of mkv, mp4, ogg, webm, flv. "
"Ignored if no merge is required"
),
)
subtitles = optparse.OptionGroup(parser, "Subtitle Options")
subtitles.add_option(
"--write-sub",
"--write-srt",
action="store_true",
dest="writesubtitles",
default=False,
help="Write subtitle file",
)
subtitles.add_option(
"--write-auto-sub",
"--write-automatic-sub",
action="store_true",
dest="writeautomaticsub",
default=False,
help="Write automatically generated subtitle file (YouTube only)",
)
subtitles.add_option(
"--all-subs",
action="store_true",
dest="allsubtitles",
default=False,
help="Download all the available subtitles of the video",
)
subtitles.add_option(
"--list-subs",
action="store_true",
dest="listsubtitles",
default=False,
help="List all available subtitles for the video",
)
subtitles.add_option(
"--sub-format",
action="store",
dest="subtitlesformat",
metavar="FORMAT",
default="best",
help='Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"',
)
subtitles.add_option(
"--sub-lang",
"--sub-langs",
"--srt-lang",
action="callback",
dest="subtitleslangs",
metavar="LANGS",
type="str",
default=[],
callback=_comma_separated_values_options_callback,
help="Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags",
)
downloader = optparse.OptionGroup(parser, "Download Options")
downloader.add_option(
"-r",
"--limit-rate",
"--rate-limit",
dest="ratelimit",
metavar="RATE",
help="Maximum download rate in bytes per second (e.g. 50K or 4.2M)",
)
downloader.add_option(
"-R",
"--retries",
dest="retries",
metavar="RETRIES",
default=10,
help='Number of retries (default is %default), or "infinite".',
)
downloader.add_option(
"--fragment-retries",
dest="fragment_retries",
metavar="RETRIES",
default=10,
help='Number of retries for a fragment (default is %default), or "infinite" (DASH, hlsnative and ISM)',
)
downloader.add_option(
"--skip-unavailable-fragments",
action="store_true",
dest="skip_unavailable_fragments",
default=True,
help="Skip unavailable fragments (DASH, hlsnative and ISM)",
)
downloader.add_option(
"--abort-on-unavailable-fragment",
action="store_false",
dest="skip_unavailable_fragments",
help="Abort downloading when some fragment is not available",
)
downloader.add_option(
"--keep-fragments",
action="store_true",
dest="keep_fragments",
default=False,
help="Keep downloaded fragments on disk after downloading is finished; fragments are erased by default",
)
downloader.add_option(
"--buffer-size",
dest="buffersize",
metavar="SIZE",
default="1024",
help="Size of download buffer (e.g. 1024 or 16K) (default is %default)",
)
downloader.add_option(
"--no-resize-buffer",
action="store_true",
dest="noresizebuffer",
default=False,
help="Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.",
)
downloader.add_option(
"--http-chunk-size",
dest="http_chunk_size",
metavar="SIZE",
default=None,
help="Size of a chunk for chunk-based HTTP downloading (e.g. 10485760 or 10M) (default is disabled). "
"May be useful for bypassing bandwidth throttling imposed by a webserver (experimental)",
)
downloader.add_option(
"--test",
action="store_true",
dest="test",
default=False,
help=optparse.SUPPRESS_HELP,
)
downloader.add_option(
"--playlist-reverse",
action="store_true",
help="Download playlist videos in reverse order",
)
downloader.add_option(
"--playlist-random",
action="store_true",
help="Download playlist videos in random order",
)
downloader.add_option(
"--xattr-set-filesize",
dest="xattr_set_filesize",
action="store_true",
help="Set file xattribute ytdl.filesize with expected file size",
)
downloader.add_option(
"--hls-prefer-native",
dest="hls_prefer_native",
action="store_true",
default=None,
help="Use the native HLS downloader instead of ffmpeg",
)
downloader.add_option(
"--hls-prefer-ffmpeg",
dest="hls_prefer_native",
action="store_false",
default=None,
help="Use ffmpeg instead of the native HLS downloader",
)
downloader.add_option(
"--hls-use-mpegts",
dest="hls_use_mpegts",
action="store_true",
help="Use the mpegts container for HLS videos, allowing to play the "
"video while downloading (some players may not be able to play it)",
)
downloader.add_option(
"--external-downloader",
dest="external_downloader",
metavar="COMMAND",
help="Use the specified external downloader. "
"Currently supports %s" % ",".join(list_external_downloaders()),
)
downloader.add_option(
"--external-downloader-args",
dest="external_downloader_args",
metavar="ARGS",
help="Give these arguments to the external downloader",
)
workarounds = optparse.OptionGroup(parser, "Workarounds")
workarounds.add_option(
"--encoding",
dest="encoding",
metavar="ENCODING",
help="Force the specified encoding (experimental)",
)
workarounds.add_option(
"--no-check-certificate",
action="store_true",
dest="no_check_certificate",
default=False,
help="Suppress HTTPS certificate validation",
)
workarounds.add_option(
"--prefer-insecure",
"--prefer-unsecure",
action="store_true",
dest="prefer_insecure",
help="Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)",
)
workarounds.add_option(
"--user-agent",
metavar="UA",
dest="user_agent",
help="Specify a custom user agent",
)
workarounds.add_option(
"--referer",
metavar="URL",
dest="referer",
default=None,
help="Specify a custom referer, use if the video access is restricted to one domain",
)
workarounds.add_option(
"--add-header",
metavar="FIELD:VALUE",
dest="headers",
action="append",
help="Specify a custom HTTP header and its value, separated by a colon ':'. You can use this option multiple times",
)
workarounds.add_option(
"--bidi-workaround",
dest="bidi_workaround",
action="store_true",
help="Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH",
)
workarounds.add_option(
"--sleep-interval",
"--min-sleep-interval",
metavar="SECONDS",
dest="sleep_interval",
type=float,
help=(
"Number of seconds to sleep before each download when used alone "
"or a lower bound of a range for randomized sleep before each download "
"(minimum possible number of seconds to sleep) when used along with "
"--max-sleep-interval."
),
)
workarounds.add_option(
"--max-sleep-interval",
metavar="SECONDS",
dest="max_sleep_interval",
type=float,
help=(
"Upper bound of a range for randomized sleep before each download "
"(maximum possible number of seconds to sleep). Must only be used "
"along with --min-sleep-interval."
),
)
verbosity = optparse.OptionGroup(parser, "Verbosity / Simulation Options")
verbosity.add_option(
"-q",
"--quiet",
action="store_true",
dest="quiet",
default=False,
help="Activate quiet mode",
)
verbosity.add_option(
"--no-warnings",
dest="no_warnings",
action="store_true",
default=False,
help="Ignore warnings",
)
verbosity.add_option(
"-s",
"--simulate",
action="store_true",
dest="simulate",
default=False,
help="Do not download the video and do not write anything to disk",
)
verbosity.add_option(
"--skip-download",
action="store_true",
dest="skip_download",
default=False,
help="Do not download the video",
)
verbosity.add_option(
"-g",
"--get-url",
action="store_true",
dest="geturl",
default=False,
help="Simulate, quiet but print URL",
)
verbosity.add_option(
"-e",
"--get-title",
action="store_true",
dest="gettitle",
default=False,
help="Simulate, quiet but print title",
)
verbosity.add_option(
"--get-id",
action="store_true",
dest="getid",
default=False,
help="Simulate, quiet but print id",
)
verbosity.add_option(
"--get-thumbnail",
action="store_true",
dest="getthumbnail",
default=False,
help="Simulate, quiet but print thumbnail URL",
)
verbosity.add_option(
"--get-description",
action="store_true",
dest="getdescription",
default=False,
help="Simulate, quiet but print video description",
)
verbosity.add_option(
"--get-duration",
action="store_true",
dest="getduration",
default=False,
help="Simulate, quiet but print video length",
)
verbosity.add_option(
"--get-filename",
action="store_true",
dest="getfilename",
default=False,
help="Simulate, quiet but print output filename",
)
verbosity.add_option(
"--get-format",
action="store_true",
dest="getformat",
default=False,
help="Simulate, quiet but print output format",
)
verbosity.add_option(
"-j",
"--dump-json",
action="store_true",
dest="dumpjson",
default=False,
help='Simulate, quiet but print JSON information. See the "OUTPUT TEMPLATE" for a description of available keys.',
)
verbosity.add_option(
"-J",
"--dump-single-json",
action="store_true",
dest="dump_single_json",
default=False,
help="Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.",
)
verbosity.add_option(
"--print-json",
action="store_true",
dest="print_json",
default=False,
help="Be quiet and print the video information as JSON (video is still being downloaded).",
)
verbosity.add_option(
"--newline",
action="store_true",
dest="progress_with_newline",
default=False,
help="Output progress bar as new lines",
)
verbosity.add_option(
"--no-progress",
action="store_true",
dest="noprogress",
default=False,
help="Do not print progress bar",
)
verbosity.add_option(
"--console-title",
action="store_true",
dest="consoletitle",
default=False,
help="Display progress in console titlebar",
)
verbosity.add_option(
"-v",
"--verbose",
action="store_true",
dest="verbose",
default=False,
help="Print various debugging information",
)
verbosity.add_option(
"--dump-pages",
"--dump-intermediate-pages",
action="store_true",
dest="dump_intermediate_pages",
default=False,
help="Print downloaded pages encoded using base64 to debug problems (very verbose)",
)
verbosity.add_option(
"--write-pages",
action="store_true",
dest="write_pages",
default=False,
help="Write downloaded intermediary pages to files in the current directory to debug problems",
)
verbosity.add_option(
"--youtube-print-sig-code",
action="store_true",
dest="youtube_print_sig_code",
default=False,
help=optparse.SUPPRESS_HELP,
)
verbosity.add_option(
"--print-traffic",
"--dump-headers",
dest="debug_printtraffic",
action="store_true",
default=False,
help="Display sent and read HTTP traffic",
)
verbosity.add_option(
"-C",
"--call-home",
dest="call_home",
action="store_true",
default=False,
help="Contact the youtube-dl server for debugging",
)
verbosity.add_option(
"--no-call-home",
dest="call_home",
action="store_false",
default=False,
help="Do NOT contact the youtube-dl server for debugging",
)
filesystem = optparse.OptionGroup(parser, "Filesystem Options")
filesystem.add_option(
"-a",
"--batch-file",
dest="batchfile",
metavar="FILE",
help="File containing URLs to download ('-' for stdin), one URL per line. "
"Lines starting with '#', ';' or ']' are considered as comments and ignored.",
)
filesystem.add_option(
"--id",
default=False,
action="store_true",
dest="useid",
help="Use only video ID in file name",
)
filesystem.add_option(
"-o",
"--output",
dest="outtmpl",
metavar="TEMPLATE",
help=('Output filename template, see the "OUTPUT TEMPLATE" for all the info'),
)
filesystem.add_option(
"--output-na-placeholder",
dest="outtmpl_na_placeholder",
metavar="PLACEHOLDER",
default="NA",
help=(
'Placeholder value for unavailable meta fields in output filename template (default is "%default")'
),
)
filesystem.add_option(
"--autonumber-size",
dest="autonumber_size",
metavar="NUMBER",
type=int,
help=optparse.SUPPRESS_HELP,
)
filesystem.add_option(
"--autonumber-start",
dest="autonumber_start",
metavar="NUMBER",
default=1,
type=int,
help="Specify the start value for %(autonumber)s (default is %default)",
)
filesystem.add_option(
"--restrict-filenames",
action="store_true",
dest="restrictfilenames",
default=False,
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames',
)
filesystem.add_option(
"-A",
"--auto-number",
action="store_true",
dest="autonumber",
default=False,
help=optparse.SUPPRESS_HELP,
)
filesystem.add_option(
"-t",
"--title",
action="store_true",
dest="usetitle",
default=False,
help=optparse.SUPPRESS_HELP,
)
filesystem.add_option(
"-l",
"--literal",
default=False,
action="store_true",
dest="usetitle",
help=optparse.SUPPRESS_HELP,
)
filesystem.add_option(
"-w",
"--no-overwrites",
action="store_true",
dest="nooverwrites",
default=False,
help="Do not overwrite files",
)
filesystem.add_option(
"-c",
"--continue",
action="store_true",
dest="continue_dl",
default=True,
help="Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.",
)
filesystem.add_option(
"--no-continue",
action="store_false",
dest="continue_dl",
help="Do not resume partially downloaded files (restart from beginning)",
)
filesystem.add_option(
"--no-part",
action="store_true",
dest="nopart",
default=False,
help="Do not use .part files - write directly into output file",
)
filesystem.add_option(
"--no-mtime",
action="store_false",
dest="updatetime",
default=True,
help="Do not use the Last-modified header to set the file modification time",
)
filesystem.add_option(
"--write-description",
action="store_true",
dest="writedescription",
default=False,
help="Write video description to a .description file",
)
filesystem.add_option(
"--write-info-json",
action="store_true",
dest="writeinfojson",
default=False,
help="Write video metadata to a .info.json file",
)
filesystem.add_option(
"--write-annotations",
action="store_true",
dest="writeannotations",
default=False,
help="Write video annotations to a .annotations.xml file",
)
filesystem.add_option(
"--load-info-json",
"--load-info",
dest="load_info_filename",
metavar="FILE",
help='JSON file containing the video information (created with the "--write-info-json" option)',
)
filesystem.add_option(
"--cookies",
dest="cookiefile",
metavar="FILE",
help="File to read cookies from and dump cookie jar in",
)
filesystem.add_option(
"--cache-dir",
dest="cachedir",
default=None,
metavar="DIR",
help="Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.",
)
filesystem.add_option(
"--no-cache-dir",
action="store_const",
const=False,
dest="cachedir",
help="Disable filesystem caching",
)
filesystem.add_option(
"--rm-cache-dir",
action="store_true",
dest="rm_cachedir",
help="Delete all filesystem cache files",
)
thumbnail = optparse.OptionGroup(parser, "Thumbnail Options")
thumbnail.add_option(
"--write-thumbnail",
action="store_true",
dest="writethumbnail",
default=False,
help="Write thumbnail image to disk",
)
thumbnail.add_option(
"--write-all-thumbnails",
action="store_true",
dest="write_all_thumbnails",
default=False,
help="Write all thumbnail image formats to disk",
)
thumbnail.add_option(
"--list-thumbnails",
action="store_true",
dest="list_thumbnails",
default=False,
help="Simulate and list all available thumbnail formats",
)
postproc = optparse.OptionGroup(parser, "Post-processing Options")
postproc.add_option(
"-x",
"--extract-audio",
action="store_true",
dest="extractaudio",
default=False,
help="Convert video files to audio-only files (requires ffmpeg/avconv and ffprobe/avprobe)",
)
postproc.add_option(
"--audio-format",
metavar="FORMAT",
dest="audioformat",
default="best",
help='Specify audio format: "best", "aac", "flac", "mp3", "m4a", "opus", "vorbis", or "wav"; "%default" by default; No effect without -x',
)
postproc.add_option(
"--audio-quality",
metavar="QUALITY",
dest="audioquality",
default="5",
help="Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)",
)
postproc.add_option(
"--recode-video",
metavar="FORMAT",
dest="recodevideo",
default=None,
help="Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)",
)
postproc.add_option(
"--postprocessor-args",
dest="postprocessor_args",
metavar="ARGS",
help="Give these arguments to the postprocessor",
)
postproc.add_option(
"-k",
"--keep-video",
action="store_true",
dest="keepvideo",
default=False,
help="Keep the video file on disk after the post-processing; the video is erased by default",
)
postproc.add_option(
"--no-post-overwrites",
action="store_true",
dest="nopostoverwrites",
default=False,
help="Do not overwrite post-processed files; the post-processed files are overwritten by default",
)
postproc.add_option(
"--embed-subs",
action="store_true",
dest="embedsubtitles",
default=False,
help="Embed subtitles in the video (only for mp4, webm and mkv videos)",
)
postproc.add_option(
"--embed-thumbnail",
action="store_true",
dest="embedthumbnail",
default=False,
help="Embed thumbnail in the audio as cover art",
)
postproc.add_option(
"--add-metadata",
action="store_true",
dest="addmetadata",
default=False,
help="Write metadata to the video file",
)
postproc.add_option(
"--metadata-from-title",
metavar="FORMAT",
dest="metafromtitle",
help="Parse additional metadata like song title / artist from the video title. "
"The format syntax is the same as --output. Regular expression with "
"named capture groups may also be used. "
"The parsed parameters replace existing values. "
'Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like '
'"Coldplay - Paradise". '
'Example (regex): --metadata-from-title "(?P<artist>.+?) - (?P<title>.+)"',
)
postproc.add_option(
"--xattrs",
action="store_true",
dest="xattrs",
default=False,
help="Write metadata to the video file's xattrs (using dublin core and xdg standards)",
)
postproc.add_option(
"--fixup",
metavar="POLICY",
dest="fixup",
default="detect_or_warn",
help="Automatically correct known faults of the file. "
"One of never (do nothing), warn (only emit a warning), "
"detect_or_warn (the default; fix file if we can, warn otherwise)",
)
postproc.add_option(
"--prefer-avconv",
action="store_false",
dest="prefer_ffmpeg",
help="Prefer avconv over ffmpeg for running the postprocessors",
)
postproc.add_option(
"--prefer-ffmpeg",
action="store_true",
dest="prefer_ffmpeg",
help="Prefer ffmpeg over avconv for running the postprocessors (default)",
)
postproc.add_option(
"--ffmpeg-location",
"--avconv-location",
metavar="PATH",
dest="ffmpeg_location",
help="Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.",
)
postproc.add_option(
"--exec",
metavar="CMD",
dest="exec_cmd",
help="Execute a command on the file after downloading and post-processing, similar to find's -exec syntax. Example: --exec 'adb push {} /sdcard/Music/ && rm {}'",
)
postproc.add_option(
"--convert-subs",
"--convert-subtitles",
metavar="FORMAT",
dest="convertsubtitles",
default=None,
help="Convert the subtitles to other format (currently supported: srt|ass|vtt|lrc)",
)
parser.add_option_group(general)
parser.add_option_group(network)
parser.add_option_group(geo)
parser.add_option_group(selection)
parser.add_option_group(downloader)
parser.add_option_group(filesystem)
parser.add_option_group(thumbnail)
parser.add_option_group(verbosity)
parser.add_option_group(workarounds)
parser.add_option_group(video_format)
parser.add_option_group(subtitles)
parser.add_option_group(authentication)
parser.add_option_group(adobe_pass)
parser.add_option_group(postproc)
if overrideArguments is not None:
opts, args = parser.parse_args(overrideArguments)
if opts.verbose:
write_string("[debug] Override config: " + repr(overrideArguments) + "\n")
else:
def compat_conf(conf):
if sys.version_info < (3,):
return [a.decode(preferredencoding(), "replace") for a in conf]
return conf
command_line_conf = compat_conf(sys.argv[1:])
opts, args = parser.parse_args(command_line_conf)
system_conf = user_conf = custom_conf = []
if "--config-location" in command_line_conf:
location = compat_expanduser(opts.config_location)
if os.path.isdir(location):
location = os.path.join(location, "youtube-dl.conf")
if not os.path.exists(location):
parser.error("config-location %s does not exist." % location)
custom_conf = _readOptions(location)
elif "--ignore-config" in command_line_conf:
pass
else:
system_conf = _readOptions("/etc/youtube-dl.conf")
if "--ignore-config" not in system_conf:
user_conf = _readUserConf()
argv = system_conf + user_conf + custom_conf + command_line_conf
opts, args = parser.parse_args(argv)
if opts.verbose:
for conf_label, conf in (
("System config", system_conf),
("User config", user_conf),
("Custom config", custom_conf),
("Command-line args", command_line_conf),
):
write_string(
"[debug] %s: %s\n" % (conf_label, repr(_hide_login_info(conf)))
)
return parser, opts, args
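# A minimal usage sketch, assuming the surrounding module is youtube-dl's options
# module and that parseOpts accepts overrideArguments as shown; the import path
# and URL below are placeholders.
#
#     from youtube_dl.options import parseOpts
#     parser, opts, args = parseOpts(
#         overrideArguments=['-q', '--no-warnings', 'https://example.com/video'])
#     print(opts.quiet, opts.no_warnings, args)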
| 32.156836 | 391 | 0.587207 |
be24de1b6910b3804a6e51f66f3e528cfc475dd5
| 1,828 |
py
|
Python
|
ENIAC/api/eniac_bps/loopback.py
|
webclinic017/fast_tools
|
144d764e4f169d3ab3753dcc6a79db9f9449de59
|
[
"Apache-2.0"
] | 1 |
2021-12-11T16:33:54.000Z
|
2021-12-11T16:33:54.000Z
|
ENIAC/api/eniac_bps/loopback.py
|
webclinic017/fast_tools
|
144d764e4f169d3ab3753dcc6a79db9f9449de59
|
[
"Apache-2.0"
] | null | null | null |
ENIAC/api/eniac_bps/loopback.py
|
webclinic017/fast_tools
|
144d764e4f169d3ab3753dcc6a79db9f9449de59
|
[
"Apache-2.0"
] | 3 |
2021-11-22T09:46:43.000Z
|
2022-01-28T22:33:07.000Z
|
# loop calculation
from sanic.blueprints import Blueprint
# from sanic import response
# from sanic_openapi import doc
# from kafka import KafkaProducer
# import json
# from ..models import StrategyDto
loop = Blueprint('loop', url_prefix='/loop', strict_slashes=True)
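# A minimal wiring sketch, assuming the blueprint is registered on a Sanic app
# elsewhere in the project; the app name below is a placeholder.
#
#     from sanic import Sanic
#     app = Sanic("eniac_api")
#     app.blueprint(loop)  # routes under /loop become available once registered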
# # todo: upload file
# @loop.put('/test', stream=True)
# @doc.summary('Upload file')
# async def loop_test(request):
# result = ''
# while True:
# body = await request.stream.get()
# if body is None:
# break
# result += body.decode('utf-8')
# name = loop_cls_re(result)
# ip_module = importlib.import_module(".", f"api.btscript.{name}")
# ip_module_cls = getattr(ip_module, "demo")
# cls_obj = ip_module_cls()
# ip = cls_obj.get_ip()
# return response.text(ip)
# todo: send JSON message
# @loop.route("/calculate", methods=["POST"], version='v1', name='Dto')
# @doc.summary('Backtest calculation')
# @doc.description('Accept backtest data and run the backtest calculation')
# @doc.consumes(StrategyDto, location='body', required=True)
# async def post_data(request):
# rule = request.json
# btrun.startRun(rule)
# return response.json(
# {'message': 'Congratulations Your Strategy, Go Fly!'},
# headers={'X-Served-By': 'sanic'},
# status=200
# )
# # todo: send message to Kafka
# @loop.route("/testbt", methods=["POST"])#, version='v1', name='Dto')
# @doc.summary('Send Kafka backtest message')
# @doc.description('Send a JSON message to Kafka to trigger the backtest calculation')
# @doc.consumes(StrategyDto, location='body', required=True)
# async def post_data(request):
# producer = KafkaProducer(bootstrap_servers= kafkaList)
# producer.send('back_trader', json.dumps(request.json, ensure_ascii=False).encode('utf-8'))
# producer.close()
# return response.json(
# {'message': 'Strategy Success To Kafka!'},
# headers={'X-Served-By': 'sanic'},
# status=200
# )
| 32.642857 | 96 | 0.643873 |
31c8a7cec4ca773907f5c66caf8a132cd9468cfb
| 7,494 |
py
|
Python
|
salt/modules/boto_sns.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12 |
2015-01-21T00:18:25.000Z
|
2021-07-11T07:35:26.000Z
|
salt/modules/boto_sns.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 86 |
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
salt/modules/boto_sns.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12 |
2015-01-05T09:50:42.000Z
|
2019-08-19T01:43:40.000Z
|
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SNS
:configuration: This module accepts explicit sns credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sns.keyid: GKTADJGHEIQSXMKKRBJ08H
sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sns.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
import salt.utils.versions
log = logging.getLogger(__name__)
# Import third party libs
try:
#pylint: disable=unused-import
import boto
import boto.sns
#pylint: enable=unused-import
logging.getLogger('boto').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def __virtual__():
'''
Only load if boto libraries exist.
'''
has_boto_reqs = salt.utils.versions.check_boto_reqs(
check_boto3=False
)
if has_boto_reqs is True:
__utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__)
return has_boto_reqs
def get_all_topics(region=None, key=None, keyid=None, profile=None):
'''
    Returns a list of all topics.
CLI example::
salt myminion boto_sns.get_all_topics
'''
cache_key = _cache_get_key()
try:
return __context__[cache_key]
except KeyError:
pass
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
__context__[cache_key] = {}
# TODO: support >100 SNS topics (via NextToken)
topics = conn.get_all_topics()
for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']:
short_name = t['TopicArn'].split(':')[-1]
__context__[cache_key][short_name] = t['TopicArn']
return __context__[cache_key]
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if an SNS topic exists.
CLI example::
salt myminion boto_sns.exists mytopic region=us-east-1
'''
topics = get_all_topics(region=region, key=key, keyid=keyid,
profile=profile)
if name.startswith('arn:aws:sns:'):
return name in list(topics.values())
else:
return name in list(topics.keys())
def create(name, region=None, key=None, keyid=None, profile=None):
'''
Create an SNS topic.
CLI example to create a topic::
salt myminion boto_sns.create mytopic region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.create_topic(name)
log.info('Created SNS topic %s', name)
_invalidate_cache()
return True
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SNS topic.
CLI example to delete a topic::
salt myminion boto_sns.delete mytopic region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_topic(get_arn(name, region, key, keyid, profile))
log.info('Deleted SNS topic %s', name)
_invalidate_cache()
return True
def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None):
'''
Get list of all subscriptions to a specific topic.
    CLI example::
salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1
'''
cache_key = _subscriptions_cache_key(name)
try:
return __context__[cache_key]
except KeyError:
pass
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile))
__context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']
return __context__[cache_key]
def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None):
'''
Subscribe to a Topic.
    CLI example::
salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint)
log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic)
try:
del __context__[_subscriptions_cache_key(topic)]
except KeyError:
pass
return True
def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None):
'''
Unsubscribe a specific SubscriptionArn of a topic.
CLI Example:
.. code-block:: bash
salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1
.. versionadded:: 2016.11.0
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if subscription_arn.startswith('arn:aws:sns:') is False:
return False
try:
conn.unsubscribe(subscription_arn)
log.info('Unsubscribe %s to %s topic', subscription_arn, topic)
except Exception as e:
log.error('Unsubscribe Error', exc_info=True)
return False
else:
__context__.pop(_subscriptions_cache_key(topic), None)
return True
def get_arn(name, region=None, key=None, keyid=None, profile=None):
'''
Returns the full ARN for a given topic name.
CLI example::
salt myminion boto_sns.get_arn mytopic
'''
if name.startswith('arn:aws:sns:'):
return name
account_id = __salt__['boto_iam.get_account_id'](
region=region, key=key, keyid=keyid, profile=profile
)
return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile),
account_id, name)
def _get_region(region=None, profile=None):
if profile and 'region' in profile:
return profile['region']
if not region and __salt__['config.option'](profile):
_profile = __salt__['config.option'](profile)
region = _profile.get('region', None)
if not region and __salt__['config.option']('sns.region'):
region = __salt__['config.option']('sns.region')
if not region:
region = 'us-east-1'
return region
def _subscriptions_cache_key(name):
return '{0}_{1}_subscriptions'.format(_cache_get_key(), name)
def _invalidate_cache():
try:
del __context__[_cache_get_key()]
except KeyError:
pass
def _cache_get_key():
return 'boto_sns.topics_cache'
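# A minimal composition sketch, assuming the Salt loader has injected __salt__
# as usual for execution modules; topic name and region are placeholders.
#
#     if not __salt__['boto_sns.exists']('mytopic', region='us-east-1'):
#         __salt__['boto_sns.create']('mytopic', region='us-east-1')
#     arn = __salt__['boto_sns.get_arn']('mytopic', region='us-east-1')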
| 29.046512 | 119 | 0.678143 |
1a348a49900dd056d743b15b33cc3c9fb614b52b
| 4,576 |
py
|
Python
|
tokenization/vocab_tokenizers.py
|
popescuaaa/tudir
|
019846d1941f09fc7dc2e1c07e33d3f3d9f184dc
|
[
"MIT"
] | null | null | null |
tokenization/vocab_tokenizers.py
|
popescuaaa/tudir
|
019846d1941f09fc7dc2e1c07e33d3f3d9f184dc
|
[
"MIT"
] | null | null | null |
tokenization/vocab_tokenizers.py
|
popescuaaa/tudir
|
019846d1941f09fc7dc2e1c07e33d3f3d9f184dc
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.append(os.getcwd())
import torch
import tokenizers
import sklearn
from tokenizers import SentencePieceBPETokenizer
from tokenizers import SentencePieceUnigramTokenizer
from tokenizers import BertWordPieceTokenizer
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.trainers import WordPieceTrainer, BpeTrainer, UnigramTrainer
# whitespace pretokenizer ?
from tokenizers.pre_tokenizers import Whitespace
# use bert pretokenizer
from typing import List
unk_token = "<UNK>"
spl_tokens = ["<UNK>", "<SEP>", "<MASK>", "<CLS>"]
def is_filepath_list(filelist: List[str]) -> bool:
"""
Check if a list of filepaths is a list of files.
"""
for file in filelist:
if not os.path.isfile(file):
return False
return True
def train_iterator_mul_files(files):
for path in files:
with open(path, "r") as f:
for line in f:
yield line
def train_WordPieceTokenizer(file_list: List[str], vocab_size=30_000, min_frequency=5, limit_alphabet=500,
save: bool = True):
"""
Train WP tokenizer from a list of files.
"""
tokenizer = Tokenizer(WordPiece(unk_token=unk_token))
trainer = WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=spl_tokens,
show_progress=True,
limit_alphabet=limit_alphabet
)
tokenizer.pre_tokenizer = Whitespace()
if is_filepath_list(file_list):
tokenizer.train(file_list, trainer=trainer)
else:
        tokenizer.train_from_iterator(file_list, trainer=trainer)
if save:
tokenizer.save("./WP_tok-trained.json")
tokenizer = Tokenizer.from_file("./WP_tok-trained.json")
return tokenizer
def train_SentencePieceBPETokenizer(files: List[str], vocab_size=30_000, min_frequency=5, limit_alphabet=500,
save: bool = True):
"""
    Train SP_BPE tokenizer from a list of files.
"""
if is_filepath_list(files):
train_it = train_iterator_mul_files(files)
else:
train_it = files
tokenizer = SentencePieceBPETokenizer()
tokenizer.train_from_iterator(
train_it,
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=True,
limit_alphabet=limit_alphabet,
)
if save:
tokenizer.save("./SP_BPE_tok-trained.json")
tokenizer = Tokenizer.from_file("./SP_BPE_tok-trained.json")
return tokenizer
def train_SentencePieceUGTokenizer(filelist: List[str], vocab_size=30_000, save: bool = True):
"""
    Train SP_UG tokenizer from a list of files.
"""
if is_filepath_list(filelist):
train_it = train_iterator_mul_files(filelist)
else:
train_it = filelist
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(
train_it,
vocab_size=vocab_size,
show_progress=True
)
if save:
tokenizer.save("./SP_UG_tok-trained.json")
tokenizer = Tokenizer.from_file("./SP_UG_tok-trained.json")
return tokenizer
def train_BertWordPieceTokenizer(filelist: List[str], vocab_size=30_000, min_frequency=5, limit_alphabet=500,
save: bool = True):
"""
    Train BERT tokenizer from a list of files.
"""
if is_filepath_list(filelist):
train_it = train_iterator_mul_files(filelist)
else:
train_it = filelist
tokenizer = BertWordPieceTokenizer()
tokenizer.normalizer = tokenizers.normalizers.BertNormalizer(strip_accents=True, lowercase=True)
tokenizer.train_from_iterator(
train_it,
vocab_size=vocab_size,
show_progress=True,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
)
if save:
tokenizer.save("./BERT_tok-trained.json")
tokenizer = Tokenizer.from_file("./BERT_tok-trained.json")
return tokenizer
def get_vocab_from_tokenizer(tokenizer: Tokenizer):
"""
Get vocab from tokenizer.
"""
vocab = tokenizer.get_vocab()
return vocab
if __name__ == '__main__':
# create corpus
print(os.getcwd())
corpus = os.listdir(".corpus_caches/orcas/medium")
corpus = [".corpus_caches/orcas/medium/" + file for file in corpus]
tokenizer = train_BertWordPieceTokenizer(corpus, vocab_size=30_000)
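    # A small follow-up sketch, assuming the training above completed; the query
    # string is a placeholder and the standard `tokenizers` encode API applies.
    #
    #     enc = tokenizer.encode("an example query about ranking documents")
    #     print(enc.tokens)
    #     print(enc.ids)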
| 29.908497 | 110 | 0.656906 |
b25e8b28532399c398d659e54ae34b1147636fea
| 14,443 |
py
|
Python
|
gdal/swig/python/osgeo/utils/gdal_edit.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 3,100 |
2015-01-02T10:33:40.000Z
|
2022-03-31T02:06:51.000Z
|
gdal/swig/python/osgeo/utils/gdal_edit.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 3,496 |
2015-01-06T16:53:30.000Z
|
2022-03-31T20:18:51.000Z
|
gdal/swig/python/osgeo/utils/gdal_edit.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 2,036 |
2015-01-08T20:22:12.000Z
|
2022-03-31T10:24:08.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL samples
# Purpose: Edit in place various information of an existing GDAL dataset
# Author: Even Rouault <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2011-2013, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
from osgeo import gdal
from osgeo import osr
def Usage():
print('Usage: gdal_edit [--help-general] [-ro] [-a_srs srs_def]')
print(' [-a_ullr ulx uly lrx lry] [-a_ulurll ulx uly urx ury llx lly]')
print(' [-tr xres yres] [-unsetgt] [-unsetrpc] [-a_nodata value] [-unsetnodata]')
print(' [-offset value] [-scale value] [-units value]')
print(' [-colorinterp_X red|green|blue|alpha|gray|undefined]*')
print(' [-unsetstats] [-stats] [-approx_stats]')
print(' [-setstats min max mean stddev]')
print(' [-gcp pixel line easting northing [elevation]]*')
print(' [-unsetmd] [-oo NAME=VALUE]* [-mo "META-TAG=VALUE"]* datasetname')
print('')
print('Edit in place various information of an existing GDAL dataset.')
return -1
def ArgIsNumeric(s):
i = 0
while i < len(s):
if (s[i] < '0' or s[i] > '9') and s[i] != '.' and s[i] != 'e' and s[i] != '+' and s[i] != '-':
return False
i = i + 1
return True
def gdal_edit(argv):
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
return -1
datasetname = None
srs = None
ulx = None
uly = None
urx = None
ury = None
llx = None
lly = None
lrx = None
lry = None
nodata = None
unsetnodata = False
units = None
xres = None
yres = None
unsetgt = False
unsetstats = False
stats = False
setstats = False
approx_stats = False
unsetmd = False
ro = False
molist = []
gcp_list = []
open_options = []
offset = []
scale = []
colorinterp = {}
unsetrpc = False
i = 1
argc = len(argv)
while i < argc:
if argv[i] == '-ro':
ro = True
elif argv[i] == '-a_srs' and i < len(argv) - 1:
srs = argv[i + 1]
i = i + 1
elif argv[i] == '-a_ullr' and i < len(argv) - 4:
ulx = float(argv[i + 1])
i = i + 1
uly = float(argv[i + 1])
i = i + 1
lrx = float(argv[i + 1])
i = i + 1
lry = float(argv[i + 1])
i = i + 1
elif argv[i] == '-a_ulurll' and i < len(argv) - 6:
ulx = float(argv[i + 1])
i = i + 1
uly = float(argv[i + 1])
i = i + 1
urx = float(argv[i + 1])
i = i + 1
ury = float(argv[i + 1])
i = i + 1
llx = float(argv[i + 1])
i = i + 1
lly = float(argv[i + 1])
i = i + 1
elif argv[i] == '-tr' and i < len(argv) - 2:
xres = float(argv[i + 1])
i = i + 1
yres = float(argv[i + 1])
i = i + 1
elif argv[i] == '-a_nodata' and i < len(argv) - 1:
nodata = float(argv[i + 1])
i = i + 1
elif argv[i] == '-scale' and i < len(argv) -1:
scale.append(float(argv[i+1]))
i = i + 1
while i < len(argv) - 1 and ArgIsNumeric(argv[i+1]):
scale.append(float(argv[i+1]))
i = i + 1
elif argv[i] == '-offset' and i < len(argv) - 1:
offset.append(float(argv[i+1]))
i = i + 1
while i < len(argv) - 1 and ArgIsNumeric(argv[i+1]):
offset.append(float(argv[i+1]))
i = i + 1
elif argv[i] == '-mo' and i < len(argv) - 1:
molist.append(argv[i + 1])
i = i + 1
elif argv[i] == '-gcp' and i + 4 < len(argv):
pixel = float(argv[i + 1])
i = i + 1
line = float(argv[i + 1])
i = i + 1
x = float(argv[i + 1])
i = i + 1
y = float(argv[i + 1])
i = i + 1
if i + 1 < len(argv) and ArgIsNumeric(argv[i + 1]):
z = float(argv[i + 1])
i = i + 1
else:
z = 0
gcp = gdal.GCP(x, y, z, pixel, line)
gcp_list.append(gcp)
elif argv[i] == '-unsetgt':
unsetgt = True
elif argv[i] == '-unsetrpc':
unsetrpc = True
elif argv[i] == '-unsetstats':
unsetstats = True
elif argv[i] == '-approx_stats':
stats = True
approx_stats = True
elif argv[i] == '-stats':
stats = True
elif argv[i] == '-setstats' and i < len(argv)-4:
stats = True
setstats = True
if argv[i + 1] != 'None':
statsmin = float(argv[i + 1])
else:
statsmin = None
i = i + 1
if argv[i + 1] != 'None':
statsmax = float(argv[i + 1])
else:
statsmax = None
i = i + 1
if argv[i + 1] != 'None':
statsmean = float(argv[i + 1])
else:
statsmean = None
i = i + 1
if argv[i + 1] != 'None':
statsdev = float(argv[i + 1])
else:
statsdev = None
i = i + 1
elif argv[i] == '-units' and i < len(argv) - 1:
units = argv[i + 1]
i = i + 1
elif argv[i] == '-unsetmd':
unsetmd = True
elif argv[i] == '-unsetnodata':
unsetnodata = True
elif argv[i] == '-oo' and i < len(argv) - 1:
open_options.append(argv[i + 1])
i = i + 1
        elif argv[i].startswith('-colorinterp_') and i < len(argv) - 1:
band = int(argv[i][len('-colorinterp_'):])
val = argv[i + 1]
if val.lower() == 'red':
val = gdal.GCI_RedBand
elif val.lower() == 'green':
val = gdal.GCI_GreenBand
elif val.lower() == 'blue':
val = gdal.GCI_BlueBand
elif val.lower() == 'alpha':
val = gdal.GCI_AlphaBand
elif val.lower() == 'gray' or val.lower() == 'grey':
val = gdal.GCI_GrayIndex
elif val.lower() == 'undefined':
val = gdal.GCI_Undefined
else:
sys.stderr.write('Unsupported color interpretation %s.\n' % val +
'Only red, green, blue, alpha, gray, undefined are supported.\n')
return Usage()
colorinterp[band] = val
i = i + 1
elif argv[i][0] == '-':
sys.stderr.write('Unrecognized option : %s\n' % argv[i])
return Usage()
elif datasetname is None:
datasetname = argv[i]
else:
sys.stderr.write('Unexpected option : %s\n' % argv[i])
return Usage()
i = i + 1
if datasetname is None:
return Usage()
if (srs is None and lry is None and yres is None and not unsetgt and
not unsetstats and not stats and not setstats and nodata is None and
not units and not molist and not unsetmd and not gcp_list and
not unsetnodata and not colorinterp and
            not scale and not offset and not unsetrpc):
print('No option specified')
print('')
return Usage()
exclusive_option = 0
if lry is not None:
exclusive_option = exclusive_option + 1
if lly is not None: # -a_ulurll
exclusive_option = exclusive_option + 1
if yres is not None:
exclusive_option = exclusive_option + 1
if unsetgt:
exclusive_option = exclusive_option + 1
if exclusive_option > 1:
print('-a_ullr, -a_ulurll, -tr and -unsetgt options are exclusive.')
print('')
return Usage()
if unsetstats and stats:
print('-unsetstats and either -stats or -approx_stats options are exclusive.')
print('')
return Usage()
    if unsetnodata and nodata is not None:
        print('-unsetnodata and -a_nodata options are exclusive.')
print('')
return Usage()
if open_options is not None:
if ro:
ds = gdal.OpenEx(datasetname, gdal.OF_RASTER, open_options=open_options)
else:
ds = gdal.OpenEx(datasetname, gdal.OF_RASTER | gdal.OF_UPDATE, open_options=open_options)
# GDAL 1.X compat
elif ro:
ds = gdal.Open(datasetname)
else:
ds = gdal.Open(datasetname, gdal.GA_Update)
if ds is None:
return -1
if scale:
if len(scale) == 1:
scale = scale * ds.RasterCount
elif len(scale) != ds.RasterCount:
print('If more than one scale value is provided, their number must match the number of bands.')
print('')
return Usage()
if offset:
if len(offset) == 1:
offset = offset * ds.RasterCount
elif len(offset) != ds.RasterCount:
print('If more than one offset value is provided, their number must match the number of bands.')
print('')
return Usage()
wkt = None
if srs == '' or srs == 'None':
ds.SetProjection('')
elif srs is not None:
sr = osr.SpatialReference()
if sr.SetFromUserInput(srs) != 0:
print('Failed to process SRS definition: %s' % srs)
return -1
wkt = sr.ExportToWkt()
if not gcp_list:
ds.SetProjection(wkt)
if lry is not None:
gt = [ulx, (lrx - ulx) / ds.RasterXSize, 0,
uly, 0, (lry - uly) / ds.RasterYSize]
ds.SetGeoTransform(gt)
elif lly is not None: # -a_ulurll
gt = [ulx, (urx - ulx) / ds.RasterXSize, (llx - ulx) / ds.RasterYSize,
uly, (ury - uly) / ds.RasterXSize, (lly - uly) / ds.RasterYSize]
ds.SetGeoTransform(gt)
if yres is not None:
gt = ds.GetGeoTransform()
# Doh ! why is gt a tuple and not an array...
gt = [gt[j] for j in range(6)]
gt[1] = xres
gt[5] = yres
ds.SetGeoTransform(gt)
if unsetgt:
# For now only the GTiff drivers understands full-zero as a hint
# to unset the geotransform
if ds.GetDriver().ShortName == 'GTiff':
ds.SetGeoTransform([0, 0, 0, 0, 0, 0])
else:
ds.SetGeoTransform([0, 1, 0, 0, 0, 1])
if gcp_list:
if wkt is None:
wkt = ds.GetGCPProjection()
if wkt is None:
wkt = ''
ds.SetGCPs(gcp_list, wkt)
if nodata is not None:
for i in range(ds.RasterCount):
ds.GetRasterBand(i + 1).SetNoDataValue(nodata)
elif unsetnodata:
for i in range(ds.RasterCount):
ds.GetRasterBand(i + 1).DeleteNoDataValue()
if scale:
for i in range(ds.RasterCount):
ds.GetRasterBand(i + 1).SetScale(scale[i])
if offset:
for i in range(ds.RasterCount):
ds.GetRasterBand(i + 1).SetOffset(offset[i])
if units:
for i in range(ds.RasterCount):
ds.GetRasterBand(i + 1).SetUnitType(units)
if unsetstats:
for i in range(ds.RasterCount):
band = ds.GetRasterBand(i + 1)
for key in band.GetMetadata().keys():
if key.startswith('STATISTICS_'):
band.SetMetadataItem(key, None)
if stats:
for i in range(ds.RasterCount):
ds.GetRasterBand(i + 1).ComputeStatistics(approx_stats)
if setstats:
for i in range(ds.RasterCount):
if statsmin is None or statsmax is None or statsmean is None or statsdev is None:
ds.GetRasterBand(i+1).ComputeStatistics(approx_stats)
min,max,mean,stdev = ds.GetRasterBand(i+1).GetStatistics(approx_stats,True)
if statsmin is None:
statsmin = min
if statsmax is None:
statsmax = max
if statsmean is None:
statsmean = mean
if statsdev is None:
statsdev = stdev
ds.GetRasterBand(i+1).SetStatistics(statsmin, statsmax, statsmean, statsdev)
if molist:
if unsetmd:
md = {}
else:
md = ds.GetMetadata()
for moitem in molist:
equal_pos = moitem.find('=')
if equal_pos > 0:
md[moitem[0:equal_pos]] = moitem[equal_pos + 1:]
ds.SetMetadata(md)
elif unsetmd:
ds.SetMetadata({})
for band in colorinterp:
ds.GetRasterBand(band).SetColorInterpretation(colorinterp[band])
if unsetrpc:
ds.SetMetadata(None, 'RPC')
ds = band = None
return 0
def main(argv):
return gdal_edit(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
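# A minimal programmatic sketch, assuming the module is importable from
# osgeo.utils as laid out in this tree; the file name and SRS are placeholders.
#
#     from osgeo.utils.gdal_edit import gdal_edit
#     # the first element mirrors argv[0] and is skipped by the option loop
#     gdal_edit(['gdal_edit', '-a_srs', 'EPSG:4326', '-a_nodata', '0', 'input.tif'])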
| 33.666667 | 108 | 0.509797 |
f04ab7b3d23641af333b0a22ec60bcc6073e2a0a
| 4,571 |
py
|
Python
|
train.py
|
ultronify/cartpole-tf-aac
|
707586df3954d815e5a77a843347cf3560141387
|
[
"MIT"
] | null | null | null |
train.py
|
ultronify/cartpole-tf-aac
|
707586df3954d815e5a77a843347cf3560141387
|
[
"MIT"
] | null | null | null |
train.py
|
ultronify/cartpole-tf-aac
|
707586df3954d815e5a77a843347cf3560141387
|
[
"MIT"
] | null | null | null |
import gym
import numpy as np
import tensorflow as tf
from tensorflow import optimizers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
class ActorCriticModel(Model):
def __init__(self, action_space_size, state_space_size):
super(ActorCriticModel, self).__init__()
self.action_space_size = action_space_size
self.state_space_size = state_space_size
# The critic part
self.val_input = Dense(units=256, input_dim=self.state_space_size,
activation='relu', kernel_initializer='he_uniform')
self.val_output = Dense(units=1, activation='linear')
# The actor part
self.policy_input = Dense(units=256, input_dim=self.state_space_size,
activation='relu', kernel_initializer='he_uniform')
self.policy_output = Dense(
units=self.action_space_size, activation='softmax')
def call(self, inputs, **kwargs):
# The critic part
val_x = self.val_input(inputs)
val = self.val_output(val_x)
# The actor part
action_x = self.policy_input(inputs)
action_dist = self.policy_output(action_x)
return action_dist, val
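# A shape-check sketch, assuming CartPole's 4-dimensional state and 2 actions;
# the model maps a batch of states to (action probabilities, state values).
#
#     model = ActorCriticModel(action_space_size=2, state_space_size=4)
#     probs, vals = model(tf.convert_to_tensor([[0.0, 0.0, 0.0, 0.0]], dtype=tf.float32))
#     # probs.shape == (1, 2) and vals.shape == (1, 1)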
def sample_action(action_space_size, probs, use_max=False):
if use_max:
return np.argmax(probs)
else:
return np.random.choice(action_space_size, p=probs/probs.sum())
def eval(model, env, max_eps, action_space_size):
total_reward = 0.0
for _ in range(max_eps):
done = False
state = env.reset()
while not done:
action_dist, _ = model(tf.convert_to_tensor([state]))
action = sample_action(
action_space_size, action_dist.numpy()[0], use_max=True)
state, reward, done, _ = env.step(action)
total_reward += reward
avg_reward = total_reward / max_eps
return avg_reward
def compute_discounted_rewards(rewards, gamma):
discounted_reward = 0
discounted_rewards = []
for reward in rewards[::-1]:
discounted_reward = gamma * discounted_reward + reward
discounted_rewards.append([discounted_reward])
return discounted_rewards[::-1]
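# A quick worked example of the return computation above, assuming gamma = 0.9:
# rewards [1, 1, 1] yield discounted returns [[2.71], [1.9], [1.0]].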
def train(max_eps=1000, gamma=0.99):
env = gym.make('CartPole-v0')
eval_env = gym.make('CartPole-v0')
action_space_size = env.action_space.n
state_space_size = env.observation_space.shape[0]
print('Initialize with action space size {0} and state space size {1}'.format(
action_space_size, state_space_size))
actor_critic_model = ActorCriticModel(action_space_size, state_space_size)
optimizer = tf.optimizers.Adam(learning_rate=1e-3)
for eps in range(max_eps):
state = env.reset()
done = False
rewards, actions, states = [], [], []
while not done:
action_dist, _ = actor_critic_model(
tf.convert_to_tensor([state], dtype=tf.float32))
action = sample_action(
action_space_size, action_dist.numpy()[0])
next_state, reward, done, _ = env.step(action)
rewards.append(reward)
actions.append(action)
states.append(state)
state = next_state
# Calculate the gradient after the episode ends
with tf.GradientTape() as tape:
probs, vals = actor_critic_model(
tf.convert_to_tensor(states, dtype=tf.float32))
q_vals = tf.convert_to_tensor(
compute_discounted_rewards(rewards, gamma), dtype=tf.float32)
advantages = q_vals - vals
value_loss = advantages ** 2
clipped_probs = tf.clip_by_value(probs, 1e-10, 1-1e-10)
log_probs = tf.math.log(clipped_probs)
action_onehot = tf.one_hot(
actions, action_space_size, dtype=tf.float32)
policy_loss = -(log_probs * action_onehot) * advantages
entropy_loss = -tf.reduce_sum(probs * log_probs)
loss = tf.reduce_mean(0.5 * value_loss) + \
tf.reduce_mean(policy_loss) + 0.01 * entropy_loss
gradients = tape.gradient(loss, actor_critic_model.trainable_weights)
optimizer.apply_gradients(
zip(gradients, actor_critic_model.trainable_weights))
eval_score = eval(actor_critic_model, eval_env, 10, action_space_size)
print(
'Finished training {0}/{1} with score {2}'.format(eps, max_eps, eval_score))
env.close()
print('Done!')
if __name__ == '__main__':
train()
| 39.405172 | 88 | 0.643404 |
aead49251f6b3306225a5b97f50295ffb36ae4b1
| 753 |
py
|
Python
|
examples/sqs_queue_create.py
|
Neki/aiobotocore
|
0b71254446ef7ba6d93016d5a8b5b02c665a7b0b
|
[
"Apache-2.0"
] | 772 |
2016-02-12T13:20:26.000Z
|
2022-03-29T20:51:37.000Z
|
examples/sqs_queue_create.py
|
Neki/aiobotocore
|
0b71254446ef7ba6d93016d5a8b5b02c665a7b0b
|
[
"Apache-2.0"
] | 826 |
2016-02-14T11:31:25.000Z
|
2022-03-31T20:41:31.000Z
|
examples/sqs_queue_create.py
|
Neki/aiobotocore
|
0b71254446ef7ba6d93016d5a8b5b02c665a7b0b
|
[
"Apache-2.0"
] | 154 |
2016-04-28T16:27:33.000Z
|
2022-03-05T19:41:52.000Z
|
# Boto should get credentials from ~/.aws/credentials or the environment
import asyncio
from aiobotocore.session import get_session
async def go():
session = get_session()
async with session.create_client('sqs', region_name='us-west-2') as client:
print('Creating test_queue1')
response = await client.create_queue(QueueName='test_queue1')
queue_url = response['QueueUrl']
response = await client.list_queues()
print('Queue URLs:')
for queue_name in response.get('QueueUrls', []):
print(f' {queue_name}')
print(f'Deleting queue {queue_url}')
await client.delete_queue(QueueUrl=queue_url)
print('Done')
if __name__ == '__main__':
asyncio.run(go())
| 25.965517 | 79 | 0.661355 |
1df3aa3c66284441527ce80a79ca00d41ced19a0
| 3,195 |
py
|
Python
|
projecto/settings.py
|
bfaguiar/Venda-d-Garagem
|
6a3bc554afe302bb65a0d2e18ddf796e60998442
|
[
"MIT"
] | null | null | null |
projecto/settings.py
|
bfaguiar/Venda-d-Garagem
|
6a3bc554afe302bb65a0d2e18ddf796e60998442
|
[
"MIT"
] | null | null | null |
projecto/settings.py
|
bfaguiar/Venda-d-Garagem
|
6a3bc554afe302bb65a0d2e18ddf796e60998442
|
[
"MIT"
] | 1 |
2022-03-08T18:01:18.000Z
|
2022-03-08T18:01:18.000Z
|
"""
Django settings for projecto project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@yp05zoq6r3-l_e+ol8p^%nqe-!sr0$rzs^0!a_4xswxt$oi4l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app.apps.AppConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'projecto.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'app' / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'projecto.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'app/static')
| 25.357143 | 91 | 0.695149 |
d189923bb0e3eb39599153ba2ab5974d84e0220b
| 16,646 |
py
|
Python
|
tests/test_graph.py
|
usnistgov/corr-reprozip
|
17a7b614d859736a37cb09582c4ae90c29dd4ffb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_graph.py
|
usnistgov/corr-reprozip
|
17a7b614d859736a37cb09582c4ae90c29dd4ffb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_graph.py
|
usnistgov/corr-reprozip
|
17a7b614d859736a37cb09582c4ae90c29dd4ffb
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2014-2017 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
from __future__ import print_function, unicode_literals
import json
import os
from rpaths import Path
import sys
import unittest
from reprounzip.common import FILE_READ, FILE_WRITE, FILE_WDIR, FILE_STAT
from reprounzip.unpackers import graph
from reprounzip.unpackers.common import UsageError
from tests.common import make_database
class TestGraph(unittest.TestCase):
"""Generates graphs from a fabricated trace database."""
maxDiff = None
@classmethod
def setUpClass(cls):
if sys.version_info < (2, 7, 3):
raise unittest.SkipTest("Python version not supported by reprozip")
cls._trace = Path.tempdir(prefix='rpz_testdb_')
conn = make_database([
('proc', 0, None, False),
('open', 0, "/some/dir", True, FILE_WDIR),
('exec', 0, "/bin/sh", "/some/dir", "sh\0script_1\0"),
('open', 0, "/usr/share/1_one.pyc", False, FILE_READ),
('open', 0, "/some/dir/one", False, FILE_WRITE),
('exec', 0, "/usr/bin/python", "/some/dir", "python\0drive.py\0"),
('open', 0, "/some/dir/drive.py", False, FILE_READ),
('open', 0, "/some/dir/one", False, FILE_READ),
('open', 0, "/etc/2_two.cfg", False, FILE_READ),
('proc', 1, 0, False),
('open', 1, "/some/dir", True, FILE_WDIR),
('exec', 1, "/some/dir/experiment", "/some/dir", "experiment\0"),
('open', 1, "/some/dir/one", False, FILE_STAT),
('open', 1, "/usr/lib/2_one.so", False, FILE_READ),
('open', 1, "/some/dir/two", False, FILE_WRITE),
('exec', 0, "/usr/bin/wc", "/some/dir", "wc\0out.txt\0"),
('exit', 1),
('open', 0, "/some/dir/two", False, FILE_READ),
('exit', 0),
('proc', 2, None, False),
('open', 2, "/some/dir", True, FILE_WDIR),
('exec', 2, "/bin/sh", "/some/dir", "sh\0script_2\0"),
('proc', 3, 2, True),
('exit', 3),
('proc', 4, 2, False),
('open', 4, "/some/dir", True, FILE_WDIR),
('exec', 4, "/usr/bin/python", "/some/dir", "python\0-\0"),
('open', 4, "/some/dir/one", False, FILE_READ),
('open', 4, "/some/dir/thing", False, FILE_WRITE),
('exec', 2, "/some/dir/report", "/some/dir", "./report\0-v\0"),
('open', 2, "/some/dir/thing", False, FILE_READ),
('exit', 4),
('open', 2, "/some/dir/result", False, FILE_WRITE),
('exit', 2),
], cls._trace / 'trace.sqlite3')
conn.close()
with (cls._trace / 'config.yml').open('w', encoding='utf-8') as fp:
fp.write("""\
version: "1.1"
runs:
- id: first run
architecture: x86_64
argv: [sh, "script_1"]
binary: /some/dir/one
distribution: [debian, '8.0']
environ: {USER: remram}
exitcode: 0
uid: 1000
gid: 1000
hostname: test
workingdir: /user/dir
- architecture: x86_64
argv: ["sh", "script_2"]
binary: /some/dir/one
distribution: [debian, '8.0']
environ: {USER: remram}
exitcode: 0
uid: 1000
gid: 1000
hostname: test
workingdir: /user/dir
inputs_outputs:
- name: important
path: "/some/dir/one"
written_by_runs: [0]
read_by_runs: [1]
packages:
- name: pkg1
version: "1.0"
size: 10000
packfiles: true
files:
- "/usr/share/1_one.py"
- "/usr/share/1_two.py"
- "/usr/bin/wc"
- name: pkg2
version: "1.0"
size: 10000
packfiles: true
files:
- "/usr/lib/2_one.so"
- "/etc/2_two.cfg"
meta: {"section": "libs"}
- name: python
version: "2.7"
size: 5000000
packfiles: true
files:
- "/usr/bin/python"
meta: {"section": "python"}
- name: unused
version: "0.1"
size: 100
packfiles: true
files:
- "/an/unused/file"
other_files:
- "/bin/sh"
- "/usr/share/1_one.pyc"
- "/some/dir/drive.py"
- "/some/dir/experiment"
- "/some/dir/report"
""")
@classmethod
def tearDownClass(cls):
cls._trace.rmtree()
def do_dot_test(self, expected, **kwargs):
graph.Process._id_gen = 0
fd, target = Path.tempfile(prefix='rpz_testgraph_', suffix='.dot')
os.close(fd)
try:
graph.generate(target,
self._trace / 'config.yml',
self._trace / 'trace.sqlite3',
**kwargs)
if expected is False:
self.fail("DOT generation didn't fail as expected")
with target.open('r') as fp:
self.assertEqual(expected, fp.read())
except UsageError:
if expected is not False:
raise
finally:
target.remove()
def do_json_test(self, expected, **kwargs):
graph.Process._id_gen = 0
fd, target = Path.tempfile(prefix='rpz_testgraph_', suffix='.json')
os.close(fd)
try:
graph.generate(target,
self._trace / 'config.yml',
self._trace / 'trace.sqlite3',
graph_format='json', **kwargs)
if expected is False:
self.fail("JSON generation didn't fail as expected")
with target.open('r', encoding='utf-8') as fp:
obj = json.load(fp)
self.assertEqual(expected, obj)
except SystemExit:
if expected is not False:
raise
finally:
target.remove()
def do_tests(self, expected_dot, expected_json, **kwargs):
self.do_dot_test(expected_dot, **kwargs)
self.do_json_test(expected_json, **kwargs)
def test_simple(self):
self.do_tests(
"""\
digraph G {
rankdir=LR;
/* programs */
node [shape=box fontcolor=white fillcolor=black style="filled,rounded"];
subgraph cluster_run0 {
label="first run";
prog0 [label="/bin/sh (0)"];
prog1 [label="/usr/bin/python (0)"];
prog0 -> prog1 [label="exec"];
prog2 [label="/some/dir/experiment (1)"];
prog1 -> prog2 [label="fork+exec"];
prog3 [label="/usr/bin/wc (0)"];
prog1 -> prog3 [label="exec"];
}
subgraph cluster_run1 {
label="run1";
prog4 [label="/bin/sh (2)"];
prog5 [label="/bin/sh (3)",fillcolor="#666666"];
prog4 -> prog5 [label="thread"];
prog6 [label="/usr/bin/python (4)"];
prog4 -> prog6 [label="fork+exec"];
prog7 [label="/some/dir/report (2)"];
prog4 -> prog7 [label="exec"];
}
node [shape=ellipse fontcolor="#131C39" fillcolor="#C9D2ED"];
/* system packages */
subgraph cluster_pkg0 {
label="pkg1 1.0";
"/usr/bin/wc";
}
subgraph cluster_pkg1 {
label="pkg2 1.0";
"/etc/2_two.cfg";
"/usr/lib/2_one.so";
}
subgraph cluster_pkg2 {
label="python 2.7";
"/usr/bin/python";
}
/* other files */
"/bin/sh";
"/some/dir/drive.py";
"/some/dir/experiment";
"/some/dir/one" [fillcolor="#A3B4E0", label="important\\n/some/dir/one"];
"/some/dir/report";
"/some/dir/result";
"/some/dir/thing";
"/some/dir/two";
"/usr/share/1_one.pyc";
"/bin/sh" -> prog0 [style=bold, label="sh script_1"];
"/usr/share/1_one.pyc" -> prog0 [color="#8888CC"];
prog0 -> "/some/dir/one" [color="#000088"];
"/usr/bin/python" -> prog1 [style=bold, label="python drive.py"];
"/some/dir/drive.py" -> prog1 [color="#8888CC"];
"/some/dir/one" -> prog1 [color="#8888CC"];
"/etc/2_two.cfg" -> prog1 [color="#8888CC"];
"/some/dir/experiment" -> prog2 [style=bold, label="experiment"];
"/usr/lib/2_one.so" -> prog2 [color="#8888CC"];
prog2 -> "/some/dir/two" [color="#000088"];
"/usr/bin/wc" -> prog3 [style=bold, label="wc out.txt"];
"/some/dir/two" -> prog3 [color="#8888CC"];
"/bin/sh" -> prog4 [style=bold, label="sh script_2"];
"/usr/bin/python" -> prog6 [style=bold, label="python -"];
"/some/dir/one" -> prog6 [color="#8888CC"];
prog6 -> "/some/dir/thing" [color="#000088"];
"/some/dir/report" -> prog7 [style=bold, label="./report -v"];
"/some/dir/thing" -> prog7 [color="#8888CC"];
prog7 -> "/some/dir/result" [color="#000088"];
}
""",
{'packages': [{'name': 'pkg1', 'version': '1.0',
'files': ['/usr/bin/wc'],
'section': None},
{'name': 'pkg2', 'version': '1.0',
'files': ['/etc/2_two.cfg',
'/usr/lib/2_one.so'],
'section': 'libs'},
{'name': 'python', 'version': '2.7',
'files': ['/usr/bin/python'],
'section': 'python'}],
'other_files': ['/bin/sh',
'/some/dir/drive.py',
'/some/dir/experiment',
'/some/dir/one',
'/some/dir/report',
'/some/dir/result',
'/some/dir/thing',
'/some/dir/two',
'/usr/share/1_one.pyc'],
'inputs_outputs': [{'name': "important", 'path': "/some/dir/one",
'written_by_runs': [0], 'read_by_runs': [1]}],
'runs': [{'name': "first run",
'processes': [
{'name': '0',
'long_name': 'sh (0)',
'description': '/bin/sh\n0',
'argv': ['sh', 'script_1'],
'start_time': 0,
'exit_time': 5,
'is_thread': False,
'parent': None,
'reads': ['/bin/sh', '/usr/share/1_one.pyc'],
'writes': ['/some/dir/one']},
{'name': '0',
'long_name': 'python (0)',
'description': '/usr/bin/python\n0',
'argv': ['python', 'drive.py'],
'start_time': 5,
'exit_time': 15,
'is_thread': False,
'parent': [0, 'exec'],
'reads': ['/usr/bin/python',
'/some/dir/drive.py',
'/some/dir/one',
'/etc/2_two.cfg'],
'writes': []},
{'name': '1',
'long_name': 'experiment (1)',
'description': '/some/dir/experiment\n1',
'argv': ['experiment'],
'start_time': 9,
'exit_time': 16,
'is_thread': False,
'parent': [1, 'fork+exec'],
'reads': ['/some/dir/experiment',
'/usr/lib/2_one.so'],
'writes': ['/some/dir/two']},
{'name': '0',
'long_name': 'wc (0)',
'description': '/usr/bin/wc\n0',
'argv': ['wc', 'out.txt'],
'start_time': 15,
'exit_time': 18,
'is_thread': False,
'parent': [1, 'exec'],
'reads': ['/usr/bin/wc', '/some/dir/two'],
'writes': []}]},
{'name': "run1",
'processes': [
{'name': '2',
'long_name': 'sh (2)',
'description': '/bin/sh\n2',
'argv': ['sh', 'script_2'],
'start_time': 19,
'exit_time': 29,
'is_thread': False,
'parent': None,
'reads': ['/bin/sh'],
'writes': []},
{'name': '3',
'long_name': 'sh (3)',
'description': '/bin/sh\n3',
'argv': ['sh', 'script_2'],
'start_time': 22,
'exit_time': 23,
'is_thread': True,
'parent': [0, 'fork'],
'reads': [],
'writes': []},
{'name': '4',
'long_name': 'python (4)',
'description': '/usr/bin/python\n4',
'argv': ['python', '-'],
'start_time': 24,
'exit_time': 31,
'is_thread': False,
'parent': [0, 'fork+exec'],
'reads': ['/usr/bin/python', '/some/dir/one'],
'writes': ['/some/dir/thing']},
{'name': '2',
'long_name': 'report (2)',
'description': '/some/dir/report\n2',
'argv': ['./report', '-v'],
'start_time': 29,
'exit_time': 33,
'is_thread': False,
'parent': [0, 'exec'],
'reads': ['/some/dir/report', '/some/dir/thing'],
'writes': ['/some/dir/result']}]}]})
def test_collapsed_packages(self):
self.do_tests(
"""\
digraph G {
rankdir=LR;
/* programs */
node [shape=box fontcolor=white fillcolor=black style="filled,rounded"];
subgraph cluster_run0 {
label="first run";
prog0 [label="/bin/sh (0)"];
prog1 [label="/usr/bin/python (0)"];
prog0 -> prog1 [label="exec"];
prog2 [label="/some/dir/experiment (1)"];
prog1 -> prog2 [label="fork+exec"];
prog3 [label="/usr/bin/wc (0)"];
prog1 -> prog3 [label="exec"];
}
subgraph cluster_run1 {
label="run1";
prog4 [label="/bin/sh (2)"];
prog5 [label="/bin/sh (3)",fillcolor="#666666"];
prog4 -> prog5 [label="thread"];
prog6 [label="/usr/bin/python (4)"];
prog4 -> prog6 [label="fork+exec"];
prog7 [label="/some/dir/report (2)"];
prog4 -> prog7 [label="exec"];
}
node [shape=ellipse fontcolor="#131C39" fillcolor="#C9D2ED"];
/* system packages */
"pkg pkg1" [shape=box,label="pkg1 1.0"];
"pkg pkg2" [shape=box,label="pkg2 1.0"];
"pkg python" [shape=box,label="python 2.7"];
/* other files */
"/bin/sh";
"/some/dir/drive.py";
"/some/dir/experiment";
"/some/dir/one" [fillcolor="#A3B4E0", label="important\\n/some/dir/one"];
"/some/dir/report";
"/some/dir/result";
"/some/dir/thing";
"/some/dir/two";
"/bin/sh" -> prog0 [style=bold, label="sh script_1"];
"pkg pkg1" -> prog0 [color="#8888CC"];
prog0 -> "/some/dir/one" [color="#000088"];
"pkg python" -> prog1 [style=bold, label="python drive.py"];
"/some/dir/drive.py" -> prog1 [color="#8888CC"];
"/some/dir/one" -> prog1 [color="#8888CC"];
"pkg pkg2" -> prog1 [color="#8888CC"];
"/some/dir/experiment" -> prog2 [style=bold, label="experiment"];
"pkg pkg2" -> prog2 [color="#8888CC"];
prog2 -> "/some/dir/two" [color="#000088"];
"pkg pkg1" -> prog3 [style=bold, label="wc out.txt"];
"/some/dir/two" -> prog3 [color="#8888CC"];
"/bin/sh" -> prog4 [style=bold, label="sh script_2"];
"pkg python" -> prog6 [style=bold, label="python -"];
"/some/dir/one" -> prog6 [color="#8888CC"];
prog6 -> "/some/dir/thing" [color="#000088"];
"/some/dir/report" -> prog7 [style=bold, label="./report -v"];
"/some/dir/thing" -> prog7 [color="#8888CC"];
prog7 -> "/some/dir/result" [color="#000088"];
}
""",
False,
level_pkgs='package',
regex_replaces=[('.pyc$', '.py')])
| 37.406742 | 79 | 0.453562 |
de2a48c8c397ca42027ad123fc42a86d807d34fd | 714 | py | Python | Chapter 04/graph.py | bpbpublications/Python-Quick-Interview-Guide | ab4ff3e670b116a4db6b9e1f0ccba8424640704d | ["MIT"] | 1 | 2021-05-14T19:53:41.000Z | 2021-05-14T19:53:41.000Z | Chapter 04/graph.py | bpbpublications/Python-Quick-Interview-Guide | ab4ff3e670b116a4db6b9e1f0ccba8424640704d | ["MIT"] | null | null | null | Chapter 04/graph.py | bpbpublications/Python-Quick-Interview-Guide | ab4ff3e670b116a4db6b9e1f0ccba8424640704d | ["MIT"] | null | null | null |
from collections import defaultdict
class Graph:
def __init__(self):
self.graph = defaultdict(list)#Create empty dictionary
def insertEdge(self, v1, v2):
self.graph[v1].append(v2)#Add v2 to list of v1
self.graph[v2].append(v1)#Add v1 to list of v2
def printGraph(self):
for node in self.graph:
print(node,':',end=' ')#print vertex-id:
for v in self.graph[node]:#print every vertex in the list
print(v,end=' ')
print('\n')#print new line at end of the list
#Driver code
g = Graph()
g.insertEdge('a', 'b')
g.insertEdge('b', 'c')
g.insertEdge('b', 'd')
g.insertEdge('d', 'e')
g.printGraph()
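# Editorial note (not part of the original file): with Python 3.7+ insertion-ordered
# dicts, the driver above prints roughly the following adjacency lists, each followed
# by a blank line:
#   a : b
#   b : a c d
#   c : b
#   d : b e
#   e : d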
| 28.56 | 70 | 0.581232 |
9ae4b726f5a051910641ea70d76b9647f85bbcaa | 4,928 | py | Python | scripts/irods/test/test_quotas.py | tempoz/irods | a64c5e9cfb86af725f8f20ae940591adef8e02f0 | ["BSD-3-Clause"] | 1 | 2020-05-31T17:00:37.000Z | 2020-05-31T17:00:37.000Z | scripts/irods/test/test_quotas.py | tempoz/irods | a64c5e9cfb86af725f8f20ae940591adef8e02f0 | ["BSD-3-Clause"] | null | null | null | scripts/irods/test/test_quotas.py | tempoz/irods | a64c5e9cfb86af725f8f20ae940591adef8e02f0 | ["BSD-3-Clause"] | 2 | 2015-10-29T03:37:30.000Z | 2015-12-16T15:09:14.000Z |
import inspect
import os
import re
import sys
import time
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from . import resource_suite
from .. import lib
from .. import paths
from ..core_file import temporary_core_file
from ..configuration import IrodsConfig
from .rule_texts_for_tests import rule_texts
class Test_Quotas(resource_suite.ResourceBase, unittest.TestCase):
plugin_name = IrodsConfig().default_rule_engine_plugin
class_name = 'Test_Quotas'
def setUp(self):
super(Test_Quotas, self).setUp()
def tearDown(self):
super(Test_Quotas, self).tearDown()
def test_iquota__3044(self):
filename_1 = 'test_iquota__3044_1'
filename_2 = 'test_iquota__3044_2'
with temporary_core_file() as core:
time.sleep(1) # remove once file hash fix is committed #2279
core.add_rule(rule_texts[self.plugin_name][self.class_name][inspect.currentframe().f_code.co_name])
time.sleep(1) # remove once file hash fix is committed #2279
for quotatype in [['suq',self.admin.username], ['sgq','public']]: # user and group
for quotaresc in [self.testresc, 'total']: # resc and total
cmd = 'iadmin {0} {1} {2} 10000000'.format(quotatype[0], quotatype[1], quotaresc) # set high quota
self.admin.assert_icommand(cmd.split())
cmd = 'iquota'
self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'Nearing quota') # not over yet
lib.make_file(filename_1, 1024, contents='arbitrary')
cmd = 'iput -R {0} {1}'.format(self.testresc, filename_1) # should succeed
self.admin.assert_icommand(cmd.split())
cmd = 'iadmin cu' # calculate, update db
self.admin.assert_icommand(cmd.split())
cmd = 'iquota'
self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'Nearing quota') # not over yet
cmd = 'iadmin {0} {1} {2} 40'.format(quotatype[0], quotatype[1], quotaresc) # set low quota
self.admin.assert_icommand(cmd.split())
cmd = 'iquota'
self.admin.assert_icommand(cmd.split(), 'STDOUT_SINGLELINE', 'OVER QUOTA') # confirm it's over
lib.make_file(filename_2, 1024, contents='arbitrary')
cmd = 'iput -R {0} {1}'.format(self.testresc, filename_2) # should fail
self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'SYS_RESC_QUOTA_EXCEEDED')
cmd = 'iadmin {0} {1} {2} 0'.format(quotatype[0], quotatype[1], quotaresc) # remove quota
self.admin.assert_icommand(cmd.split())
cmd = 'iadmin cu' # update db
self.admin.assert_icommand(cmd.split())
cmd = 'iput -R {0} {1}'.format(self.testresc, filename_2) # should succeed again
self.admin.assert_icommand(cmd.split())
cmd = 'irm -rf {0}'.format(filename_1) # clean up
self.admin.assert_icommand(cmd.split())
cmd = 'irm -rf {0}'.format(filename_2) # clean up
self.admin.assert_icommand(cmd.split())
            time.sleep(2) # remove once file hash fix is committed #2279
def test_iquota_empty__3048(self):
cmd = 'iadmin suq' # no arguments
self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'ERROR: missing username parameter') # usage information
cmd = 'iadmin sgq' # no arguments
self.admin.assert_icommand(cmd.split(), 'STDERR_SINGLELINE', 'ERROR: missing group name parameter') # usage information
def test_iquota_u_updates_usage__issue_3508(self):
filename = 'test_quota_u_updates_usage__issue_3508'
lib.make_file(filename, 1024, contents='arbitrary')
self.admin.assert_icommand(['iadmin', 'suq', self.admin.username, 'demoResc', '10000000'])
self.admin.assert_icommand(['iput', filename])
self.admin.assert_icommand(['iadmin', 'cu'])
# In 4.2.0, iadmin cu does not actually update the r_quota_usage table, so will remain at -10,000,000
# When fixed, will show the actual value, and so the string below will not match and the assert will fail
self.admin.assert_icommand_fail(['iquota', '-u', self.admin.username], 'STDOUT_SINGLELINE', 'Over: -10,000,000 (-10 million) bytes')
def test_filter_out_groups_when_selecting_user__issue_3507(self):
self.admin.assert_icommand(['igroupadmin', 'mkgroup', 'test_group_3507'])
# Attempt to set user quota passing in the name of a group; should fail
self.admin.assert_icommand(['iadmin', 'suq', 'test_group_3507', 'demoResc', '10000000'], 'STDERR_SINGLELINE', 'CAT_INVALID_USER')
| 54.755556 | 141 | 0.630276 |
66f74a5f805188c2af606948e61064f9646f8d44 | 18,044 | py | Python | test/fake_account_client.py | daka1510/qiskit-ibm-provider | 74fdae4457e6f15ba445a8742a25ac7a714ea51b | ["Apache-2.0"] | null | null | null | test/fake_account_client.py | daka1510/qiskit-ibm-provider | 74fdae4457e6f15ba445a8742a25ac7a714ea51b | ["Apache-2.0"] | null | null | null | test/fake_account_client.py | daka1510/qiskit-ibm-provider | 74fdae4457e6f15ba445a8742a25ac7a714ea51b | ["Apache-2.0"] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Fake AccountClient."""
import copy
# TODO This can probably be merged with the one in test_ibm_job_states
import time
import uuid
import warnings
from concurrent.futures import ThreadPoolExecutor, wait
from datetime import timedelta, datetime
from random import randrange
from qiskit.test.mock.backends.poughkeepsie.fake_poughkeepsie import FakePoughkeepsie
from qiskit_ibm_provider.api.exceptions import (
RequestsApiError,
UserTimeoutExceededError,
)
from qiskit_ibm_provider.apiconstants import ApiJobStatus, API_JOB_FINAL_STATES
VALID_RESULT_RESPONSE = {
"backend_name": "ibmqx2",
"backend_version": "1.1.1",
"job_id": "XC1323XG2",
"qobj_id": "Experiment1",
"success": True,
"results": [],
}
"""A valid job result response."""
VALID_RESULT = {
"header": {
"name": "Bell state",
"memory_slots": 2,
"creg_sizes": [["c", 2]],
"clbit_labels": [["c", 0], ["c", 1]],
"qubit_labels": [["q", 0], ["q", 1]],
},
"shots": 1024,
"status": "DONE",
"success": True,
"data": {"counts": {"0x0": 484, "0x3": 540}},
}
API_STATUS_TO_INT = {
ApiJobStatus.CREATING: 0,
ApiJobStatus.VALIDATING: 1,
ApiJobStatus.QUEUED: 2,
ApiJobStatus.RUNNING: 3,
ApiJobStatus.COMPLETED: 4,
ApiJobStatus.ERROR_RUNNING_JOB: 4,
ApiJobStatus.ERROR_VALIDATING_JOB: 4,
ApiJobStatus.CANCELLED: 4,
}
class BaseFakeJob:
"""Base class for faking a remote job."""
_job_progress = [
ApiJobStatus.CREATING,
ApiJobStatus.VALIDATING,
ApiJobStatus.QUEUED,
ApiJobStatus.RUNNING,
ApiJobStatus.COMPLETED,
]
def __init__(
self,
executor,
job_id,
qobj,
backend_name,
job_tags=None,
job_name=None,
experiment_id=None,
run_mode=None,
progress_time=0.5,
**kwargs,
):
"""Initialize a fake job."""
self._job_id = job_id
self._status = ApiJobStatus.CREATING
self.qobj = qobj
self._result = None
self._backend_name = backend_name
self._job_tags = job_tags
self._job_name = job_name
self._experiment_id = experiment_id
self._creation_date = datetime.now()
self._run_mode = run_mode
self._queue_pos = kwargs.pop("queue_pos", "auto")
self._comp_time = kwargs.pop("est_completion", "auto")
self._queue_info = None
self._progress_time = progress_time
self._future = executor.submit(self._auto_progress)
def _auto_progress(self):
"""Automatically update job status."""
for status in self._job_progress:
time.sleep(self._progress_time)
self._status = status
if self._status == ApiJobStatus.COMPLETED:
self._save_result()
elif self._status == ApiJobStatus.ERROR_RUNNING_JOB:
self._save_bad_result()
def _save_result(self):
new_result = copy.deepcopy(VALID_RESULT_RESPONSE)
for _ in range(len(self.qobj["experiments"])):
valid_result = copy.deepcopy(VALID_RESULT)
counts = randrange(1024)
valid_result["data"]["counts"] = {"0x0": counts, "0x3": 1024 - counts}
new_result["results"].append(valid_result)
new_result["job_id"] = self._job_id
new_result["backend_name"] = self._backend_name
self._result = new_result
def _save_bad_result(self):
new_result = copy.deepcopy(VALID_RESULT_RESPONSE)
new_result["job_id"] = self._job_id
new_result["backend_name"] = self._backend_name
new_result["success"] = False
new_result["error"] = {"message": "Kaboom", "code": 1234}
self._result = new_result
def data(self):
"""Return job data."""
status = self._status
data = {
"job_id": self._job_id,
"kind": "q-object",
"status": status.value,
"creation_date": self._creation_date.isoformat(),
"_backend_info": {"name": self._backend_name},
"client_info": {"qiskit": "0.23.5"},
}
if self._job_tags:
data["tags"] = self._job_tags.copy()
if self._job_name:
data["name"] = self._job_name
if self._experiment_id:
data["experiment_id"] = self._experiment_id
if status == ApiJobStatus.ERROR_VALIDATING_JOB:
data["error"] = {"message": "Validation failed.", "code": 1234}
if (
status in [ApiJobStatus.RUNNING] + list(API_JOB_FINAL_STATES)
and self._run_mode
):
data["run_mode"] = self._run_mode
time_per_step = {}
timestamp = self._creation_date
for api_stat in API_STATUS_TO_INT: # pylint: disable=consider-using-dict-items
if API_STATUS_TO_INT[status] > API_STATUS_TO_INT[api_stat]:
time_per_step[api_stat.value] = timestamp.isoformat()
timestamp += timedelta(seconds=30)
elif status == api_stat:
time_per_step[api_stat.value] = timestamp.isoformat()
timestamp += timedelta(seconds=30)
data["time_per_step"] = time_per_step
return data
def _get_info_queue(self):
self._queue_info = {
"status": "PENDING_IN_QUEUE",
"position": randrange(1, 10)
if self._queue_pos == "auto"
else self._queue_pos,
}
if self._queue_info["position"] is None:
return self._queue_info
est_comp_ts = (
self._creation_date + timedelta(minutes=10 * self._queue_info["position"])
if self._comp_time == "auto"
else self._comp_time
)
if est_comp_ts is None:
return self._queue_info
self._queue_info["estimated_complete_time"] = est_comp_ts.isoformat()
self._queue_info["estimated_start_time"] = (
est_comp_ts - timedelta(minutes=20)
).isoformat()
return self._queue_info
def cancel(self):
"""Cancel the job."""
self._future.cancel()
wait([self._future])
self._status = ApiJobStatus.CANCELLED
self._result = None
def result(self):
"""Return job result."""
if not self._result:
raise RequestsApiError("Result is not available")
return self._result
def status_data(self):
"""Return job status data, including queue info."""
status = self._status
data = {"status": status.value}
if status == ApiJobStatus.QUEUED:
data["info_queue"] = self._get_info_queue()
return data
def status(self):
"""Return job status."""
return self._status
def name(self):
"""Return job name."""
return self._job_name
class CancelableFakeJob(BaseFakeJob):
"""Fake job that can be canceled."""
_job_progress = [
ApiJobStatus.CREATING,
ApiJobStatus.VALIDATING,
ApiJobStatus.RUNNING,
]
class NewFieldFakeJob(BaseFakeJob):
"""Fake job that contains additional fields."""
def data(self):
"""Return job data."""
data = super().data()
data["new_field"] = "foo"
return data
class MissingFieldFakeJob(BaseFakeJob):
"""Fake job that does not contain required fields."""
def data(self):
"""Return job data."""
data = super().data()
del data["job_id"]
return data
class FailedFakeJob(BaseFakeJob):
"""Fake job that fails."""
_job_progress = [ApiJobStatus.CREATING, ApiJobStatus.VALIDATING]
def __init__(self, *args, **kwargs):
# failure_type can be "validation", "result", or "partial"
self._failure_type = kwargs.pop("failure_type", "validation")
self._job_progress = FailedFakeJob._job_progress.copy()
if self._failure_type == "validation":
self._job_progress.append(ApiJobStatus.ERROR_VALIDATING_JOB)
else:
self._job_progress.extend(
[ApiJobStatus.RUNNING, ApiJobStatus.ERROR_RUNNING_JOB]
)
super().__init__(*args, **kwargs)
def _save_bad_result(self):
if self._failure_type != "partial":
super()._save_bad_result()
return
new_result = copy.deepcopy(VALID_RESULT_RESPONSE)
new_result["job_id"] = self._job_id
new_result["backend_name"] = self._backend_name
new_result["success"] = False
# Good first result.
valid_result = copy.deepcopy(VALID_RESULT)
counts = randrange(1024)
valid_result["data"]["counts"] = {"0x0": counts, "0x3": 1024 - counts}
new_result["results"].append(valid_result)
for _ in range(1, len(self.qobj["experiments"])):
valid_result = copy.deepcopy(VALID_RESULT)
valid_result["success"] = False
valid_result["status"] = "This circuit failed."
new_result["results"].append(valid_result)
self._result = new_result
class FixedStatusFakeJob(BaseFakeJob):
"""Fake job that stays in a specific status."""
def __init__(self, *args, **kwargs):
self._fixed_status = kwargs.pop("fixed_status")
super().__init__(*args, **kwargs)
def _auto_progress(self):
"""Automatically update job status."""
for status in self._job_progress:
time.sleep(0.5)
self._status = status
if status == self._fixed_status:
break
if self._status == ApiJobStatus.COMPLETED:
self._save_result()
class BaseFakeAccountClient:
"""Base class for faking the AccountClient."""
def __init__(
self,
job_limit=-1,
job_class=BaseFakeJob,
job_kwargs=None,
props_count=None,
queue_positions=None,
est_completion=None,
run_mode=None,
):
"""Initialize a fake account client."""
self._jobs = {}
self._results_retrieved = set()
self._job_limit = job_limit
self._executor = ThreadPoolExecutor()
self._job_class = job_class
if isinstance(self._job_class, list):
self._job_class.reverse()
self._job_kwargs = job_kwargs or {}
self._props_count = props_count or 0
self._props_date = datetime.now().isoformat()
self._queue_positions = queue_positions.copy() if queue_positions else []
self._queue_positions.reverse()
self._est_completion = est_completion.copy() if est_completion else []
self._est_completion.reverse()
self._run_mode = run_mode
self._default_job_class = BaseFakeJob
def list_jobs_statuses(self, limit, skip, descending=True, extra_filter=None):
"""Return a list of statuses of jobs."""
# pylint: disable=unused-argument
extra_filter = extra_filter or {}
if all(fil in extra_filter for fil in ["creationDate", "id"]):
return {}
tag = extra_filter.get("tags", None)
all_job_data = []
for job in list(self._jobs.values())[skip : skip + limit]:
job_data = job.data()
if tag is None or tag in job_data["tags"]:
all_job_data.append(job_data)
if not descending:
all_job_data.reverse()
return all_job_data
def job_submit(
self,
backend_name,
qobj_dict,
job_name,
job_tags,
experiment_id,
*_args,
**_kwargs,
):
"""Submit a Qobj to a device."""
if self._job_limit != -1 and self._unfinished_jobs() >= self._job_limit:
raise RequestsApiError(
"400 Client Error: Bad Request for url: <url>. Reached "
"maximum number of concurrent jobs, Error code: 3458."
)
new_job_id = uuid.uuid4().hex
if isinstance(self._job_class, list):
job_class = (
self._job_class.pop() if self._job_class else self._default_job_class
)
else:
job_class = self._job_class
job_kwargs = copy.copy(self._job_kwargs)
if self._queue_positions:
job_kwargs["queue_pos"] = self._queue_positions.pop()
if self._est_completion:
job_kwargs["est_completion"] = self._est_completion.pop()
run_mode = self._run_mode
if run_mode == "dedicated_once":
run_mode = "dedicated"
self._run_mode = "fairshare"
new_job = job_class(
executor=self._executor,
job_id=new_job_id,
qobj=qobj_dict,
backend_name=backend_name,
job_tags=job_tags,
job_name=job_name,
experiment_id=experiment_id,
run_mode=run_mode,
**job_kwargs,
)
self._jobs[new_job_id] = new_job
return new_job.data()
def job_download_qobj(self, job_id, *_args, **_kwargs):
"""Retrieve and return a Qobj."""
return copy.deepcopy(self._get_job(job_id).qobj)
def job_result(self, job_id, *_args, **_kwargs):
"""Return a random job result."""
if job_id in self._results_retrieved:
warnings.warn(f"Result already retrieved for job {job_id}")
self._results_retrieved.add(job_id)
return self._get_job(job_id).result()
def job_get(self, job_id, *_args, **_kwargs):
"""Return information about a job."""
return self._get_job(job_id).data()
def job_status(self, job_id, *_args, **_kwargs):
"""Return the status of a job."""
return self._get_job(job_id).status_data()
def job_final_status(self, job_id, *_args, **_kwargs):
"""Wait until the job progress to a final state."""
job = self._get_job(job_id)
status = job.status()
while status not in API_JOB_FINAL_STATES:
time.sleep(0.5)
status_data = job.status_data()
status = ApiJobStatus(status_data["status"])
if _kwargs.get("status_queue", None):
data = {"status": status.value}
if status is ApiJobStatus.QUEUED:
data["infoQueue"] = {"status": "PENDING_IN_QUEUE", "position": 1}
_kwargs["status_queue"].put(status_data)
return self.job_status(job_id)
def job_properties(self, *_args, **_kwargs):
"""Return the backend properties of a job."""
props = FakePoughkeepsie().properties().to_dict()
if self._props_count > 0:
self._props_count -= 1
new_dt = datetime.now() + timedelta(hours=randrange(300))
self._props_date = new_dt.isoformat()
props["last_update_date"] = self._props_date
return props
def job_cancel(self, job_id, *_args, **_kwargs):
"""Submit a request for cancelling a job."""
self._get_job(job_id).cancel()
return {"cancelled": True}
def backend_job_limit(self, *_args, **_kwargs):
"""Return the job limit for the backend."""
return {"maximumJobs": self._job_limit, "runningJobs": self._unfinished_jobs()}
def job_update_attribute(self, job_id, attr_name, attr_value, *_args, **_kwargs):
"""Update the specified job attribute with the given value."""
job = self._get_job(job_id)
if attr_name == "name":
job._job_name = attr_value
if attr_name == "tags":
job._job_tags = attr_value.copy()
return {attr_name: attr_value}
def tear_down(self):
"""Clean up job threads."""
for job_id in list(self._jobs.keys()):
try:
self._jobs[job_id].cancel()
except KeyError:
pass
def _unfinished_jobs(self):
"""Return the number of unfinished jobs."""
return sum(
1 for job in self._jobs.values() if job.status() not in API_JOB_FINAL_STATES
)
def _get_job(self, job_id):
"""Return job if found."""
if job_id not in self._jobs:
raise RequestsApiError("Job not found. Error code: 3250.")
return self._jobs[job_id]
class JobSubmitFailClient(BaseFakeAccountClient):
"""Fake AccountClient used to fail a job submit."""
def __init__(self, failed_indexes):
"""JobSubmitFailClient constructor."""
if not isinstance(failed_indexes, list):
failed_indexes = [failed_indexes]
self._failed_indexes = failed_indexes
self._job_count = -1
super().__init__()
def job_submit(self, *_args, **_kwargs): # pylint: disable=arguments-differ
"""Failing job submit."""
self._job_count += 1
if self._job_count in self._failed_indexes:
raise RequestsApiError("Job submit failed!")
return super().job_submit(*_args, **_kwargs)
class JobTimeoutClient(BaseFakeAccountClient):
"""Fake AccountClient used to fail a job submit."""
def __init__(self, *args, max_fail_count=-1, **kwargs):
"""JobTimeoutClient constructor."""
self._fail_count = max_fail_count
super().__init__(*args, **kwargs)
def job_final_status(self, job_id, *_args, **_kwargs):
"""Wait until the job progress to a final state."""
if self._fail_count != 0:
self._fail_count -= 1
raise UserTimeoutExceededError("Job timed out!")
return super().job_final_status(job_id, *_args, **_kwargs)
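# --- Editorial addition: a minimal, hypothetical usage sketch of the fake client above
# (not part of the original module; "fake_backend" and the one-experiment qobj dict are
# made-up placeholders). ---
if __name__ == "__main__":
    _client = BaseFakeAccountClient()
    _job = _client.job_submit("fake_backend", {"experiments": [{}]}, "my-job", [], None)
    _client.job_final_status(_job["job_id"])  # poll until the fake job reaches a final state
    print(_client.job_result(_job["job_id"])["success"])
    _client.tear_down()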
| 33.853659 | 88 | 0.609787 |
da8867e7279143c3fcf70ba5902655485e39014a | 2,818 | py | Python | examples/fedprox/generate_configs.py | wynterl/federated-learning-lib | 5d6cc0a5f4a45d97525ff2dec328b3901b71b7a3 | ["IBM-pibs"] | null | null | null | examples/fedprox/generate_configs.py | wynterl/federated-learning-lib | 5d6cc0a5f4a45d97525ff2dec328b3901b71b7a3 | ["IBM-pibs"] | null | null | null | examples/fedprox/generate_configs.py | wynterl/federated-learning-lib | 5d6cc0a5f4a45d97525ff2dec328b3901b71b7a3 | ["IBM-pibs"] | 1 | 2021-05-11T05:09:30.000Z | 2021-05-11T05:09:30.000Z |
import os
from importlib import import_module
import examples.datahandlers as datahandlers
from examples.fedprox.model import MyModel
def get_fusion_config():
fusion = {
'name': 'IterAvgFusionHandler',
'path': 'ibmfl.aggregator.fusion.iter_avg_fusion_handler'
}
return fusion
def get_local_training_config():
local_training_handler = {
'name': 'LocalTrainingHandler',
'path': 'ibmfl.party.training.local_training_handler'
}
return local_training_handler
def get_hyperparams(model='tf'):
hyperparams = {
'global': {
'rounds': 10,
'termination_accuracy': 0.9,
'max_timeout': 60
},
'local': {
'training': {
'epochs': 3
}
}
}
return hyperparams
def get_data_handler_config(party_id, dataset, folder_data, is_agg=False, model='tf'):
SUPPORTED_DATASETS = ['mnist']
if dataset in SUPPORTED_DATASETS:
if dataset == 'mnist':
dataset = 'mnist_tf'
data = datahandlers.get_datahandler_config(
dataset, folder_data, party_id, is_agg)
else:
raise Exception(
"The dataset {} is a wrong combination for fusion/model".format(dataset))
return data
def get_model_config(folder_configs, dataset, is_agg=False, party_id=0, model='tf'):
if is_agg:
return None
    if model is None or model == 'default':
model = 'tf'
# Create an instance of the model
model = MyModel()
# save model to json
config = model.to_json()
if not os.path.exists(folder_configs):
os.makedirs(folder_configs)
fname = os.path.join(folder_configs, 'model_architecture.json')
outfile = open(fname, 'w')
outfile.write(config)
outfile.close()
spec = {
'model_name': 'tf-cnn',
'model_architecture': fname,
'custom_objects': [
{
'key': 'MyModel',
'value': 'MyModel',
'path': 'examples.fedprox.model'
}
],
'compile_model_options': {
'optimizer': {
'value': 'PerturbedGradientDescent',
'path': 'ibmfl.util.fedprox.optimizer',
'args': {
'learning_rate': 0.1,
'mu': 0.01,
}
},
'loss': {
'value': 'SparseCategoricalCrossentropy',
'path': 'tensorflow.keras.losses',
'args': {
'from_logits': 'true'
}
},
'metrics': 'acc'
}
}
model = {
'name': 'TensorFlowFLModel',
'path': 'ibmfl.model.tensorflow_fl_model',
'spec': spec
}
return model
| 25.853211 | 86 | 0.542583 |
42801709317b853554b2d62714ca9f85036ec05c | 576 | py | Python | tests/test001/hud.py | marza-animation-planet/das | 1c7460dfdd5f138d8317c72900e90b23c0c28c7b | ["MIT"] | 4 | 2018-11-19T01:36:01.000Z | 2022-02-28T03:41:12.000Z | tests/test001/hud.py | marza-animation-planet/das | 1c7460dfdd5f138d8317c72900e90b23c0c28c7b | ["MIT"] | 1 | 2021-12-26T11:57:07.000Z | 2022-03-16T07:18:01.000Z | tests/test001/hud.py | marza-animation-planet/das | 1c7460dfdd5f138d8317c72900e90b23c0c28c7b | ["MIT"] | 2 | 2019-03-30T10:28:12.000Z | 2022-03-04T17:58:39.000Z |
__all__ = ["Attribute"]
class Attribute(object):
def __init__(self, attr=""):
super(Attribute, self).__init__()
self.attr = attr
def copy(self):
return Attribute(self.attr)
def value_to_string(self):
return self.attr
def string_to_value(self, s):
self.attr = s
# def __str__(self):
# return "Attribute('%s')" % self.attr
def __repr__(self):
return "Attribute('%s')" % self.attr
def __cmp__(self, oth):
s0 = str(self)
s1 = str(oth)
return (-1 if (s0 < s1) else (0 if (s0 == s1) else 1))
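# --- Editorial addition: trivial usage sketch, not part of the original file;
# "persp.tx" is just a placeholder attribute name. ---
if __name__ == "__main__":
    a = Attribute("persp.tx")
    print(repr(a))               # Attribute('persp.tx')
    b = a.copy()
    b.string_to_value("persp.ty")
    print(b.value_to_string())   # persp.ty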
| 19.862069 | 60 | 0.581597 |
de741fd18d641d6a5fca282fd546a0d0a3eecf42 | 8,655 | py | Python | airbyte-integrations/connectors/source-stock-ticker-api-tutorial/source.py | golf-canada/airbyte | a81b183a6b62d6bb4256347aaf39a3ada061aabe | ["MIT"] | null | null | null | airbyte-integrations/connectors/source-stock-ticker-api-tutorial/source.py | golf-canada/airbyte | a81b183a6b62d6bb4256347aaf39a3ada061aabe | ["MIT"] | null | null | null | airbyte-integrations/connectors/source-stock-ticker-api-tutorial/source.py | golf-canada/airbyte | a81b183a6b62d6bb4256347aaf39a3ada061aabe | ["MIT"] | null | null | null |
"""
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse # helps parse commandline arguments
import json
import sys
import os
import requests
import datetime
def read(config, catalog):
# Assert required configuration was provided
if "api_key" not in config or "stock_ticker" not in config:
log("Input config must contain the properties 'api_key' and 'stock_ticker'")
sys.exit(1)
# Find the stock_prices stream if it is present in the input catalog
stock_prices_stream = None
for configured_stream in catalog["streams"]:
if configured_stream["stream"]["name"] == "stock_prices":
stock_prices_stream = configured_stream
if stock_prices_stream is None:
log("No streams selected")
return
# We only support full_refresh at the moment, so verify the user didn't ask for another sync mode
if stock_prices_stream["sync_mode"] != "full_refresh":
log("This connector only supports full refresh syncs! (for now)")
sys.exit(1)
# If we've made it this far, all the configuration is good and we can pull the last 7 days of market data
api_key = config["api_key"]
stock_ticker = config["stock_ticker"]
response = _call_api(f"/stock/{stock_ticker}/chart/7d", api_key)
if response.status_code != 200:
# In a real scenario we'd handle this error better :)
log("Failure occurred when calling IEX API")
sys.exit(1)
else:
# Sort the stock prices ascending by date then output them one by one as AirbyteMessages
prices = sorted(response.json(), key=lambda record: datetime.datetime.strptime(record["date"], '%Y-%m-%d'))
for price in prices:
data = {"date": price["date"], "stock_ticker": price["symbol"], "price": price["close"]}
record = {"stream": "stock_prices", "data": data, "emitted_at": int(datetime.datetime.now().timestamp()) * 1000}
output_message = {"type": "RECORD", "record": record}
print(json.dumps(output_message))
def read_json(filepath):
with open(filepath, "r") as f:
return json.loads(f.read())
def _call_api(endpoint, token):
return requests.get("https://cloud.iexapis.com/v1/" + endpoint + "?token=" + token)
def check(config):
# Assert required configuration was provided
if "api_key" not in config or "stock_ticker" not in config:
log("Input config must contain the properties 'api_key' and 'stock_ticker'")
sys.exit(1)
else:
# Validate input configuration by attempting to get the price of the input stock ticker for the previous day
response = _call_api(endpoint="stock/" + config["stock_ticker"] + "/previous", token=config["api_key"])
if response.status_code == 200:
result = {"status": "SUCCEEDED"}
elif response.status_code == 403:
# HTTP code 403 means authorization failed so the API key is incorrect
result = {"status": "FAILED", "message": "API Key is incorrect."}
else:
# Consider any other code a "generic" failure and tell the user to make sure their config is correct.
result = {"status": "FAILED", "message": "Input configuration is incorrect. Please verify the input stock ticker and API key."}
# Format the result of the check operation according to the Airbyte Specification
output_message = {"type": "CONNECTION_STATUS", "connectionStatus": result}
print(json.dumps(output_message))
def log(message):
log_json = {"type": "LOG", "log": message}
print(json.dumps(log_json))
def discover():
catalog = {
"streams": [{
"name": "stock_prices",
"supported_sync_modes": ["full_refresh"],
"json_schema": {
"properties": {
"date": {
"type": "string"
},
"price": {
"type": "number"
},
"stock_ticker": {
"type": "string"
}
}
}
}]
}
airbyte_message = {"type": "CATALOG", "catalog": catalog}
print(json.dumps(airbyte_message))
def get_input_file_path(path):
if os.path.isabs(path):
return path
else:
return os.path.join(os.getcwd(), path)
def spec():
# Read the file named spec.json from the module directory as a JSON file
current_script_directory = os.path.dirname(os.path.realpath(__file__))
spec_path = os.path.join(current_script_directory, "spec.json")
specification = read_json(spec_path)
# form an Airbyte Message containing the spec and print it to stdout
airbyte_message = {"type": "SPEC", "spec": specification}
# json.dumps converts the JSON (python dict) to a string
print(json.dumps(airbyte_message))
def run(args):
parent_parser = argparse.ArgumentParser(add_help=False)
main_parser = argparse.ArgumentParser()
subparsers = main_parser.add_subparsers(title="commands", dest="command")
# Accept the spec command
subparsers.add_parser("spec", help="outputs the json configuration specification", parents=[parent_parser])
# Accept the check command
check_parser = subparsers.add_parser("check", help="checks the config used to connect", parents=[parent_parser])
required_check_parser = check_parser.add_argument_group("required named arguments")
required_check_parser.add_argument("--config", type=str, required=True, help="path to the json configuration file")
# Accept the discover command
discover_parser = subparsers.add_parser("discover", help="outputs a catalog describing the source's schema", parents=[parent_parser])
required_discover_parser = discover_parser.add_argument_group("required named arguments")
required_discover_parser.add_argument("--config", type=str, required=True, help="path to the json configuration file")
# Accept the read command
read_parser = subparsers.add_parser("read", help="reads the source and outputs messages to STDOUT", parents=[parent_parser])
read_parser.add_argument("--state", type=str, required=False, help="path to the json-encoded state file")
required_read_parser = read_parser.add_argument_group("required named arguments")
required_read_parser.add_argument("--config", type=str, required=True, help="path to the json configuration file")
required_read_parser.add_argument(
"--catalog", type=str, required=True, help="path to the catalog used to determine which data to read"
)
parsed_args = main_parser.parse_args(args)
command = parsed_args.command
if command == "spec":
spec()
elif command == "check":
config_file_path = get_input_file_path(parsed_args.config)
config = read_json(config_file_path)
check(config)
elif command == "discover":
discover()
elif command == "read":
config = read_json(get_input_file_path(parsed_args.config))
configured_catalog = read_json(get_input_file_path(parsed_args.catalog))
read(config, configured_catalog)
else:
# If we don't recognize the command log the problem and exit with an error code greater than 0 to indicate the process
# had a failure
log("Invalid command. Allowable commands: [spec, check, discover, read]")
sys.exit(1)
# A zero exit code means the process successfully completed
sys.exit(0)
def main():
arguments = sys.argv[1:]
run(arguments)
if __name__ == "__main__":
main()
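# --- Editorial addition (not part of the original connector): example invocations,
# assuming local config.json / catalog.json files that match spec.json ---
#   python source.py spec
#   python source.py check --config config.json
#   python source.py discover --config config.json
#   python source.py read --config config.json --catalog catalog.json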
| 41.018957 | 139 | 0.678336 |
f33dbc025ee623ac05f8905db0d4cf6a02df45a0 | 2,391 | py | Python | src/algorithm/sbcn.py | ShuhuaGao/sbcn_mmc | f00dea13c978cd97ad4a5c4b9eda48b5202239db | ["MIT"] | null | null | null | src/algorithm/sbcn.py | ShuhuaGao/sbcn_mmc | f00dea13c978cd97ad4a5c4b9eda48b5202239db | ["MIT"] | null | null | null | src/algorithm/sbcn.py | ShuhuaGao/sbcn_mmc | f00dea13c978cd97ad4a5c4b9eda48b5202239db | ["MIT"] | null | null | null |
"""
Switched Boolean Control Network
Author: Gao Shuhua
Date: 2019/10/18
"""
class SBCN:
"""
Switched BCN
"""
def __init__(self, n, m, w, Ls, g, Cx=None, Cu=None, Cs=None):
"""
Initialize a switched Boolean network.
:param n: number of state variables
:param m: number of control inputs
:param w: number of sub-systems
        :param Ls: a list of transition matrices, each for one sub-system
        :param g: stage cost function: (int, int, int) --> float
        :param Cx: state constraints, a set of ints or None (no constraints)
        :param Cu: control constraints, a functor: int --> a set of ints, or None
        :param Cs: switching constraints, a functor: int --> a set of ints, or None
"""
self.n = n
self._N = 2 ** n
self.m = m
self._M = 2 ** m
self._all_controls = list(range(1, self._M + 1)) # if no constraints
self.w = w
self.g = g
self._all_subs = list(range(1, self.w + 1))
self.Ls = Ls
self.Cx = Cx
self.Cu = Cu
self.Cs = Cs
def compute_successors(self, i):
"""
Get the succeeding states of i, i.e., R(i, 1), and the associated optimal weight, control, and switch
:param i: current state
:return: a list, each element being a tuple (j, weight, control, switch)
"""
if self.Cx is not None:
assert i in self.Cx, f"The given state {i} violates the state constraints"
controls = self.Cu(i) if self.Cu else self._all_controls
subs = self.Cs(i) if self.Cs else self._all_subs
successors = {}
# note that k, l, and i here start from 1
for k in controls:
for l in subs:
L = self.Ls[l - 1]
blk = L[(k - 1) * self._N: k * self._N ]
j = blk[i - 1]
if self.Cx is not None: # the successor j must satisfy the state constraints
if j not in self.Cx:
continue
weight = self.g(i, k, l)
if j in successors:
if weight < successors[j][0]:
successors[j] = (weight, k, l) # a better one
else:
successors[j] = (weight, k, l)
return [(j, *info) for j, info in successors.items()]
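# --- Editorial addition: a tiny, hypothetical usage sketch (not part of the original file).
# One state variable (N=2 states), one control (M=2), one sub-system; each flat list Ls[l]
# maps (control k, state i) -> successor state, all indices 1-based. ---
if __name__ == "__main__":
    L1 = [1, 2,   # control k=1: state 1 -> 1, state 2 -> 2
          2, 1]   # control k=2: state 1 -> 2, state 2 -> 1
    net = SBCN(n=1, m=1, w=1, Ls=[L1], g=lambda i, k, l: 1.0)
    print(net.compute_successors(1))  # [(1, 1.0, 1, 1), (2, 1.0, 2, 1)]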
| 36.227273 | 109 | 0.528649 |
5ffb22d6ec2fd8a397d2e00d794a058d7b40750f | 13 | py | Python | test.py | SoheilKhodayari/Graph | 2cefa75da2dfc34e30c5f5784e180a5dc79d9095 | ["MIT"] | 1 | 2015-05-05T22:08:37.000Z | 2015-05-05T22:08:37.000Z | test.py | SoheilKhodayari/Graph | 2cefa75da2dfc34e30c5f5784e180a5dc79d9095 | ["MIT"] | null | null | null | test.py | SoheilKhodayari/Graph | 2cefa75da2dfc34e30c5f5784e180a5dc79d9095 | ["MIT"] | null | null | null |
""" Test """
| 6.5 | 12 | 0.307692 |
f7eb6e9870586c3a8fc3dd34ee8247899a75163b | 5,802 | py | Python | src/gym_selfx/render/draw.py | mountain/self | 189e00e810d4d719fa6b37b400eef17d2521a64c | ["MIT"] | 5 | 2019-12-30T16:18:06.000Z | 2022-03-22T17:36:09.000Z | src/gym_selfx/render/draw.py | mountain/self | 189e00e810d4d719fa6b37b400eef17d2521a64c | ["MIT"] | 2 | 2019-12-30T10:09:15.000Z | 2020-01-03T02:48:18.000Z | src/gym_selfx/render/draw.py | mountain/self | 189e00e810d4d719fa6b37b400eef17d2521a64c | ["MIT"] | 3 | 2019-12-30T06:10:04.000Z | 2021-04-23T09:37:48.000Z |
# -*- coding: utf-8 -*-
#
# Python version Copyright (c) 2015 John Stowers
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import cv2
import random
import numpy as np
from Box2D import (b2Color, b2DistanceJoint, b2MouseJoint, b2PulleyJoint)
from Box2D.Box2D import (b2_staticBody as staticBody, b2_dynamicBody as dynamicBody, b2_kinematicBody as kinematicBody, b2PolygonShape as polygonShape,
b2CircleShape as circleShape, b2LoopShape as loopShape, b2EdgeShape as edgeShape)
import matplotlib.pyplot as plt
def cvcolor(color):
return int(255.0 * color[2]), int(255.0 * color[1]), int(255.0 * color[0])
def cvcoord(pos):
return tuple(map(int, pos))
class OpencvDrawFuncs(object):
def __init__(self, w, h, ppm, fill_polygon=True, flip_y=True):
self._w = w
self._h = h
self._ppm = ppm
self._colors = {
staticBody: (255, 255, 255),
dynamicBody: (255, 0, 0),
kinematicBody: (127, 255, 230),
}
self._fill_polygon = fill_polygon
self._flip_y = flip_y
self.screen = np.zeros((self._h, self._w, 3), np.uint8)
def install(self):
polygonShape.draw = self._draw_polygon
circleShape.draw = self._draw_circle
loopShape.draw = self._draw_loop
edgeShape.draw = self._draw_edge
def draw_world(self, world):
for body in world.bodies:
for fixture in body.fixtures:
fixture.shape.draw(body, fixture)
for joint in world.joints:
self._draw_joint(joint)
def clear_screen(self, screen=None):
if screen is None:
self.screen.fill(0)
else:
self.screen = screen
def _fix_vertices(self, vertices):
if self._flip_y:
return [(v[0], self._h - v[1]) for v in vertices]
else:
return [(v[0], v[1]) for v in vertices]
def _draw_joint(self, joint):
bodyA, bodyB = joint.bodyA, joint.bodyB
xf1, xf2 = bodyA.transform, bodyB.transform
x1, x2 = xf1.position, xf2.position
p1, p2 = joint.anchorA, joint.anchorB
color = b2Color(0.5, 0.8, 0.8)
x1, x2, p1, p2 = self._fix_vertices((x1 * self._ppm, x2 * self._ppm,
p1 * self._ppm, p2 * self._ppm))
if isinstance(joint, b2DistanceJoint):
cv2.line(self.screen, cvcoord(p1), cvcoord(p2), cvcolor(color), 1)
elif isinstance(joint, b2PulleyJoint):
s1, s2 = joint.groundAnchorA, joint.groundAnchorB
s1, s2 = self._fix_vertices((s1 * self._ppm, s2 * self._ppm))
cv2.line(self.screen, cvcoord(s1), cvcoord(p1), cvcolor(color), 1)
cv2.line(self.screen, cvcoord(s2), cvcoord(p2), cvcolor(color), 1)
cv2.line(self.screen, cvcoord(s1), cvcoord(s2), cvcolor(color), 1)
elif isinstance(joint, b2MouseJoint):
pass # don't draw it here
else:
cv2.line(self.screen, cvcoord(x1), cvcoord(p1), cvcolor(color), 1)
cv2.line(self.screen, cvcoord(p1), cvcoord(p2), cvcolor(color), 1)
cv2.line(self.screen, cvcoord(x2), cvcoord(p2), cvcolor(color), 1)
def _draw_polygon(self, body, fixture):
polygon = fixture.shape
transform = body.transform
vertices = self._fix_vertices([transform * v * self._ppm
for v in polygon.vertices])
pts = np.array(vertices, np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(self.screen, [pts], True, self._colors[body.type])
if self._fill_polygon:
lightc = np.array(self._colors[body.type], dtype=int) * 0.5
cv2.fillPoly(self.screen, [pts], lightc)
def _draw_circle(self, body, fixture):
circle = fixture.shape
position = self._fix_vertices(
[body.transform * circle.pos * self._ppm])[0]
if self._fill_polygon:
cv2.circle(self.screen, cvcoord(position), int(
circle.radius * self._ppm), body.userData['color'], -1)
else:
cv2.circle(self.screen, cvcoord(position), int(
circle.radius * self._ppm), body.userData['color'], 1)
def _draw_edge(self, body, fixture):
edge = fixture.shape
v = [body.transform * edge.vertex1 * self._ppm,
body.transform * edge.vertex2 * self._ppm]
vertices = self._fix_vertices(v)
cv2.line(self.screen, cvcoord(vertices[0]),
cvcoord(vertices[1]), self._colors[body.type], 1)
def _draw_loop(self, body, fixture):
loop = fixture.shape
transform = body.transform
vertices = self._fix_vertices([transform * v * self._ppm
for v in loop.vertices])
v1 = vertices[-1]
for v2 in vertices:
cv2.line(self.screen, cvcoord(v1), cvcoord(v2),
self._colors[body.type], 1)
v1 = v2
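# --- Editorial addition: a rough usage sketch, not part of the original file.
# `world` is assumed to be a Box2D b2World populated elsewhere. ---
#
#   drawer = OpencvDrawFuncs(w=640, h=480, ppm=20.0)
#   drawer.install()              # attach the draw() methods to the Box2D shape classes
#   drawer.clear_screen()
#   drawer.draw_world(world)
#   cv2.imshow("world", drawer.screen)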
| 38.939597 | 151 | 0.616512 |
b68f35fe788668680ef195a390beb63c42b1e2a4 | 80,850 | py | Python | bayespy/inference/vmp/nodes/gaussian.py | deebuls/bayespy | 9b037eefeca3657f6f78d9e90b49aa619a89b4f1 | ["MIT"] | 1 | 2021-06-02T21:31:47.000Z | 2021-06-02T21:31:47.000Z | bayespy/inference/vmp/nodes/gaussian.py | libpcap/bayespy | 9b037eefeca3657f6f78d9e90b49aa619a89b4f1 | ["MIT"] | null | null | null | bayespy/inference/vmp/nodes/gaussian.py | libpcap/bayespy | 9b037eefeca3657f6f78d9e90b49aa619a89b4f1 | ["MIT"] | null | null | null |
################################################################################
# Copyright (C) 2011-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Module for the Gaussian distribution and similar distributions.
"""
import numpy as np
from scipy import special
from bayespy.utils import (random,
misc,
linalg)
from bayespy.utils.linalg import dot, mvdot
from .expfamily import (ExponentialFamily,
ExponentialFamilyDistribution,
useconstructor)
from .wishart import (WishartMoments,
WishartPriorMoments)
from .gamma import (GammaMoments,
GammaPriorMoments)
from .deterministic import Deterministic
from .node import (Moments,
ensureparents)
#
# MOMENTS
#
class GaussianMoments(Moments):
r"""
Class for the moments of Gaussian variables.
"""
def __init__(self, shape):
self.shape = shape
self.ndim = len(shape)
self.dims = (shape, 2*shape)
super().__init__()
def compute_fixed_moments(self, x):
r"""
Compute the moments for a fixed value
"""
x = misc.atleast_nd(x, self.ndim)
return [x, linalg.outer(x, x, ndim=self.ndim)]
@classmethod
def from_values(cls, x, ndim):
r"""
Return the shape of the moments for a fixed value.
"""
if ndim == 0:
return cls(())
else:
return cls(np.shape(x)[-ndim:])
def get_instance_conversion_kwargs(self):
return dict(ndim=self.ndim)
def get_instance_converter(self, ndim):
if ndim == self.ndim or ndim is None:
return None
return GaussianToGaussian(self, ndim)
class GaussianToGaussian():
def __init__(self, moments_from, ndim_to):
if not isinstance(moments_from, GaussianMoments):
raise ValueError()
if ndim_to < 0:
            raise ValueError("ndim_to must be non-negative")
self.shape_from = moments_from.shape
self.ndim_from = moments_from.ndim
self.ndim_to = ndim_to
if self.ndim_to > self.ndim_from:
raise ValueError()
if self.ndim_to == 0:
self.moments = GaussianMoments(())
else:
self.moments = GaussianMoments(self.shape_from[-self.ndim_to:])
return
def compute_moments(self, u):
if self.ndim_to == self.ndim_from:
return u
u0 = u[0]
u1 = misc.get_diag(u[1], ndim=self.ndim_from, ndim_to=self.ndim_to)
return [u0, u1]
def compute_message_to_parent(self, m):
# Handle broadcasting in m_child
m0 = m[0] * np.ones(self.shape_from)
m1 = (
misc.make_diag(m[1], ndim=self.ndim_from, ndim_from=self.ndim_to)
* misc.identity(*self.shape_from)
)
return [m0, m1]
def compute_weights_to_parent(self, weights):
diff = self.ndim_from - self.ndim_to
if diff == 0:
return weights
return np.sum(
weights * np.ones(self.shape_from[:diff]),
#misc.atleast_nd(weights, diff),
axis=tuple(range(-diff, 0))
)
def plates_multiplier_from_parent(self, plates_multiplier):
diff = self.ndim_from - self.ndim_to
return plates_multiplier + diff * (1,)
def plates_from_parent(self, plates):
diff = self.ndim_from - self.ndim_to
if diff == 0:
return plates
return plates + self.shape_from[:diff]
def plates_to_parent(self, plates):
diff = self.ndim_from - self.ndim_to
if diff == 0:
return plates
return plates[:-diff]
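# --- Editorial addition, not part of the original module: a small sanity-check sketch
# for GaussianMoments. For a fixed vector x, compute_fixed_moments returns [x, outer(x, x)]:
#
#   >>> m = GaussianMoments((2,))
#   >>> u = m.compute_fixed_moments(np.array([1.0, 2.0]))
#   >>> u[0]
#   array([1., 2.])
#   >>> u[1]
#   array([[1., 2.],
#          [2., 4.]])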
class GaussianGammaMoments(Moments):
r"""
Class for the moments of Gaussian-gamma-ISO variables.
"""
def __init__(self, shape):
r"""
Create moments object for Gaussian-gamma isotropic variables
ndim=0: scalar
ndim=1: vector
ndim=2: matrix
...
"""
self.shape = shape
self.ndim = len(shape)
self.dims = (shape, 2*shape, (), ())
super().__init__()
def compute_fixed_moments(self, x, alpha):
r"""
Compute the moments for a fixed value
`x` is a mean vector.
`alpha` is a precision scale
"""
x = np.asanyarray(x)
alpha = np.asanyarray(alpha)
u0 = x * misc.add_trailing_axes(alpha, self.ndim)
u1 = (linalg.outer(x, x, ndim=self.ndim)
* misc.add_trailing_axes(alpha, 2*self.ndim))
u2 = np.copy(alpha)
u3 = np.log(alpha)
u = [u0, u1, u2, u3]
return u
@classmethod
def from_values(cls, x, alpha, ndim):
r"""
Return the shape of the moments for a fixed value.
"""
if ndim == 0:
shape = ( (), (), (), () )
else:
shape = np.shape(x)[-ndim:]
return cls(shape)
def get_instance_conversion_kwargs(self):
return dict(ndim=self.ndim)
def get_instance_converter(self, ndim):
# FIXME/TODO: IMPLEMENT THIS CORRECTLY!
if ndim != self.ndim:
raise NotImplementedError(
"Conversion to different ndim in GaussianMoments not yet "
"implemented."
)
return None
class GaussianWishartMoments(Moments):
r"""
Class for the moments of Gaussian-Wishart variables.
"""
def __init__(self, shape):
self.shape = shape
self.ndim = len(shape)
self.dims = ( shape, (), 2*shape, () )
super().__init__()
def compute_fixed_moments(self, x, Lambda):
r"""
Compute the moments for a fixed value
`x` is a vector.
`Lambda` is a precision matrix
"""
x = np.asanyarray(x)
Lambda = np.asanyarray(Lambda)
u0 = linalg.mvdot(Lambda, x, ndim=self.ndim)
u1 = np.einsum(
'...i,...ij,...j->...',
misc.flatten_axes(x, self.ndim),
misc.flatten_axes(Lambda, self.ndim, self.ndim),
misc.flatten_axes(x, self.ndim)
)
u2 = np.copy(Lambda)
u3 = linalg.logdet_cov(Lambda, ndim=self.ndim)
return [u0, u1, u2, u3]
@classmethod
    def from_values(cls, x, Lambda, ndim):
r"""
Return the shape of the moments for a fixed value.
"""
if ndim == 0:
return cls(())
else:
if np.ndim(x) < ndim:
raise ValueError("Mean must be a vector")
shape = np.shape(x)[-ndim:]
if np.shape(Lambda)[-2*ndim:] != shape + shape:
raise ValueError("Shapes inconsistent")
return cls(shape)
#
# DISTRIBUTIONS
#
class GaussianDistribution(ExponentialFamilyDistribution):
r"""
Class for the VMP formulas of Gaussian variables.
Currently, supports only vector variables.
Notes
-----
Message passing equations:
.. math::
\mathbf{x} &\sim \mathcal{N}(\boldsymbol{\mu}, \mathbf{\Lambda}),
.. math::
\mathbf{x},\boldsymbol{\mu} \in \mathbb{R}^{D},
\quad \mathbf{\Lambda} \in \mathbb{R}^{D \times D},
\quad \mathbf{\Lambda} \text{ symmetric positive definite}
.. math::
\log\mathcal{N}( \mathbf{x} | \boldsymbol{\mu}, \mathbf{\Lambda} )
&=
- \frac{1}{2} \mathbf{x}^{\mathrm{T}} \mathbf{\Lambda} \mathbf{x}
+ \mathbf{x}^{\mathrm{T}} \mathbf{\Lambda} \boldsymbol{\mu}
- \frac{1}{2} \boldsymbol{\mu}^{\mathrm{T}} \mathbf{\Lambda}
\boldsymbol{\mu}
+ \frac{1}{2} \log |\mathbf{\Lambda}|
- \frac{D}{2} \log (2\pi)
"""
def __init__(self, shape):
self.shape = shape
self.ndim = len(shape)
super().__init__()
def compute_message_to_parent(self, parent, index, u, u_mu_Lambda):
r"""
Compute the message to a parent node.
.. math::
\boldsymbol{\phi}_{\boldsymbol{\mu}} (\mathbf{x}, \mathbf{\Lambda})
&=
\left[ \begin{matrix}
\mathbf{\Lambda} \mathbf{x}
\\
- \frac{1}{2} \mathbf{\Lambda}
\end{matrix} \right]
\\
\boldsymbol{\phi}_{\mathbf{\Lambda}} (\mathbf{x}, \boldsymbol{\mu})
&=
\left[ \begin{matrix}
- \frac{1}{2} \mathbf{xx}^{\mathrm{T}}
+ \frac{1}{2} \mathbf{x}\boldsymbol{\mu}^{\mathrm{T}}
+ \frac{1}{2} \boldsymbol{\mu}\mathbf{x}^{\mathrm{T}}
- \frac{1}{2} \boldsymbol{\mu\mu}^{\mathrm{T}}
\\
\frac{1}{2}
\end{matrix} \right]
"""
if index == 0:
x = u[0]
xx = u[1]
m0 = x
m1 = -0.5
m2 = -0.5*xx
m3 = 0.5
return [m0, m1, m2, m3]
else:
raise ValueError("Index out of bounds")
def compute_phi_from_parents(self, u_mu_Lambda, mask=True):
r"""
Compute the natural parameter vector given parent moments.
.. math::
\boldsymbol{\phi} (\boldsymbol{\mu}, \mathbf{\Lambda})
&=
\left[ \begin{matrix}
\mathbf{\Lambda} \boldsymbol{\mu}
\\
- \frac{1}{2} \mathbf{\Lambda}
\end{matrix} \right]
"""
Lambda_mu = u_mu_Lambda[0]
Lambda = u_mu_Lambda[2]
return [Lambda_mu,
-0.5 * Lambda]
def compute_moments_and_cgf(self, phi, mask=True):
r"""
Compute the moments and :math:`g(\phi)`.
.. math::
\overline{\mathbf{u}} (\boldsymbol{\phi})
&=
\left[ \begin{matrix}
- \frac{1}{2} \boldsymbol{\phi}^{-1}_2 \boldsymbol{\phi}_1
\\
\frac{1}{4} \boldsymbol{\phi}^{-1}_2 \boldsymbol{\phi}_1
\boldsymbol{\phi}^{\mathrm{T}}_1 \boldsymbol{\phi}^{-1}_2
- \frac{1}{2} \boldsymbol{\phi}^{-1}_2
\end{matrix} \right]
\\
g_{\boldsymbol{\phi}} (\boldsymbol{\phi})
&=
\frac{1}{4} \boldsymbol{\phi}^{\mathrm{T}}_1 \boldsymbol{\phi}^{-1}_2
\boldsymbol{\phi}_1
+ \frac{1}{2} \log | -2 \boldsymbol{\phi}_2 |
"""
# TODO: Compute -2*phi[1] and simplify the formulas
L = linalg.chol(-2*phi[1], ndim=self.ndim)
k = np.shape(phi[0])[-1]
# Moments
u0 = linalg.chol_solve(L, phi[0], ndim=self.ndim)
u1 = (linalg.outer(u0, u0, ndim=self.ndim)
+ linalg.chol_inv(L, ndim=self.ndim))
u = [u0, u1]
# G
g = (-0.5 * linalg.inner(u[0], phi[0], ndim=self.ndim)
+ 0.5 * linalg.chol_logdet(L, ndim=self.ndim))
return (u, g)
def compute_cgf_from_parents(self, u_mu_Lambda):
r"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
.. math::
g (\boldsymbol{\mu}, \mathbf{\Lambda})
&=
- \frac{1}{2} \operatorname{tr}(\boldsymbol{\mu\mu}^{\mathrm{T}}
\mathbf{\Lambda} )
+ \frac{1}{2} \log |\mathbf{\Lambda}|
"""
mu_Lambda_mu = u_mu_Lambda[1]
logdet_Lambda = u_mu_Lambda[3]
g = -0.5*mu_Lambda_mu + 0.5*logdet_Lambda
return g
def compute_fixed_moments_and_f(self, x, mask=True):
r"""
Compute the moments and :math:`f(x)` for a fixed value.
.. math::
\mathbf{u} (\mathbf{x})
&=
\left[ \begin{matrix}
\mathbf{x}
\\
\mathbf{xx}^{\mathrm{T}}
\end{matrix} \right]
\\
f(\mathbf{x})
&= - \frac{D}{2} \log(2\pi)
"""
k = np.shape(x)[-1]
u = [x, linalg.outer(x, x, ndim=self.ndim)]
f = -k/2*np.log(2*np.pi)
return (u, f)
def compute_gradient(self, g, u, phi):
r"""
Compute the standard gradient with respect to the natural parameters.
Gradient of the moments:
.. math::
\mathrm{d}\overline{\mathbf{u}} &=
\begin{bmatrix}
\frac{1}{2} \phi_2^{-1} \mathrm{d}\phi_2 \phi_2^{-1} \phi_1
- \frac{1}{2} \phi_2^{-1} \mathrm{d}\phi_1
\\
- \frac{1}{4} \phi_2^{-1} \mathrm{d}\phi_2 \phi_2^{-1} \phi_1 \phi_1^{\mathrm{T}} \phi_2^{-1}
- \frac{1}{4} \phi_2^{-1} \phi_1 \phi_1^{\mathrm{T}} \phi_2^{-1} \mathrm{d}\phi_2 \phi_2^{-1}
+ \frac{1}{2} \phi_2^{-1} \mathrm{d}\phi_2 \phi_2^{-1}
+ \frac{1}{4} \phi_2^{-1} \mathrm{d}\phi_1 \phi_1^{\mathrm{T}} \phi_2^{-1}
+ \frac{1}{4} \phi_2^{-1} \phi_1 \mathrm{d}\phi_1^{\mathrm{T}} \phi_2^{-1}
\end{bmatrix}
\\
&=
\begin{bmatrix}
2 (\overline{u}_2 - \overline{u}_1 \overline{u}_1^{\mathrm{T}}) \mathrm{d}\phi_2 \overline{u}_1
+ (\overline{u}_2 - \overline{u}_1 \overline{u}_1^{\mathrm{T}}) \mathrm{d}\phi_1
\\
u_2 d\phi_2 u_2 - 2 u_1 u_1^T d\phi_2 u_1 u_1^T
+ 2 (u_2 - u_1 u_1^T) d\phi_1 u_1^T
\end{bmatrix}
Standard gradient given the gradient with respect to the moments, that
is, given the Riemannian gradient :math:`\tilde{\nabla}`:
.. math::
\nabla =
\begin{bmatrix}
(\overline{u}_2 - \overline{u}_1 \overline{u}_1^{\mathrm{T}}) \tilde{\nabla}_1
+ 2 (u_2 - u_1 u_1^T) \tilde{\nabla}_2 u_1
\\
(u_2 - u_1 u_1^T) \tilde{\nabla}_1 u_1^T
+ u_1 \tilde{\nabla}_1^T (u_2 - u_1 u_1^T)
+ 2 u_2 \tilde{\nabla}_2 u_2
- 2 u_1 u_1^T \tilde{\nabla}_2 u_1 u_1^T
\end{bmatrix}
"""
ndim = 1
x = u[0]
xx = u[1]
# Some helpful variables
x_x = linalg.outer(x, x, ndim=self.ndim)
Cov = xx - x_x
cov_g0 = linalg.mvdot(Cov, g[0], ndim=self.ndim)
cov_g0_x = linalg.outer(cov_g0, x, ndim=self.ndim)
g1_x = linalg.mvdot(g[1], x, ndim=self.ndim)
# Compute gradient terms
d0 = cov_g0 + 2 * linalg.mvdot(Cov, g1_x, ndim=self.ndim)
d1 = (cov_g0_x + linalg.transpose(cov_g0_x, ndim=self.ndim)
+ 2 * linalg.mmdot(xx,
linalg.mmdot(g[1], xx, ndim=self.ndim),
ndim=self.ndim)
- 2 * x_x * misc.add_trailing_axes(linalg.inner(g1_x,
x,
ndim=self.ndim),
2*self.ndim))
return [d0, d1]
def random(self, *phi, plates=None):
r"""
Draw a random sample from the distribution.
"""
# TODO/FIXME: You shouldn't draw random values for
# observed/fixed elements!
# Note that phi[1] is -0.5*inv(Cov)
U = linalg.chol(-2*phi[1], ndim=self.ndim)
mu = linalg.chol_solve(U, phi[0], ndim=self.ndim)
shape = plates + self.shape
z = np.random.randn(*shape)
# Denote Lambda = -2*phi[1]
# Then, Cov = inv(Lambda) = inv(U'*U) = inv(U) * inv(U')
# Thus, compute mu + U\z
z = linalg.solve_triangular(U, z, trans='N', lower=False, ndim=self.ndim)
return mu + z
class GaussianARDDistribution(ExponentialFamilyDistribution):
r"""
...
Log probability density function:
.. math::
\log p(x|\mu, \alpha) = -\frac{1}{2} x^T \mathrm{diag}(\alpha) x + x^T
\mathrm{diag}(\alpha) \mu - \frac{1}{2} \mu^T \mathrm{diag}(\alpha) \mu
+ \frac{1}{2} \sum_i \log \alpha_i - \frac{D}{2} \log(2\pi)
Parent has moments:
.. math::
\begin{bmatrix}
\alpha \circ \mu
\\
\alpha \circ \mu \circ \mu
\\
\alpha
\\
\log(\alpha)
\end{bmatrix}
"""
def __init__(self, shape):
self.shape = shape
self.ndim = len(shape)
super().__init__()
def compute_message_to_parent(self, parent, index, u, u_mu_alpha):
r"""
...
.. math::
m =
\begin{bmatrix}
x
\\
[-\frac{1}{2}, \ldots, -\frac{1}{2}]
\\
-\frac{1}{2} \mathrm{diag}(xx^T)
\\
[\frac{1}{2}, \ldots, \frac{1}{2}]
\end{bmatrix}
"""
if index == 0:
x = u[0]
x2 = misc.get_diag(u[1], ndim=self.ndim)
m0 = x
m1 = -0.5 * np.ones(self.shape)
m2 = -0.5 * x2
m3 = 0.5 * np.ones(self.shape)
return [m0, m1, m2, m3]
else:
raise ValueError("Invalid parent index")
def compute_weights_to_parent(self, index, weights):
r"""
Maps the mask to the plates of a parent.
"""
if index != 0:
raise IndexError()
return misc.add_trailing_axes(weights, self.ndim)
def compute_phi_from_parents(self, u_mu_alpha, mask=True):
alpha_mu = u_mu_alpha[0]
alpha = u_mu_alpha[2]
#mu = u_mu[0]
#alpha = u_alpha[0]
## if np.ndim(mu) < self.ndim_mu:
## raise ValueError("Moment of mu does not have enough dimensions")
## mu = misc.add_axes(mu,
## axis=np.ndim(mu)-self.ndim_mu,
## num=self.ndim-self.ndim_mu)
phi0 = alpha_mu
phi1 = -0.5 * alpha
if self.ndim > 0:
# Ensure that phi is not using broadcasting for variable
# dimension axes
ones = np.ones(self.shape)
phi0 = ones * phi0
phi1 = ones * phi1
# Make a diagonal matrix
phi1 = misc.diag(phi1, ndim=self.ndim)
return [phi0, phi1]
def compute_moments_and_cgf(self, phi, mask=True):
if self.ndim == 0:
# Use scalar equations
u0 = -phi[0] / (2*phi[1])
u1 = u0**2 - 1 / (2*phi[1])
u = [u0, u1]
g = (-0.5 * u[0] * phi[0] + 0.5 * np.log(-2*phi[1]))
# TODO/FIXME: You could use these equations if phi is a scalar
# in practice although ndim>0 (because the shape can be, e.g.,
# (1,1,1,1) for ndim=4).
else:
# Reshape to standard vector and matrix
D = np.prod(self.shape)
phi0 = np.reshape(phi[0], phi[0].shape[:-self.ndim] + (D,))
phi1 = np.reshape(phi[1], phi[1].shape[:-2*self.ndim] + (D,D))
# Compute the moments
L = linalg.chol(-2*phi1)
Cov = linalg.chol_inv(L)
u0 = linalg.chol_solve(L, phi0)
u1 = linalg.outer(u0, u0) + Cov
# Compute CGF
g = (- 0.5 * np.einsum('...i,...i', u0, phi0)
+ 0.5 * linalg.chol_logdet(L))
# Reshape to arrays
u0 = np.reshape(u0, u0.shape[:-1] + self.shape)
u1 = np.reshape(u1, u1.shape[:-2] + self.shape + self.shape)
u = [u0, u1]
return (u, g)
def compute_cgf_from_parents(self, u_mu_alpha):
r"""
Compute the value of the cumulant generating function.
"""
# Compute sum(mu^2 * alpha) correctly for broadcasted shapes
alpha_mu2 = u_mu_alpha[1]
logdet_alpha = u_mu_alpha[3]
axes = tuple(range(-self.ndim, 0))
# TODO/FIXME: You could use plate multiplier type of correction instead
# of explicitly broadcasting with ones.
if self.ndim > 0:
alpha_mu2 = misc.sum_multiply(alpha_mu2, np.ones(self.shape),
axis=axes)
if self.ndim > 0:
logdet_alpha = misc.sum_multiply(logdet_alpha, np.ones(self.shape),
axis=axes)
# Compute g
g = -0.5*alpha_mu2 + 0.5*logdet_alpha
return g
def compute_fixed_moments_and_f(self, x, mask=True):
r""" Compute u(x) and f(x) for given x. """
if self.ndim > 0 and np.shape(x)[-self.ndim:] != self.shape:
raise ValueError("Invalid shape")
k = np.prod(self.shape)
u = [x, linalg.outer(x, x, ndim=self.ndim)]
f = -k/2*np.log(2*np.pi)
return (u, f)
def plates_to_parent(self, index, plates):
r"""
Resolves the plate mapping to a parent.
Given the plates of the node's moments, this method returns the plates
that the message to a parent has for the parent's distribution.
"""
if index != 0:
raise IndexError()
return plates + self.shape
def plates_from_parent(self, index, plates):
r"""
Resolve the plate mapping from a parent.
Given the plates of a parent's moments, this method returns the plates
that the moments has for this distribution.
"""
if index != 0:
raise IndexError()
if self.ndim == 0:
return plates
else:
return plates[:-self.ndim]
def random(self, *phi, plates=None):
r"""
Draw a random sample from the Gaussian distribution.
"""
# TODO/FIXME: You shouldn't draw random values for
# observed/fixed elements!
D = self.ndim
if D == 0:
dims = ()
else:
dims = np.shape(phi[0])[-D:]
if np.prod(dims) == 1.0:
# Scalar Gaussian
phi1 = phi[1]
if D > 0:
# Because the covariance matrix has shape (1,1,...,1,1),
# that is 2*D number of ones, remove the extra half of the
# shape
phi1 = np.reshape(phi1, np.shape(phi1)[:-2*D] + D*(1,))
var = -0.5 / phi1
std = np.sqrt(var)
mu = var * phi[0]
shape = plates + dims
z = np.random.randn(*shape)
x = mu + std * z
else:
N = np.prod(dims)
dims_cov = dims + dims
# Reshape precision matrix
plates_cov = np.shape(phi[1])[:-2*D]
V = -2 * np.reshape(phi[1], plates_cov + (N,N))
# Compute Cholesky
U = linalg.chol(V)
# Reshape mean vector
plates_phi0 = np.shape(phi[0])[:-D]
phi0 = np.reshape(phi[0], plates_phi0 + (N,))
mu = linalg.chol_solve(U, phi0)
# Compute mu + U\z
shape = plates + (N,)
z = np.random.randn(*shape)
# Denote Lambda = -2*phi[1]
# Then, Cov = inv(Lambda) = inv(U'*U) = inv(U) * inv(U')
# Thus, compute mu + U\z
x = mu + linalg.solve_triangular(U, z,
trans='N',
lower=False)
x = np.reshape(x, plates + dims)
return x
def compute_gradient(self, g, u, phi):
r"""
Compute the standard gradient with respect to the natural parameters.
Gradient of the moments:
.. math::
\mathrm{d}\overline{\mathbf{u}} &=
\begin{bmatrix}
\frac{1}{2} \phi_2^{-1} \mathrm{d}\phi_2 \phi_2^{-1} \phi_1
- \frac{1}{2} \phi_2^{-1} \mathrm{d}\phi_1
\\
- \frac{1}{4} \phi_2^{-1} \mathrm{d}\phi_2 \phi_2^{-1} \phi_1 \phi_1^{\mathrm{T}} \phi_2^{-1}
- \frac{1}{4} \phi_2^{-1} \phi_1 \phi_1^{\mathrm{T}} \phi_2^{-1} \mathrm{d}\phi_2 \phi_2^{-1}
+ \frac{1}{2} \phi_2^{-1} \mathrm{d}\phi_2 \phi_2^{-1}
+ \frac{1}{4} \phi_2^{-1} \mathrm{d}\phi_1 \phi_1^{\mathrm{T}} \phi_2^{-1}
+ \frac{1}{4} \phi_2^{-1} \phi_1 \mathrm{d}\phi_1^{\mathrm{T}} \phi_2^{-1}
\end{bmatrix}
\\
&=
\begin{bmatrix}
2 (\overline{u}_2 - \overline{u}_1 \overline{u}_1^{\mathrm{T}}) \mathrm{d}\phi_2 \overline{u}_1
+ (\overline{u}_2 - \overline{u}_1 \overline{u}_1^{\mathrm{T}}) \mathrm{d}\phi_1
\\
u_2 d\phi_2 u_2 - 2 u_1 u_1^T d\phi_2 u_1 u_1^T
+ 2 (u_2 - u_1 u_1^T) d\phi_1 u_1^T
\end{bmatrix}
Standard gradient given the gradient with respect to the moments, that
is, given the Riemannian gradient :math:`\tilde{\nabla}`:
.. math::
\nabla =
\begin{bmatrix}
(\overline{u}_2 - \overline{u}_1 \overline{u}_1^{\mathrm{T}}) \tilde{\nabla}_1
+ 2 (u_2 - u_1 u_1^T) \tilde{\nabla}_2 u_1
\\
(u_2 - u_1 u_1^T) \tilde{\nabla}_1 u_1^T
+ u_1 \tilde{\nabla}_1^T (u_2 - u_1 u_1^T)
+ 2 u_2 \tilde{\nabla}_2 u_2
- 2 u_1 u_1^T \tilde{\nabla}_2 u_1 u_1^T
\end{bmatrix}
"""
ndim = self.ndim
x = u[0]
xx = u[1]
# Some helpful variables
x_x = linalg.outer(x, x, ndim=ndim)
Cov = xx - x_x
cov_g0 = linalg.mvdot(Cov, g[0], ndim=ndim)
cov_g0_x = linalg.outer(cov_g0, x, ndim=ndim)
g1_x = linalg.mvdot(g[1], x, ndim=ndim)
# Compute gradient terms
d0 = cov_g0 + 2 * linalg.mvdot(Cov, g1_x, ndim=ndim)
d1 = (cov_g0_x + linalg.transpose(cov_g0_x, ndim=ndim)
+ 2 * linalg.mmdot(xx,
linalg.mmdot(g[1], xx, ndim=ndim),
ndim=ndim)
- 2 * x_x * misc.add_trailing_axes(linalg.inner(g1_x,
x,
ndim=ndim),
2*ndim))
return [d0, d1]
class GaussianGammaDistribution(ExponentialFamilyDistribution):
r"""
Class for the VMP formulas of Gaussian-Gamma-ISO variables.
Currently, supports only vector variables.
Log pdf of the prior:
.. math::
\log p(\mathbf{x}, \tau | \boldsymbol{\mu}, \mathbf{\Lambda}, a, b) =&
- \frac{1}{2} \tau \mathbf{x}^T \mathbf{\Lambda} \mathbf{x}
+ \frac{1}{2} \tau \mathbf{x}^T \mathbf{\Lambda} \boldsymbol{\mu}
+ \frac{1}{2} \tau \boldsymbol{\mu}^T \mathbf{\Lambda} \mathbf{x}
- \frac{1}{2} \tau \boldsymbol{\mu}^T \mathbf{\Lambda} \boldsymbol{\mu}
+ \frac{1}{2} \log|\mathbf{\Lambda}|
+ \frac{D}{2} \log\tau
- \frac{D}{2} \log(2\pi)
\\ &
- b \tau
+ a \log\tau
- \log\tau
+ a \log b
- \log \Gamma(a)
Log pdf of the posterior approximation:
.. math::
\log q(\mathbf{x}, \tau) =&
\tau \mathbf{x}^T \boldsymbol{\phi}_1
+ \tau \mathbf{x}^T \mathbf{\Phi}_2 \mathbf{x}
+ \tau \phi_3
+ \log\tau \phi_4
+ g(\boldsymbol{\phi}_1, \mathbf{\Phi}_2, \phi_3, \phi_4)
+ f(x, \tau)
"""
def __init__(self, ndim):
self.ndim = ndim
super().__init__()
def compute_message_to_parent(self, parent, index, u, u_mu_Lambda, u_a, u_b):
r"""
Compute the message to a parent node.
- Parent :math:`(\boldsymbol{\mu}, \mathbf{\Lambda})`
Moments:
.. math::
\begin{bmatrix}
\mathbf{\Lambda}\boldsymbol{\mu}
\\
\boldsymbol{\mu}^T\mathbf{\Lambda}\boldsymbol{\mu}
\\
\mathbf{\Lambda}
\\
\log|\mathbf{\Lambda}|
\end{bmatrix}
Message:
.. math::
\begin{bmatrix}
\langle \tau \mathbf{x} \rangle
\\
- \frac{1}{2} \langle \tau \rangle
\\
- \frac{1}{2} \langle \tau \mathbf{xx}^T \rangle
\\
\frac{1}{2}
\end{bmatrix}
- Parent :math:`a`:
Moments:
.. math::
\begin{bmatrix}
a
\\
\log \Gamma(a)
\end{bmatrix}
Message:
.. math::
\begin{bmatrix}
\langle \log\tau \rangle + \langle \log b \rangle
\\
-1
\end{bmatrix}
- Parent :math:`b`:
Moments:
.. math::
\begin{bmatrix}
b
\\
\log b
\end{bmatrix}
Message:
.. math::
\begin{bmatrix}
- \langle \tau \rangle
\\
\langle a \rangle
\end{bmatrix}
"""
x_tau = u[0]
xx_tau = u[1]
tau = u[2]
logtau = u[3]
if index == 0:
m0 = x_tau
m1 = -0.5 * tau
m2 = -0.5 * xx_tau
m3 = 0.5
return [m0, m1, m2, m3]
elif index == 1:
logb = u_b[1]
m0 = logtau + logb
m1 = -1
return [m0, m1]
elif index == 2:
a = u_a[0]
m0 = -tau
m1 = a
return [m0, m1]
else:
raise ValueError("Index out of bounds")
def compute_phi_from_parents(self, u_mu_Lambda, u_a, u_b, mask=True):
r"""
Compute the natural parameter vector given parent moments.
"""
Lambda_mu = u_mu_Lambda[0]
mu_Lambda_mu = u_mu_Lambda[1]
Lambda = u_mu_Lambda[2]
a = u_a[0]
b = u_b[0]
phi = [Lambda_mu,
-0.5*Lambda,
-0.5*mu_Lambda_mu - b,
a]
return phi
def compute_moments_and_cgf(self, phi, mask=True):
r"""
Compute the moments and :math:`g(\phi)`.
"""
# Compute helpful variables
V = -2*phi[1]
L_V = linalg.chol(V, ndim=self.ndim)
logdet_V = linalg.chol_logdet(L_V, ndim=self.ndim)
mu = linalg.chol_solve(L_V, phi[0], ndim=self.ndim)
Cov = linalg.chol_inv(L_V, ndim=self.ndim)
a = phi[3]
b = -phi[2] - 0.5 * linalg.inner(mu, phi[0], ndim=self.ndim)
log_b = np.log(b)
# Compute moments
u2 = a / b
u3 = -log_b + special.psi(a)
u0 = mu * misc.add_trailing_axes(u2, self.ndim)
u1 = Cov + (
linalg.outer(mu, mu, ndim=self.ndim)
* misc.add_trailing_axes(u2, 2 * self.ndim)
)
u = [u0, u1, u2, u3]
# Compute g
g = 0.5*logdet_V + a*log_b - special.gammaln(a)
return (u, g)
def compute_cgf_from_parents(self, u_mu_Lambda, u_a, u_b):
r"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
logdet_Lambda = u_mu_Lambda[3]
a = u_a[0]
gammaln_a = u_a[1]
log_b = u_b[1]
g = 0.5*logdet_Lambda + a*log_b - gammaln_a
return g
def compute_fixed_moments_and_f(self, x, alpha, mask=True):
r"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
logalpha = np.log(alpha)
u0 = x * misc.add_trailing_axes(alpha, self.ndim)
u1 = linalg.outer(x, x, ndim=self.ndim) * misc.add_trailing_axes(alpha, self.ndim)
u2 = alpha
u3 = logalpha
u = [u0, u1, u2, u3]
if self.ndim > 0:
            D = np.prod(np.shape(x)[-self.ndim:])
else:
D = 1
f = (D/2 - 1) * logalpha - D/2 * np.log(2*np.pi)
return (u, f)
def random(self, *params, plates=None):
r"""
Draw a random sample from the distribution.
"""
raise NotImplementedError()
class GaussianWishartDistribution(ExponentialFamilyDistribution):
r"""
Class for the VMP formulas of Gaussian-Wishart variables.
Currently, supports only vector variables.
"""
def compute_message_to_parent(self, parent, index, u, u_mu_alpha, u_V, u_n):
r"""
Compute the message to a parent node.
"""
if index == 0:
raise NotImplementedError()
elif index == 1:
raise NotImplementedError()
elif index == 2:
raise NotImplementedError()
else:
raise ValueError("Index out of bounds")
def compute_phi_from_parents(self, u_mu_alpha, u_V, u_n, mask=True):
r"""
Compute the natural parameter vector given parent moments.
"""
raise NotImplementedError()
def compute_moments_and_cgf(self, phi, mask=True):
r"""
Compute the moments and :math:`g(\phi)`.
"""
raise NotImplementedError()
return (u, g)
def compute_cgf_from_parents(self, u_mu_alpha, u_V, u_n):
r"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
raise NotImplementedError()
return g
def compute_fixed_moments_and_f(self, x, Lambda, mask=True):
r"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
raise NotImplementedError()
return (u, f)
def random(self, *params, plates=None):
r"""
Draw a random sample from the distribution.
"""
raise NotImplementedError()
#
# NODES
#
class _GaussianTemplate(ExponentialFamily):
def translate(self, b, debug=False):
"""
Transforms the current posterior by adding a bias to the mean
Parameters
----------
b : array
Constant to add
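        Examples
        --------
        Illustrative sketch only (assumes `X` is an already-updated
        Gaussian-type node; the bias vector below is an arbitrary choice)::
            X.translate(np.ones(X.dims[0]))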
"""
ndim = len(self.dims[0])
if ndim > 0 and np.shape(b)[-ndim:] != self.dims[0]:
raise ValueError("Bias has incorrect shape")
x = self.u[0]
xb = linalg.outer(x, b, ndim=ndim)
bx = linalg.transpose(xb, ndim=ndim)
bb = linalg.outer(b, b, ndim=ndim)
uh = [
self.u[0] + b,
self.u[1] + xb + bx + bb
]
Lambda = -2 * self.phi[1]
Lambda_b = linalg.mvdot(Lambda, b, ndim=ndim)
dg = -0.5 * (
linalg.inner(b, Lambda_b, ndim=ndim)
+ 2 * linalg.inner(x, Lambda_b, ndim=ndim)
)
phih = [
self.phi[0] + Lambda_b,
self.phi[1]
]
self._check_shape(uh)
self._check_shape(phih)
self.u = uh
self.phi = phih
self.g = self.g + dg
# TODO: This is all just debugging stuff and can be removed
if debug:
uh = [ui.copy() for ui in uh]
gh = self.g.copy()
self._update_moments_and_cgf()
if any(not np.allclose(uih, ui, atol=1e-6) for (uih, ui) in zip(uh, self.u)):
raise RuntimeError("BUG")
if not np.allclose(self.g, gh, atol=1e-6):
raise RuntimeError("BUG")
return
class Gaussian(_GaussianTemplate):
r"""
Node for Gaussian variables.
The node represents a :math:`D`-dimensional vector from the Gaussian
distribution:
.. math::
\mathbf{x} &\sim \mathcal{N}(\boldsymbol{\mu}, \mathbf{\Lambda}),
where :math:`\boldsymbol{\mu}` is the mean vector and
:math:`\mathbf{\Lambda}` is the precision matrix (i.e., inverse of the
covariance matrix).
.. math::
\mathbf{x},\boldsymbol{\mu} \in \mathbb{R}^{D},
\quad \mathbf{\Lambda} \in \mathbb{R}^{D \times D},
\quad \mathbf{\Lambda} \text{ symmetric positive definite}
Parameters
----------
mu : Gaussian-like node or GaussianGamma-like node or GaussianWishart-like node or array
Mean vector
Lambda : Wishart-like node or array
Precision matrix
See also
--------
Wishart, GaussianARD, GaussianWishart, GaussianGamma
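    Examples
    --------
    A minimal, illustrative construction (the dimensionality and the
    parameter values below are arbitrary choices, not defaults of this
    module):
    >>> from bayespy.nodes import Gaussian, Wishart
    >>> Lambda = Wishart(2, [[1.0, 0.0], [0.0, 1.0]])
    >>> X = Gaussian([0.0, 0.0], Lambda)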
"""
def __init__(self, mu, Lambda, **kwargs):
r"""
Create Gaussian node
"""
super().__init__(mu, Lambda, **kwargs)
@classmethod
def _constructor(cls, mu, Lambda, ndim=1, **kwargs):
r"""
Constructs distribution and moments objects.
"""
mu_Lambda = WrapToGaussianWishart(mu, Lambda, ndim=ndim)
shape = mu_Lambda._moments.shape
moments = GaussianMoments(shape)
parent_moments = (mu_Lambda._moments,)
if mu_Lambda.dims != ( shape, (), shape+shape, () ):
raise Exception("Parents have wrong dimensionality")
distribution = GaussianDistribution(shape)
parents = [mu_Lambda]
return (parents,
kwargs,
moments.dims,
cls._total_plates(kwargs.get('plates'),
distribution.plates_from_parent(0, mu_Lambda.plates)),
distribution,
moments,
parent_moments)
def initialize_from_parameters(self, mu, Lambda):
u = self._parent_moments[0].compute_fixed_moments(mu, Lambda)
self._initialize_from_parent_moments(u)
def __str__(self):
ndim = len(self.dims[0])
mu = self.u[0]
Cov = self.u[1] - linalg.outer(mu, mu, ndim=ndim)
return ("%s ~ Gaussian(mu, Cov)\n"
" mu = \n"
"%s\n"
" Cov = \n"
"%s\n"
% (self.name, mu, Cov))
def rotate(self, R, inv=None, logdet=None, Q=None):
# TODO/FIXME: Combine and refactor all these rotation transformations
# into _GaussianTemplate
if self._moments.ndim != 1:
raise NotImplementedError("Not implemented for ndim!=1 yet")
if inv is not None:
invR = inv
else:
invR = np.linalg.inv(R)
if logdet is not None:
logdetR = logdet
else:
logdetR = np.linalg.slogdet(R)[1]
# It would be more efficient and simpler, if you just rotated the
# moments and didn't touch phi. However, then you would need to call
# update() before lower_bound_contribution. This is more error-safe.
# Rotate plates, if plate rotation matrix is given. Assume that there's
# only one plate-axis
if Q is not None:
# Rotate moments using Q
self.u[0] = np.einsum('ik,kj->ij', Q, self.u[0])
sumQ = np.sum(Q, axis=0)
# Rotate natural parameters using Q
self.phi[1] = np.einsum('d,dij->dij', sumQ**(-2), self.phi[1])
self.phi[0] = np.einsum('dij,dj->di', -2*self.phi[1], self.u[0])
# Transform parameters using R
self.phi[0] = mvdot(invR.T, self.phi[0])
self.phi[1] = dot(invR.T, self.phi[1], invR)
if Q is not None:
self._update_moments_and_cgf()
else:
# Transform moments and g using R
self.u[0] = mvdot(R, self.u[0])
self.u[1] = dot(R, self.u[1], R.T)
self.g -= logdetR
def rotate_matrix(self, R1, R2, inv1=None, logdet1=None, inv2=None, logdet2=None, Q=None):
r"""
The vector is reshaped into a matrix by stacking the row vectors.
Computes R1*X*R2', which is identical to kron(R1,R2)*x (??)
Note that this is slightly different from the standard Kronecker product
definition because Numpy stacks row vectors instead of column vectors.
Parameters
----------
R1 : ndarray
A matrix from the left
R2 : ndarray
A matrix from the right
"""
if self._moments.ndim != 1:
raise NotImplementedError("Not implemented for ndim!=1 yet")
if Q is not None:
# Rotate moments using Q
self.u[0] = np.einsum('ik,kj->ij', Q, self.u[0])
sumQ = np.sum(Q, axis=0)
# Rotate natural parameters using Q
self.phi[1] = np.einsum('d,dij->dij', sumQ**(-2), self.phi[1])
self.phi[0] = np.einsum('dij,dj->di', -2*self.phi[1], self.u[0])
if inv1 is None:
inv1 = np.linalg.inv(R1)
if logdet1 is None:
logdet1 = np.linalg.slogdet(R1)[1]
if inv2 is None:
inv2 = np.linalg.inv(R2)
if logdet2 is None:
logdet2 = np.linalg.slogdet(R2)[1]
D1 = np.shape(R1)[0]
D2 = np.shape(R2)[0]
# Reshape into matrices
sh0 = np.shape(self.phi[0])[:-1] + (D1,D2)
sh1 = np.shape(self.phi[1])[:-2] + (D1,D2,D1,D2)
phi0 = np.reshape(self.phi[0], sh0)
phi1 = np.reshape(self.phi[1], sh1)
# Apply rotations to phi
#phi0 = dot(inv1, phi0, inv2.T)
phi0 = dot(inv1.T, phi0, inv2)
phi1 = np.einsum('...ia,...abcd->...ibcd', inv1.T, phi1)
phi1 = np.einsum('...ic,...abcd->...abid', inv1.T, phi1)
phi1 = np.einsum('...ib,...abcd->...aicd', inv2.T, phi1)
phi1 = np.einsum('...id,...abcd->...abci', inv2.T, phi1)
# Reshape back into vectors
self.phi[0] = np.reshape(phi0, self.phi[0].shape)
self.phi[1] = np.reshape(phi1, self.phi[1].shape)
# It'd be better to rotate the moments too..
self._update_moments_and_cgf()
class GaussianARD(_GaussianTemplate):
r"""
Node for Gaussian variables with ARD prior.
The node represents a :math:`D`-dimensional vector from the Gaussian
distribution:
.. math::
\mathbf{x} &\sim \mathcal{N}(\boldsymbol{\mu}, \mathrm{diag}(\boldsymbol{\alpha})),
where :math:`\boldsymbol{\mu}` is the mean vector and
:math:`\mathrm{diag}(\boldsymbol{\alpha})` is the diagonal precision matrix
(i.e., inverse of the covariance matrix).
.. math::
\mathbf{x},\boldsymbol{\mu} \in \mathbb{R}^{D}, \quad \alpha_d > 0 \text{
for } d=0,\ldots,D-1
*Note:* The form of the posterior approximation is a Gaussian distribution with full
covariance matrix instead of a diagonal matrix.
Parameters
----------
    mu : Gaussian-like node or GaussianGamma-like node or array
        Mean vector
alpha : gamma-like node or array
Diagonal elements of the precision matrix
See also
--------
Gamma, Gaussian, GaussianGamma, GaussianWishart
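    Examples
    --------
    A minimal, illustrative construction (the shape and plates below are
    arbitrary assumptions):
    >>> from bayespy.nodes import GaussianARD
    >>> X = GaussianARD(0, 1, shape=(3,), plates=(10,))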
"""
def __init__(self, mu, alpha, ndim=None, shape=None, **kwargs):
r"""
Create GaussianARD node.
"""
super().__init__(mu, alpha, ndim=ndim, shape=shape, **kwargs)
@classmethod
def _constructor(cls, mu, alpha, ndim=None, shape=None, **kwargs):
r"""
Constructs distribution and moments objects.
If __init__ uses useconstructor decorator, this method is called to
construct distribution and moments objects.
The method is given the same inputs as __init__. For some nodes, some of
these can't be "static" class attributes, then the node class must
overwrite this method to construct the objects manually.
        The point of the distribution class is to hold the code that is
        specific to the distribution but not to the node. The point of the
        moments class is to define the messaging protocols.
"""
mu_alpha = WrapToGaussianGamma(mu, alpha, ndim=0)
if ndim is None:
if shape is not None:
ndim = len(shape)
else:
shape = ()
ndim = 0
else:
if shape is not None:
if ndim != len(shape):
raise ValueError("Given shape and ndim inconsistent")
else:
if ndim == 0:
shape = ()
else:
if ndim > len(mu_alpha.plates):
raise ValueError(
"Cannot determine shape for ndim={0} because parent "
"full shape has ndim={1}."
.format(ndim, len(mu_alpha.plates))
)
shape = mu_alpha.plates[-ndim:]
moments = GaussianMoments(shape)
parent_moments = [GaussianGammaMoments(())]
distribution = GaussianARDDistribution(shape)
plates = cls._total_plates(kwargs.get('plates'),
distribution.plates_from_parent(0, mu_alpha.plates))
parents = [mu_alpha]
return (parents,
kwargs,
moments.dims,
plates,
distribution,
moments,
parent_moments)
def initialize_from_parameters(self, mu, alpha):
# Explicit broadcasting so the shapes match
mu = mu * np.ones(np.shape(alpha))
alpha = alpha * np.ones(np.shape(mu))
# Compute parent moments
u = self._parent_moments[0].compute_fixed_moments(mu, alpha)
# Initialize distribution
self._initialize_from_parent_moments(u)
def initialize_from_mean_and_covariance(self, mu, Cov):
ndim = len(self._distribution.shape)
u = [mu, Cov + linalg.outer(mu, mu, ndim=ndim)]
mask = np.logical_not(self.observed)
# TODO: You could compute the CGF but it requires Cholesky of
# Cov. Do it later.
self._set_moments_and_cgf(u, np.nan, mask=mask)
return
def __str__(self):
mu = self.u[0]
Cov = self.u[1] - linalg.outer(mu, mu)
return ("%s ~ Gaussian(mu, Cov)\n"
" mu = \n"
"%s\n"
" Cov = \n"
"%s\n"
% (self.name, mu, Cov))
def rotate(self, R, inv=None, logdet=None, axis=-1, Q=None, subset=None, debug=False):
if Q is not None:
raise NotImplementedError()
if subset is not None:
raise NotImplementedError()
# TODO/FIXME: Combine and refactor all these rotation transformations
# into _GaussianTemplate
ndim = len(self._distribution.shape)
if inv is not None:
invR = inv
else:
invR = np.linalg.inv(R)
if logdet is not None:
logdetR = logdet
else:
logdetR = np.linalg.slogdet(R)[1]
self.phi[0] = rotate_mean(self.phi[0], invR.T,
axis=axis,
ndim=ndim)
self.phi[1] = rotate_covariance(self.phi[1], invR.T,
axis=axis,
ndim=ndim)
self.u[0] = rotate_mean(self.u[0], R,
axis=axis,
ndim=ndim)
self.u[1] = rotate_covariance(self.u[1], R,
axis=axis,
ndim=ndim)
s = list(self.dims[0])
s.pop(axis)
self.g -= logdetR * np.prod(s)
# TODO: This is all just debugging stuff and can be removed
if debug:
uh = [ui.copy() for ui in self.u]
gh = self.g.copy()
self._update_moments_and_cgf()
if any(not np.allclose(uih, ui, atol=1e-6) for (uih, ui) in zip(uh, self.u)):
raise RuntimeError("BUG")
if not np.allclose(self.g, gh, atol=1e-6):
raise RuntimeError("BUG")
return
def rotate_plates(self, Q, plate_axis=-1):
r"""
Approximate rotation of a plate axis.
Mean is rotated exactly but covariance/precision matrix is rotated
approximately.
"""
ndim = len(self._distribution.shape)
# Rotate moments using Q
if not isinstance(plate_axis, int):
raise ValueError("Plate axis must be integer")
if plate_axis >= 0:
plate_axis -= len(self.plates)
if plate_axis < -len(self.plates) or plate_axis >= 0:
raise ValueError("Axis out of bounds")
u0 = rotate_mean(self.u[0], Q,
ndim=ndim+(-plate_axis),
axis=0)
sumQ = misc.add_trailing_axes(np.sum(Q, axis=0),
2*ndim-plate_axis-1)
phi1 = sumQ**(-2) * self.phi[1]
phi0 = -2 * matrix_dot_vector(phi1, u0, ndim=ndim)
self.phi[0] = phi0
self.phi[1] = phi1
self._update_moments_and_cgf()
return
class GaussianGamma(ExponentialFamily):
r"""
Node for Gaussian-gamma (isotropic) random variables.
The prior:
.. math::
p(x, \alpha| \mu, \Lambda, a, b)
        p(x|\alpha, \mu, \Lambda) = \mathcal{N}(x | \mu, \alpha \Lambda)
p(\alpha|a, b) = \mathcal{G}(\alpha | a, b)
The posterior approximation :math:`q(x, \alpha)` has the same Gaussian-gamma
form.
Currently, supports only vector variables.
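    Examples
    --------
    A minimal, illustrative construction (the 2-D mean, precision and gamma
    hyperparameters below are arbitrary assumptions, not defaults of this
    module)::
        from bayespy.nodes import GaussianGamma
        X_tau = GaussianGamma([0.0, 0.0], [[1.0, 0.0], [0.0, 1.0]], 1, 1)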
"""
@classmethod
def _constructor(cls, mu, Lambda, a, b, ndim=1, **kwargs):
r"""
Constructs distribution and moments objects.
This method is called if useconstructor decorator is used for __init__.
`mu` is the mean/location vector
        `Lambda` is the precision matrix
        `a` is the shape parameter of the gamma distribution
        `b` is the rate parameter of the gamma distribution
"""
# Convert parent nodes
mu_Lambda = WrapToGaussianWishart(mu, Lambda, ndim=ndim)
a = cls._ensure_moments(a, GammaPriorMoments)
b = cls._ensure_moments(b, GammaMoments)
shape = mu_Lambda.dims[0]
distribution = GaussianGammaDistribution(ndim)
moments = GaussianGammaMoments(shape)
parent_moments = (
mu_Lambda._moments,
a._moments,
b._moments,
)
# Check shapes
if mu_Lambda.dims != ( shape, (), 2*shape, () ):
raise ValueError("mu and Lambda have wrong shape")
if a.dims != ( (), () ):
raise ValueError("a has wrong shape")
if b.dims != ( (), () ):
raise ValueError("b has wrong shape")
# List of parent nodes
parents = [mu_Lambda, a, b]
return (parents,
kwargs,
moments.dims,
cls._total_plates(kwargs.get('plates'),
distribution.plates_from_parent(0, mu_Lambda.plates),
distribution.plates_from_parent(1, a.plates),
distribution.plates_from_parent(2, b.plates)),
distribution,
moments,
parent_moments)
def translate(self, b, debug=False):
if self.ndim != 1:
raise NotImplementedError("Only ndim=1 supported at the moment")
tau = self.u[2]
x = self.u[0] / tau[...,None]
xb = linalg.outer(x, b, ndim=1)
bx = linalg.transpose(xb, ndim=1)
bb = linalg.outer(b, b, ndim=1)
uh = [
self.u[0] + tau[...,None] * b,
self.u[1] + tau[...,None,None] * (xb + bx + bb),
self.u[2],
self.u[3]
]
Lambda = -2 * self.phi[1]
dtau = -0.5 * (
np.einsum('...ij,...i,...j->...', Lambda, b, b)
+ 2 * np.einsum('...ij,...i,...j->...', Lambda, b, x)
)
phih = [
self.phi[0] + np.einsum('...ij,...j->...i', Lambda, b),
self.phi[1],
self.phi[2] + dtau,
self.phi[3]
]
self._check_shape(uh)
self._check_shape(phih)
self.phi = phih
self.u = uh
# TODO: This is all just debugging stuff and can be removed
if debug:
uh = [ui.copy() for ui in uh]
gh = self.g.copy()
self._update_moments_and_cgf()
if any(not np.allclose(uih, ui, atol=1e-6) for (uih, ui) in zip(uh, self.u)):
raise RuntimeError("BUG")
if not np.allclose(self.g, gh, atol=1e-6):
raise RuntimeError("BUG")
return
def rotate(self, R, inv=None, logdet=None, debug=False):
if self.ndim != 1:
raise NotImplementedError("Only ndim=1 supported at the moment")
if inv is None:
inv = np.linalg.inv(R)
if logdet is None:
logdet = np.linalg.slogdet(R)[1]
uh = [
rotate_mean(self.u[0], R),
rotate_covariance(self.u[1], R),
self.u[2],
self.u[3]
]
phih = [
rotate_mean(self.phi[0], inv.T),
rotate_covariance(self.phi[1], inv.T),
self.phi[2],
self.phi[3]
]
self._check_shape(uh)
self._check_shape(phih)
self.phi = phih
self.u = uh
self.g = self.g - logdet
# TODO: This is all just debugging stuff and can be removed
if debug:
uh = [ui.copy() for ui in uh]
gh = self.g.copy()
self._update_moments_and_cgf()
if any(not np.allclose(uih, ui, atol=1e-6) for (uih, ui) in zip(uh, self.u)):
raise RuntimeError("BUG")
if not np.allclose(self.g, gh, atol=1e-6):
raise RuntimeError("BUG")
return
def plotmatrix(self):
r"""
Creates a matrix of marginal plots.
On diagonal, are marginal plots of each variable. Off-diagonal plot
(i,j) shows the joint marginal density of x_i and x_j.
"""
import bayespy.plot as bpplt
if self.ndim != 1:
raise NotImplementedError("Only ndim=1 supported at the moment")
if np.prod(self.plates) != 1:
raise ValueError("Currently, does not support plates in the node.")
if len(self.dims[0]) != 1:
raise ValueError("Currently, supports only vector variables")
# Dimensionality of the Gaussian
D = self.dims[0][0]
# Compute standard parameters
tau = self.u[2]
mu = self.u[0]
mu = mu / misc.add_trailing_axes(tau, 1)
Cov = self.u[1] - linalg.outer(self.u[0], mu, ndim=1)
Cov = Cov / misc.add_trailing_axes(tau, 2)
a = self.phi[3]
b = -self.phi[2] - 0.5*linalg.inner(self.phi[0], mu, ndim=1)
# Create subplots
(fig, axes) = bpplt.pyplot.subplots(D+1, D+1)
# Plot marginal Student t distributions
for i in range(D):
for j in range(i+1):
if i == j:
bpplt._pdf_t(*(random.gaussian_gamma_to_t(mu[i],
Cov[i,i],
a,
b,
ndim=0)),
axes=axes[i,i])
else:
S = Cov[np.ix_([i,j],[i,j])]
(m, S, nu) = random.gaussian_gamma_to_t(mu[[i,j]],
S,
a,
b)
bpplt._contour_t(m, S, nu, axes=axes[i,j])
bpplt._contour_t(m, S, nu, axes=axes[j,i], transpose=True)
# Plot Gaussian-gamma marginal distributions
for k in range(D):
bpplt._contour_gaussian_gamma(mu[k], Cov[k,k], a, b,
axes=axes[D,k])
bpplt._contour_gaussian_gamma(mu[k], Cov[k,k], a, b,
axes=axes[k,D],
transpose=True)
# Plot gamma marginal distribution
bpplt._pdf_gamma(a, b, axes=axes[D,D])
return axes
def get_gaussian_location(self):
r"""
        Return the location (mean) of the Gaussian part of the distribution
"""
if self.ndim != 1:
raise NotImplementedError("Only ndim=1 supported at the moment")
tau = self.u[2]
tau_mu = self.u[0]
return tau_mu / tau[...,None]
def get_gaussian_mean_and_variance(self):
r"""
Return the mean and variance of the distribution
"""
if self.ndim != 1:
raise NotImplementedError("Only ndim=1 supported at the moment")
a = self.phi[3]
nu = 2*a
if np.any(nu <= 1):
raise ValueError("Mean not defined for degrees of freedom <= 1")
if np.any(nu <= 2):
raise ValueError("Variance not defined if degrees of freedom <= 2")
tau = self.u[2]
tau_mu = self.u[0]
mu = tau_mu / misc.add_trailing_axes(tau, 1)
var = misc.get_diag(self.u[1], ndim=1) - tau_mu*mu
var = var / misc.add_trailing_axes(tau, 1)
var = nu / (nu-2) * var
return (mu, var)
def get_marginal_logpdf(self, gaussian=None, gamma=None):
r"""
Get the (marginal) log pdf of a subset of the variables
Parameters
----------
gaussian : list or None
Indices of the Gaussian variables to keep or None
gamma : bool or None
            True to keep the gamma variable, otherwise False or None
Returns
-------
function
A function which computes log-pdf
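        Examples
        --------
        Illustrative usage only (assumes the node has been updated; the
        index and argument values are arbitrary)::
            logpdf = X.get_marginal_logpdf(gaussian=[0], gamma=True)
            logpdf(np.array([0.1, 2.0]))  # last element is the gamma variable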
"""
if self.ndim != 1:
raise NotImplementedError("Only ndim=1 supported at the moment")
if gaussian is None and not gamma:
raise ValueError("Must give some variables")
# Compute standard parameters
tau = self.u[2]
mu = self.u[0]
mu = mu / misc.add_trailing_axes(tau, 1)
Cov = np.linalg.inv(-2*self.phi[1])
if not np.allclose(Cov,
self.u[1] - linalg.outer(self.u[0], mu, ndim=1)):
raise Exception("WAAAT")
#Cov = Cov / misc.add_trailing_axes(tau, 2)
a = self.phi[3]
b = -self.phi[2] - 0.5*linalg.inner(self.phi[0], mu, ndim=1)
if not gamma:
# Student t distributions
inds = list(gaussian)
mu = mu[inds]
Cov = Cov[np.ix_(inds, inds)]
(mu, Cov, nu) = random.gaussian_gamma_to_t(mu,
Cov,
a,
b,
ndim=1)
L = linalg.chol(Cov)
logdet_Cov = linalg.chol_logdet(L)
D = len(inds)
def logpdf(x):
y = x - mu
v = linalg.chol_solve(L, y)
z2 = linalg.inner(y, v, ndim=1)
return random.t_logpdf(z2, logdet_Cov, nu, D)
return logpdf
elif gaussian is None:
# Gamma distribution
def logpdf(x):
logx = np.log(x)
return random.gamma_logpdf(b*x,
logx,
a*logx,
a*np.log(b),
special.gammaln(a))
return logpdf
else:
# Gaussian-gamma distribution
inds = list(gaussian)
mu = mu[inds]
Cov = Cov[np.ix_(inds, inds)]
D = len(inds)
L = linalg.chol(Cov)
logdet_Cov = linalg.chol_logdet(L)
def logpdf(x):
tau = x[...,-1]
logtau = np.log(tau)
x = x[...,:-1]
y = x - mu
v = linalg.chol_solve(L, y) * tau[...,None]
z2 = linalg.inner(y, v, ndim=1)
return (random.gaussian_logpdf(z2,
0,
0,
logdet_Cov + D*logtau,
D) +
random.gamma_logpdf(b*tau,
logtau,
a*logtau,
a*np.log(b),
special.gammaln(a)))
return logpdf
class GaussianWishart(ExponentialFamily):
r"""
Node for Gaussian-Wishart random variables.
The prior:
.. math::
p(x, \Lambda| \mu, \alpha, V, n)
        p(x|\Lambda, \mu, \alpha) = \mathcal{N}(x | \mu, \alpha^{-1} \Lambda^{-1})
        p(\Lambda|V, n) = \mathcal{W}(\Lambda | n, V)
The posterior approximation :math:`q(x, \Lambda)` has the same Gaussian-Wishart form.
Currently, supports only vector variables.
"""
_distribution = GaussianWishartDistribution()
@classmethod
def _constructor(cls, mu, alpha, n, V, **kwargs):
r"""
Constructs distribution and moments objects.
This method is called if useconstructor decorator is used for __init__.
`mu` is the mean/location vector
`alpha` is the scale
`n` is the degrees of freedom
`V` is the scale matrix
"""
        # Convert parent nodes
        mu_alpha = WrapToGaussianGamma(mu, alpha)
        D = mu_alpha.dims[0][0]
        # The moments object can be constructed only after the vector
        # dimensionality is known from the parents
        moments = GaussianWishartMoments((D,))
n = cls._ensure_moments(n, WishartPriorMoments)
V = cls._ensure_moments(V, WishartMoments)
parent_moments = (
mu_alpha._moments,
n._moments,
V._moments
)
# Check shapes
if mu_alpha.dims != ( (D,), (D,D), (), () ):
raise ValueError("mu and alpha have wrong shape")
if V.dims != ( (D,D), () ):
raise ValueError("Precision matrix has wrong shape")
if n.dims != ( (), () ):
raise ValueError("Degrees of freedom has wrong shape")
parents = [mu_alpha, n, V]
return (parents,
kwargs,
moments.dims,
cls._total_plates(kwargs.get('plates'),
cls._distribution.plates_from_parent(0, mu_alpha.plates),
cls._distribution.plates_from_parent(1, n.plates),
cls._distribution.plates_from_parent(2, V.plates)),
cls._distribution,
moments,
parent_moments)
#
# CONVERTERS
#
class GaussianToGaussianGamma(Deterministic):
r"""
Converter for Gaussian moments to Gaussian-gamma isotropic moments
Combines the Gaussian moments with gamma moments for a fixed value 1.
"""
def __init__(self, X, **kwargs):
r"""
"""
if not isinstance(X._moments, GaussianMoments):
raise ValueError("Wrong moments, should be Gaussian")
shape = X._moments.shape
self.ndim = X._moments.ndim
self._moments = GaussianGammaMoments(shape)
self._parent_moments = [GaussianMoments(shape)]
shape = X.dims[0]
dims = ( shape, 2*shape, (), () )
super().__init__(X, dims=dims, **kwargs)
def _compute_moments(self, u_X):
r"""
"""
x = u_X[0]
xx = u_X[1]
u = [x, xx, 1, 0]
return u
def _compute_message_to_parent(self, index, m_child, u_X):
r"""
"""
if index == 0:
m = m_child[:2]
return m
else:
raise ValueError("Invalid parent index")
GaussianMoments.add_converter(GaussianGammaMoments,
GaussianToGaussianGamma)
class GaussianGammaToGaussianWishart(Deterministic):
r"""
"""
def __init__(self, X_alpha, **kwargs):
raise NotImplementedError()
GaussianGammaMoments.add_converter(GaussianWishartMoments,
GaussianGammaToGaussianWishart)
#
# WRAPPERS
#
# These wrappers form a single node from two nodes for messaging purposes.
#
class WrapToGaussianGamma(Deterministic):
r"""
"""
def __init__(self, X, alpha, ndim=None, **kwargs):
r"""
"""
# In case X is a numerical array, convert it to Gaussian first
try:
X = self._ensure_moments(X, GaussianMoments, ndim=ndim)
except Moments.NoConverterError:
pass
try:
ndim = X._moments.ndim
except AttributeError as err:
raise TypeError("ndim needs to be given explicitly") from err
X = self._ensure_moments(X, GaussianGammaMoments, ndim=ndim)
if len(X.dims[0]) != ndim:
            raise RuntimeError("Conversion failed: the converted node has unexpected ndim")
shape = X.dims[0]
dims = ( shape, 2 * shape, (), () )
self.shape = shape
self.ndim = len(shape)
self._moments = GaussianGammaMoments(shape)
self._parent_moments = [
GaussianGammaMoments(shape),
GammaMoments()
]
super().__init__(X, alpha, dims=dims, **kwargs)
def _compute_moments(self, u_X, u_alpha):
r"""
"""
(tau_x, tau_xx, tau, logtau) = u_X
(alpha, logalpha) = u_alpha
u0 = tau_x * misc.add_trailing_axes(alpha, self.ndim)
u1 = tau_xx * misc.add_trailing_axes(alpha, 2 * self.ndim)
u2 = tau * alpha
u3 = logtau + logalpha
return [u0, u1, u2, u3]
def _compute_message_to_parent(self, index, m_child, u_X, u_alpha):
r"""
"""
if index == 0:
alpha = u_alpha[0]
m0 = m_child[0] * misc.add_trailing_axes(alpha, self.ndim)
m1 = m_child[1] * misc.add_trailing_axes(alpha, 2 * self.ndim)
m2 = m_child[2] * alpha
m3 = m_child[3]
return [m0, m1, m2, m3]
elif index == 1:
(tau_x, tau_xx, tau, logtau) = u_X
m0 = (
linalg.inner(m_child[0], tau_x, ndim=self.ndim)
+ linalg.inner(m_child[1], tau_xx, ndim=2*self.ndim)
+ m_child[2] * tau
)
m1 = m_child[3]
return [m0, m1]
else:
raise ValueError("Invalid parent index")
class WrapToGaussianWishart(Deterministic):
r"""
Wraps Gaussian and Wishart nodes into a Gaussian-Wishart node.
The following node combinations can be wrapped:
* Gaussian and Wishart
* Gaussian-gamma and Wishart
* Gaussian-Wishart and gamma
"""
def __init__(self, X, Lambda, ndim=1, **kwargs):
r"""
"""
# Just in case X is an array, convert it to a Gaussian node first.
try:
X = self._ensure_moments(X, GaussianMoments, ndim=ndim)
except Moments.NoConverterError:
pass
try:
# Try combo Gaussian-Gamma and Wishart
X = self._ensure_moments(X, GaussianGammaMoments, ndim=ndim)
except ValueError:
# Have to use Gaussian-Wishart and Gamma
X = self._ensure_moments(X, GaussianWishartMoments, ndim=ndim)
Lambda = self._ensure_moments(Lambda, GammaMoments, ndim=ndim)
shape = X.dims[0]
if Lambda.dims != ((), ()):
raise ValueError(
"Mean and precision have inconsistent shapes: {0} and {1}"
.format(
X.dims,
Lambda.dims
)
)
self.wishart = False
else:
# Gaussian-Gamma and Wishart
shape = X.dims[0]
Lambda = self._ensure_moments(Lambda, WishartMoments, ndim=ndim)
if Lambda.dims != (2 * shape, ()):
raise ValueError(
"Mean and precision have inconsistent shapes: {0} and {1}"
.format(
X.dims,
Lambda.dims
)
)
self.wishart = True
self.ndim = len(shape)
self._parent_moments = (
X._moments,
Lambda._moments,
)
self._moments = GaussianWishartMoments(shape)
super().__init__(X, Lambda, dims=self._moments.dims, **kwargs)
def _compute_moments(self, u_X_alpha, u_Lambda):
r"""
"""
if self.wishart:
alpha_x = u_X_alpha[0]
alpha_xx = u_X_alpha[1]
alpha = u_X_alpha[2]
log_alpha = u_X_alpha[3]
Lambda = u_Lambda[0]
logdet_Lambda = u_Lambda[1]
D = np.prod(self.dims[0])
u0 = linalg.mvdot(Lambda, alpha_x, ndim=self.ndim)
u1 = linalg.inner(Lambda, alpha_xx, ndim=2*self.ndim)
u2 = Lambda * misc.add_trailing_axes(alpha, 2*self.ndim)
u3 = logdet_Lambda + D * log_alpha
u = [u0, u1, u2, u3]
return u
else:
raise NotImplementedError()
def _compute_message_to_parent(self, index, m_child, u_X_alpha, u_Lambda):
r"""
...
Message from the child is :math:`[m_0, m_1, m_2, m_3]`:
.. math::
\alpha m_0^T \Lambda x + m_1 \alpha x^T \Lambda x
+ \mathrm{tr}(\alpha m_2 \Lambda) + m_3 (\log | \alpha \Lambda |)
In case of Gaussian-gamma and Wishart parents:
Message to the first parent (x, alpha):
.. math::
\tilde{m_0} &= \Lambda m_0
\\
\tilde{m_1} &= m_1 \Lambda
\\
\tilde{m_2} &= \mathrm{tr}(m_2 \Lambda)
\\
\tilde{m_3} &= m_3 \cdot D
Message to the second parent (Lambda):
.. math::
\tilde{m_0} &= \alpha (\frac{1}{2} m_0 x^T + \frac{1}{2} x m_0^T +
m_1 xx^T + m_2)
\\
\tilde{m_1} &= m_3
"""
if index == 0:
if self.wishart:
# Message to Gaussian-gamma (isotropic)
Lambda = u_Lambda[0]
D = np.prod(self.dims[0])
m0 = linalg.mvdot(Lambda, m_child[0], ndim=self.ndim)
m1 = Lambda * misc.add_trailing_axes(m_child[1], 2*self.ndim)
m2 = linalg.inner(Lambda, m_child[2], ndim=2*self.ndim)
m3 = D * m_child[3]
m = [m0, m1, m2, m3]
return m
else:
# Message to Gaussian-Wishart
raise NotImplementedError()
elif index == 1:
if self.wishart:
# Message to Wishart
alpha_x = u_X_alpha[0]
alpha_xx = u_X_alpha[1]
alpha = u_X_alpha[2]
m0 = (0.5*linalg.outer(alpha_x, m_child[0], ndim=self.ndim) +
0.5*linalg.outer(m_child[0], alpha_x, ndim=self.ndim) +
alpha_xx * misc.add_trailing_axes(m_child[1], 2*self.ndim) +
misc.add_trailing_axes(alpha, 2*self.ndim) * m_child[2])
m1 = m_child[3]
m = [m0, m1]
return m
else:
# Message to gamma (isotropic)
raise NotImplementedError()
else:
raise ValueError("Invalid parent index")
def reshape_gaussian_array(dims_from, dims_to, x0, x1):
r"""
Reshape the moments Gaussian array variable.
The plates remain unaffected.
"""
num_dims_from = len(dims_from)
num_dims_to = len(dims_to)
# Reshape the first moment / mean
num_plates_from = np.ndim(x0) - num_dims_from
plates_from = np.shape(x0)[:num_plates_from]
shape = (
plates_from
+ (1,)*(num_dims_to-num_dims_from) + dims_from
)
x0 = np.ones(dims_to) * np.reshape(x0, shape)
# Reshape the second moment / covariance / precision
num_plates_from = np.ndim(x1) - 2*num_dims_from
plates_from = np.shape(x1)[:num_plates_from]
shape = (
plates_from
+ (1,)*(num_dims_to-num_dims_from) + dims_from
+ (1,)*(num_dims_to-num_dims_from) + dims_from
)
x1 = np.ones(dims_to+dims_to) * np.reshape(x1, shape)
return (x0, x1)
def transpose_covariance(Cov, ndim=1):
r"""
Transpose the covariance array of Gaussian array variable.
That is, swap the last ndim axes with the ndim axes before them. This makes
transposing easy for array variables when the covariance is not a matrix but
a multidimensional array.
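    Example (for ndim=1 this is just an ordinary matrix transpose; the array
    below is an arbitrary illustration, not a real covariance):
    >>> import numpy as np
    >>> A = np.arange(6.).reshape(2, 3)
    >>> np.allclose(transpose_covariance(A, ndim=1), A.T)
    True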
"""
axes_in = [Ellipsis] + list(range(2*ndim,0,-1))
axes_out = [Ellipsis] + list(range(ndim,0,-1)) + list(range(2*ndim,ndim,-1))
return np.einsum(Cov, axes_in, axes_out)
def left_rotate_covariance(Cov, R, axis=-1, ndim=1):
r"""
Rotate the covariance array of Gaussian array variable.
ndim is the number of axes for the Gaussian variable.
For vector variable, ndim=1 and covariance is a matrix.
"""
if not isinstance(axis, int):
raise ValueError("Axis must be an integer")
if axis < -ndim or axis >= ndim:
raise ValueError("Axis out of range")
# Force negative axis
if axis >= 0:
axis -= ndim
# Rotation from left
axes_R = [Ellipsis, ndim+abs(axis)+1, ndim+abs(axis)]
axes_Cov = [Ellipsis] + list(range(ndim+abs(axis),
0,
-1))
axes_out = [Ellipsis, ndim+abs(axis)+1] + list(range(ndim+abs(axis)-1,
0,
-1))
Cov = np.einsum(R, axes_R, Cov, axes_Cov, axes_out)
return Cov
def right_rotate_covariance(Cov, R, axis=-1, ndim=1):
r"""
Rotate the covariance array of Gaussian array variable.
ndim is the number of axes for the Gaussian variable.
For vector variable, ndim=1 and covariance is a matrix.
"""
if not isinstance(axis, int):
raise ValueError("Axis must be an integer")
if axis < -ndim or axis >= ndim:
raise ValueError("Axis out of range")
# Force negative axis
if axis >= 0:
axis -= ndim
# Rotation from right
axes_R = [Ellipsis, abs(axis)+1, abs(axis)]
axes_Cov = [Ellipsis] + list(range(abs(axis),
0,
-1))
axes_out = [Ellipsis, abs(axis)+1] + list(range(abs(axis)-1,
0,
-1))
Cov = np.einsum(R, axes_R, Cov, axes_Cov, axes_out)
return Cov
def rotate_covariance(Cov, R, axis=-1, ndim=1):
r"""
Rotate the covariance array of Gaussian array variable.
ndim is the number of axes for the Gaussian variable.
For vector variable, ndim=1 and covariance is a matrix.
"""
# Rotate from left and right
Cov = left_rotate_covariance(Cov, R, ndim=ndim, axis=axis)
Cov = right_rotate_covariance(Cov, R, ndim=ndim, axis=axis)
return Cov
def rotate_mean(mu, R, axis=-1, ndim=1):
r"""
Rotate the mean array of Gaussian array variable.
ndim is the number of axes for the Gaussian variable.
For vector variable, ndim=1 and mu is a vector.
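    Example (for ndim=1 this reduces to an ordinary matrix-vector product;
    the rotation matrix and mean below are arbitrary illustrations):
    >>> import numpy as np
    >>> R = np.array([[0., -1.], [1., 0.]])
    >>> mu = np.array([1., 2.])
    >>> np.allclose(rotate_mean(mu, R), np.dot(R, mu))
    True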
"""
if not isinstance(axis, int):
raise ValueError("Axis must be an integer")
if axis < -ndim or axis >= ndim:
raise ValueError("Axis out of range")
# Force negative axis
if axis >= 0:
axis -= ndim
# Rotation from right
axes_R = [Ellipsis, abs(axis)+1, abs(axis)]
axes_mu = [Ellipsis] + list(range(abs(axis),
0,
-1))
axes_out = [Ellipsis, abs(axis)+1] + list(range(abs(axis)-1,
0,
-1))
mu = np.einsum(R, axes_R, mu, axes_mu, axes_out)
return mu
def array_to_vector(x, ndim=1):
if ndim == 0:
return x
shape_x = np.shape(x)
D = np.prod(shape_x[-ndim:])
return np.reshape(x, shape_x[:-ndim] + (D,))
def array_to_matrix(A, ndim=1):
if ndim == 0:
return A
shape_A = np.shape(A)
D = np.prod(shape_A[-ndim:])
return np.reshape(A, shape_A[:-2*ndim] + (D,D))
def vector_to_array(x, shape):
shape_x = np.shape(x)
return np.reshape(x, np.shape(x)[:-1] + tuple(shape))
def matrix_dot_vector(A, x, ndim=1):
if ndim < 0:
raise ValueError("ndim must be non-negative integer")
if ndim == 0:
return A*x
dims_x = np.shape(x)[-ndim:]
A = array_to_matrix(A, ndim=ndim)
x = array_to_vector(x, ndim=ndim)
y = np.einsum('...ik,...k->...i', A, x)
return vector_to_array(y, dims_x)
class ConcatGaussian(Deterministic):
"""Concatenate Gaussian vectors along the variable axis (not plate axis)
NOTE: This concatenates on the variable axis! That is, the dimensionality
of the resulting Gaussian vector is the sum of the dimensionalities of the
input Gaussian vectors.
TODO: Add support for Gaussian arrays and arbitrary concatenation axis.
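    Example (illustrative sketch only; the shapes below are arbitrary
    assumptions)::
        X = GaussianARD(0, 1, shape=(2,))
        Y = GaussianARD(0, 1, shape=(3,))
        Z = ConcatGaussian(X, Y)  # 5-dimensional Gaussian vector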
"""
def __init__(self, *nodes, **kwargs):
# Number of nodes to concatenate
N = len(nodes)
# This is stuff that will be useful when implementing arbitrary
# concatenation. That is, first determine ndim.
#
# # Convert nodes to Gaussians (if they are not nodes, don't worry)
# nodes_gaussian = []
# for node in nodes:
# try:
# node_gaussian = node._convert(GaussianMoments)
# except AttributeError: # Moments.NoConverterError:
# nodes_gaussian.append(node)
# else:
# nodes_gaussian.append(node_gaussian)
# nodes = nodes_gaussian
#
# # Determine shape from the first Gaussian node
# shape = None
# for node in nodes:
# try:
# shape = node.dims[0]
# except AttibuteError:
# pass
# else:
# break
# if shape is None:
# raise ValueError("Couldn't determine shape from the input nodes")
#
# ndim = len(shape)
        nodes = [self._ensure_moments(node, GaussianMoments, ndim=1)
                 for node in nodes]
        self._parent_moments = [node._moments for node in nodes]
        # Make sure all parents are Gaussian vectors
        if any(len(node.dims[0]) != 1 for node in nodes):
            raise ValueError("Input nodes must be (Gaussian) vectors")
        self.slices = tuple(np.cumsum([0] + [node.dims[0][0] for node in nodes]))
        D = self.slices[-1]
        # The output dimensionality is known only after inspecting the parents
        self._moments = GaussianMoments((D,))
        return super().__init__(*nodes, dims=((D,), (D, D)), **kwargs)
def _compute_moments(self, *u_nodes):
x = misc.concatenate(*[u[0] for u in u_nodes], axis=-1)
xx = misc.block_diag(*[u[1] for u in u_nodes])
# Explicitly broadcast xx to plates of x
x_plates = np.shape(x)[:-1]
xx = np.ones(x_plates)[...,None,None] * xx
# Compute the cross-covariance terms using the means of each variable
# (because covariances are zero for factorized nodes in the VB
# approximation)
i_start = 0
for m in range(len(u_nodes)):
i_end = i_start + np.shape(u_nodes[m][0])[-1]
j_start = 0
for n in range(m):
j_end = j_start + np.shape(u_nodes[n][0])[-1]
xm_xn = linalg.outer(u_nodes[m][0], u_nodes[n][0], ndim=1)
xx[...,i_start:i_end,j_start:j_end] = xm_xn
xx[...,j_start:j_end,i_start:i_end] = misc.T(xm_xn)
j_start = j_end
i_start = i_end
return [x, xx]
def _compute_message_to_parent(self, i, m, *u_nodes):
r = self.slices
# Pick the proper parts from the message array
m0 = m[0][...,r[i]:r[i+1]]
m1 = m[1][...,r[i]:r[i+1],r[i]:r[i+1]]
# Handle cross-covariance terms by using the mean of the covariate node
for (j, u) in enumerate(u_nodes):
if j != i:
m0 = m0 + 2 * np.einsum(
'...ij,...j->...i',
m[1][...,r[i]:r[i+1],r[j]:r[j+1]],
u[0]
)
return [m0, m1]
| 30.258234 | 108 | 0.506395 |
bb0929aa7baffd8ed84098b3f43a53f112a87b4b
| 8,244 |
py
|
Python
|
oskut/deepcut/train.py
|
mrpeerat/OSKut
|
0c103cde154fe7b93ac8228b9bb1123a992b51c8
|
[
"MIT"
] | 14 |
2021-08-01T06:18:30.000Z
|
2022-03-31T17:20:24.000Z
|
oskut/deepcut/train.py
|
mrpeerat/OSKut
|
0c103cde154fe7b93ac8228b9bb1123a992b51c8
|
[
"MIT"
] | null | null | null |
oskut/deepcut/train.py
|
mrpeerat/OSKut
|
0c103cde154fe7b93ac8228b9bb1123a992b51c8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import os
from glob import glob
import pandas as pd
from functools import reduce
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from .utils import create_n_gram_df, CHAR_TYPE_FLATTEN, CHARS_MAP, CHAR_TYPES_MAP
from .model import get_convo_nn2
#article_types = ['article', 'encyclopedia', 'news', 'novel']
#article_types = ['wisesight']
#article_types = ['tnhc_train']
article_types = ['lst20_train']
def generate_words(files):
"""
    Transform a list of files to a list of words,
    removing newline characters
    and replacing name entity '<NE>...</NE>' and abbreviation '<AB>...</AB>' symbols
"""
repls = {'<NE>' : '','</NE>' : '','<AB>': '','</AB>': '','<p>':'','<s>':''}
words_all = []
for _, file in enumerate(files):
lines = open(file, 'r')
for line in lines:
line = reduce(lambda a, kv: a.replace(*kv), repls.items(), line)
            words = [word for word in line.split("|") if word != '\n']
words_all.extend(words)
return words_all
def create_char_dataframe(words):
"""
    Given a list of input tokenized words,
create dataframe of characters where first character of
the word is tagged as 1, otherwise 0
Example
=======
['กิน', 'หมด'] to dataframe of
[{'char': 'ก', 'type': ..., 'target': 1}, ...,
{'char': 'ด', 'type': ..., 'target': 0}]
"""
char_dict = []
for word in words:
for i, char in enumerate(word):
if i == 0:
char_dict.append({'char': char,
'type': CHAR_TYPE_FLATTEN.get(char, 'o'),
'target': True})
else:
char_dict.append({'char': char,
'type': CHAR_TYPE_FLATTEN.get(char, 'o'),
'target': False})
return pd.DataFrame(char_dict)
def generate_best_dataset(best_path, output_path='cleaned_data', create_val=True):
"""
Generate CSV file for training and testing data
Input
=====
best_path: str, path to BEST folder which contains unzipped subfolder
'article', 'encyclopedia', 'news', 'novel'
    output_path: str, path to output folder; the cleaned data will be saved
        in the given folder, where the training set will be stored in the `train` folder
        and the testing set will be stored in the `test` folder
create_val: boolean, True or False, if True, divide training set into training set and
validation set in `val` folder
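    Example
    =======
    Illustrative call only (the BEST corpus path is an assumption, not part
    of this repository):
    generate_best_dataset('data/BEST', output_path='cleaned_data', create_val=True)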
"""
if not os.path.isdir(output_path):
os.mkdir(output_path)
if not os.path.isdir(os.path.join(output_path, 'train')):
os.makedirs(os.path.join(output_path, 'train'))
if not os.path.isdir(os.path.join(output_path, 'test')):
os.makedirs(os.path.join(output_path, 'test'))
if not os.path.isdir(os.path.join(output_path, 'val')) and create_val:
os.makedirs(os.path.join(output_path, 'val'))
for article_type in article_types:
files = glob(os.path.join(best_path, article_type, '*.txt'))
files_train, files_test = train_test_split(files, random_state=42, test_size=0.1)
if create_val:
files_train, files_val = train_test_split(files_train, random_state=0, test_size=0.1)
val_words = generate_words(files_val)
val_df = create_char_dataframe(val_words)
val_df.to_csv(os.path.join(output_path, 'val', 'df_best_{}_val.csv'.format(article_type)), index=False)
train_words = generate_words(files_train)
test_words = generate_words(files_test)
train_df = create_char_dataframe(train_words)
test_df = create_char_dataframe(test_words)
train_df.to_csv(os.path.join(output_path, 'train', 'df_best_{}_train.csv'.format(article_type)), index=False)
test_df.to_csv(os.path.join(output_path, 'test', 'df_best_{}_test.csv'.format(article_type)), index=False)
print("Save {} to CSV file".format(article_type))
def prepare_feature(best_processed_path, option='train'):
"""
Transform processed path into feature matrix and output array
Input
=====
best_processed_path: str, path to processed BEST dataset
option: str, 'train' or 'test'
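    Example
    =======
    Illustrative call only (assumes `generate_best_dataset` has already been run):
    x_char, x_type, y = prepare_feature('cleaned_data', option='train')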
"""
# padding for training and testing set
n_pad = 21
n_pad_2 = int((n_pad - 1)/2)
pad = [{'char': ' ', 'type': 'p', 'target': True}]
df_pad = pd.DataFrame(pad * n_pad_2)
df = []
for article_type in article_types:
df.append(pd.read_csv(os.path.join(best_processed_path, option, 'df_best_{}_{}.csv'.format(article_type, option))))
df = pd.concat(df)
df = pd.concat((df_pad, df, df_pad)) # pad with empty string feature
df['char'] = df['char'].map(lambda x: CHARS_MAP.get(x, 80))
df['type'] = df['type'].map(lambda x: CHAR_TYPES_MAP.get(x, 4))
df_pad = create_n_gram_df(df, n_pad=n_pad)
char_row = ['char' + str(i + 1) for i in range(n_pad_2)] + \
['char-' + str(i + 1) for i in range(n_pad_2)] + ['char']
type_row = ['type' + str(i + 1) for i in range(n_pad_2)] + \
['type-' + str(i + 1) for i in range(n_pad_2)] + ['type']
x_char = df_pad[char_row].values
x_type = df_pad[type_row].values
y = df_pad['target'].astype(int).values
return x_char, x_type, y
def train_model(best_processed_path, weight_path='../weight/model_weight_lst20.h5', verbose=2):
"""
Given path to processed BEST dataset,
    train a CNN model that predicts word beginnings, alongside the
    character label encoder and the character type label encoder
Input
=====
best_processed_path: str, path to processed BEST dataset
    weight_path: str, path to the model weight file
    verbose: int, verbosity option for training the Keras model
Output
======
model: keras model, keras model for tokenize prediction
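    Example
    =======
    Illustrative call only (the paths are assumptions):
    model = train_model('cleaned_data', weight_path='weight/model_weight_lst20.h5', verbose=2)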
"""
x_train_char, x_train_type, y_train = prepare_feature(best_processed_path, option='train')
# x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='test')
    validation_set = False
if os.path.isdir(os.path.join(best_processed_path, 'val')):
validation_set = True
x_val_char, x_val_type, y_val = prepare_feature(best_processed_path, option='val')
if not os.path.isdir(os.path.dirname(weight_path)):
os.makedirs(os.path.dirname(weight_path)) # make directory if weight does not exist
callbacks_list = [
ReduceLROnPlateau(),
ModelCheckpoint(
weight_path,
save_best_only=True,
save_weights_only=True,
monitor='val_loss',
mode='min',
verbose=1
)
]
# train model
model = get_convo_nn2()
#model.load_weights('cnn_without_ne_ab.h5')
train_params = [(10, 256), (3, 512), (3, 2048), (3, 4096), (3, 8192)]
for (epochs, batch_size) in train_params:
print("train with {} epochs and {} batch size".format(epochs, batch_size))
if validation_set:
model.fit([x_train_char, x_train_type], y_train,
epochs=epochs, batch_size=batch_size,
verbose=verbose,
callbacks=callbacks_list,
validation_data=([x_val_char, x_val_type], y_val))
else:
model.fit([x_train_char, x_train_type], y_train,
epochs=epochs, batch_size=batch_size,
verbose=verbose,
callbacks=callbacks_list)
return model
def evaluate(best_processed_path, model):
"""
    Evaluate the model on the held-out 10 percent testing set
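    Example
    =======
    Illustrative call only (assumes `model` was returned by `train_model`):
    f1score, precision, recall = evaluate('cleaned_data', model)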
"""
x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='test')
# return x_test_char, x_test_type, y_test
y_predict = model.predict([x_test_char, x_test_type])
y_predict = (y_predict.ravel() > 0.5).astype(int)
f1score = f1_score(y_test, y_predict)
precision = precision_score(y_test, y_predict)
recall = recall_score(y_test, y_predict)
return f1score, precision, recall
| 37.990783 | 123 | 0.634522 |
6015900cb87cb201b2737301cd7617275b986616
| 11,215 |
py
|
Python
|
families/config/sawtooth_config/processor/handler.py
|
trust-tech/sawtooth-core
|
fcd66ff2f13dba51d7642049e0c0306dbee3b07d
|
[
"Apache-2.0"
] | null | null | null |
families/config/sawtooth_config/processor/handler.py
|
trust-tech/sawtooth-core
|
fcd66ff2f13dba51d7642049e0c0306dbee3b07d
|
[
"Apache-2.0"
] | null | null | null |
families/config/sawtooth_config/processor/handler.py
|
trust-tech/sawtooth-core
|
fcd66ff2f13dba51d7642049e0c0306dbee3b07d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import hashlib
import base64
from functools import lru_cache
from sawtooth_sdk.processor.state import StateEntry
from sawtooth_sdk.messaging.future import FutureTimeoutError
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_config.protobuf.config_pb2 import ConfigPayload
from sawtooth_config.protobuf.config_pb2 import ConfigProposal
from sawtooth_config.protobuf.config_pb2 import ConfigVote
from sawtooth_config.protobuf.config_pb2 import ConfigCandidate
from sawtooth_config.protobuf.config_pb2 import ConfigCandidates
from sawtooth_config.protobuf.setting_pb2 import Setting
LOGGER = logging.getLogger(__name__)
# The config namespace is special: it is not derived from a hash.
CONFIG_NAMESPACE = '000000'
# Number of seconds to wait for state operations to succeed
STATE_TIMEOUT_SEC = 10
class ConfigurationTransactionHandler(object):
@property
def family_name(self):
return 'sawtooth_config'
@property
def family_versions(self):
return ['1.0']
@property
def encodings(self):
return ['application/protobuf']
@property
def namespaces(self):
return [CONFIG_NAMESPACE]
def apply(self, transaction, state):
txn_header = TransactionHeader()
txn_header.ParseFromString(transaction.header)
pubkey = txn_header.signer_pubkey
auth_keys = _get_auth_keys(state)
if len(auth_keys) > 0 and pubkey not in auth_keys:
raise InvalidTransaction(
'{} is not authorized to change settings'.format(pubkey))
config_payload = ConfigPayload()
config_payload.ParseFromString(transaction.payload)
if config_payload.action == ConfigPayload.PROPOSE:
return self._apply_proposal(
auth_keys, pubkey, config_payload.data, state)
elif config_payload.action == ConfigPayload.VOTE:
return self._apply_vote(pubkey, config_payload.data,
auth_keys, state)
else:
raise InvalidTransaction(
"'action' must be one of {PROPOSE, VOTE} in 'Ballot' mode")
def _apply_proposal(self, auth_keys, pubkey, config_proposal_data, state):
config_proposal = ConfigProposal()
config_proposal.ParseFromString(config_proposal_data)
proposal_id = hashlib.sha256(config_proposal_data).hexdigest()
approval_threshold = _get_approval_threshold(state)
_validate_setting(auth_keys,
config_proposal.setting,
config_proposal.value)
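        # With an approval threshold greater than 1, the proposal is only recorded as a
        # candidate and must collect votes; otherwise the setting is applied immediately.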
if approval_threshold > 1:
config_candidates = _get_config_candidates(state)
existing_candidate = _first(
config_candidates.candidates,
lambda candidate: candidate.proposal_id == proposal_id)
if existing_candidate is not None:
raise InvalidTransaction(
'Duplicate proposal for {}'.format(
config_proposal.setting))
record = ConfigCandidate.VoteRecord(
public_key=pubkey,
vote=ConfigVote.ACCEPT)
config_candidates.candidates.add(
proposal_id=proposal_id,
proposal=config_proposal,
votes=[record]
)
LOGGER.debug('Proposal made to set %s to %s',
config_proposal.setting,
config_proposal.value)
_save_config_candidates(state, config_candidates)
else:
_set_config_value(state,
config_proposal.setting,
config_proposal.value)
def _apply_vote(self, pubkey, config_vote_data, authorized_keys, state):
config_vote = ConfigVote()
config_vote.ParseFromString(config_vote_data)
proposal_id = config_vote.proposal_id
config_candidates = _get_config_candidates(state)
candidate = _first(
config_candidates.candidates,
lambda candidate: candidate.proposal_id == proposal_id)
if candidate is None:
raise InvalidTransaction(
"Proposal {} does not exist.".format(proposal_id))
candidate_index = _index_of(config_candidates.candidates, candidate)
approval_threshold = _get_approval_threshold(state)
vote_record = _first(candidate.votes,
lambda record: record.public_key == pubkey)
if vote_record is not None:
raise InvalidTransaction(
'{} has already voted'.format(pubkey))
candidate.votes.add(
public_key=pubkey,
vote=config_vote.vote)
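        # Tally the recorded votes: the proposal is applied once accepts reach the
        # approval threshold, and discarded once rejects reach it (or once every
        # authorized key has voted without the threshold being reached).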
accepted_count = 0
rejected_count = 0
for vote_record in candidate.votes:
if vote_record.vote == ConfigVote.ACCEPT:
accepted_count += 1
elif vote_record.vote == ConfigVote.REJECT:
rejected_count += 1
if accepted_count >= approval_threshold:
_set_config_value(state,
candidate.proposal.setting,
candidate.proposal.value)
del config_candidates.candidates[candidate_index]
elif rejected_count >= approval_threshold or \
(rejected_count + accepted_count) == len(authorized_keys):
LOGGER.debug('Proposal for %s was rejected',
candidate.proposal.setting)
del config_candidates.candidates[candidate_index]
else:
LOGGER.debug('Vote recorded for %s',
candidate.proposal.setting)
_save_config_candidates(state, config_candidates)
def _get_config_candidates(state):
value = _get_config_value(state, 'sawtooth.config.vote.proposals')
if not value:
return ConfigCandidates(candidates={})
config_candidates = ConfigCandidates()
config_candidates.ParseFromString(base64.b64decode(value))
return config_candidates
def _save_config_candidates(state, config_candidates):
_set_config_value(state,
'sawtooth.config.vote.proposals',
base64.b64encode(config_candidates.SerializeToString()))
def _get_approval_threshold(state):
return int(_get_config_value(
state, 'sawtooth.config.vote.approval_threshold', 1))
def _get_auth_keys(state):
value = _get_config_value(
state, 'sawtooth.config.vote.authorized_keys', '')
return _split_ignore_empties(value)
def _split_ignore_empties(value):
return [v.strip() for v in value.split(',') if len(v) > 0]
def _validate_setting(auth_keys, setting, value):
if len(auth_keys) == 0 and \
setting != 'sawtooth.config.vote.authorized_keys':
raise InvalidTransaction(
'Cannot set {} until authorized_keys is set.'.format(setting))
if setting == 'sawtooth.config.vote.authorized_keys':
if len(_split_ignore_empties(value)) == 0:
raise InvalidTransaction('authorized_keys must not be empty.')
if setting == 'sawtooth.config.vote.approval_threshold':
threshold = None
try:
threshold = int(value)
except ValueError:
raise InvalidTransaction('approval_threshold must be an integer')
if threshold > len(auth_keys):
raise InvalidTransaction(
'approval_threshold must be less than or equal to number of '
'authorized_keys')
if setting == 'sawtooth.config.vote.proposals':
raise InvalidTransaction(
'Setting sawtooth.config.vote.proposals is read-only')
def _get_config_value(state, key, default_value=None):
address = _make_config_key(key)
setting = _get_setting_entry(state, address)
for entry in setting.entries:
if key == entry.key:
return entry.value
return default_value
def _set_config_value(state, key, value):
address = _make_config_key(key)
setting = _get_setting_entry(state, address)
old_value = None
old_entry_index = None
for i, entry in enumerate(setting.entries):
if key == entry.key:
old_value = entry.value
old_entry_index = i
if old_entry_index is not None:
setting.entries[old_entry_index].value = value
else:
setting.entries.add(key=key, value=value)
try:
addresses = list(state.set(
[StateEntry(address=address,
data=setting.SerializeToString())],
timeout=STATE_TIMEOUT_SEC))
except FutureTimeoutError:
LOGGER.warning(
            'Timeout occurred on state.set([%s, <value>])', address)
raise InternalError('Unable to set {}'.format(key))
if len(addresses) != 1:
LOGGER.warning(
'Failed to save value on address %s', address)
raise InternalError(
'Unable to save config value {}'.format(key))
    if key != 'sawtooth.config.vote.proposals':
LOGGER.info('Config setting %s changed from %s to %s',
key, old_value, value)
def _get_setting_entry(state, address):
setting = Setting()
try:
entries_list = state.get([address], timeout=STATE_TIMEOUT_SEC)
except FutureTimeoutError:
        LOGGER.warning('Timeout occurred on state.get([%s])', address)
raise InternalError('Unable to get {}'.format(address))
if len(entries_list) != 0:
setting.ParseFromString(entries_list[0].data)
return setting
def _to_hash(value):
return hashlib.sha256(value.encode()).hexdigest()
def _first(a_list, pred):
return next((x for x in a_list if pred(x)), None)
def _index_of(iterable, obj):
return next((i for i, x in enumerate(iterable) if x == obj), -1)
_MAX_KEY_PARTS = 4
_ADDRESS_PART_SIZE = 16
_EMPTY_PART = _to_hash('')[:_ADDRESS_PART_SIZE]
@lru_cache(maxsize=128)
def _make_config_key(key):
# split the key into 4 parts, maximum
key_parts = key.split('.', maxsplit=_MAX_KEY_PARTS - 1)
# compute the short hash of each part
addr_parts = [_to_hash(x)[:_ADDRESS_PART_SIZE] for x in key_parts]
# pad the parts with the empty hash, if needed
addr_parts.extend([_EMPTY_PART] * (_MAX_KEY_PARTS - len(addr_parts)))
return CONFIG_NAMESPACE + ''.join(addr_parts)
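
# Illustrative sketch (added for clarity; not part of the original handler): how a
# settings key maps to a state address via _make_config_key. The resulting address is
# the 6-character config namespace followed by four 16-character hashed key parts
# (70 hex characters in total). The key below is only an example value.
if __name__ == '__main__':
    print(_make_config_key('sawtooth.config.vote.approval_threshold'))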
| 34.296636 | 80 | 0.654837 |
a0cdc2f5a3a8a8e55e98756b7a91d9b34190940f
| 1,292 |
py
|
Python
|
main.py
|
GhostUser/Flappy-Bird-
|
31279674b3d82244aa326726f399aeb056d83377
|
[
"MIT"
] | null | null | null |
main.py
|
GhostUser/Flappy-Bird-
|
31279674b3d82244aa326726f399aeb056d83377
|
[
"MIT"
] | 4 |
2020-10-22T16:10:12.000Z
|
2020-10-22T16:13:53.000Z
|
main.py
|
GhostUser/Flappy-Bird-
|
31279674b3d82244aa326726f399aeb056d83377
|
[
"MIT"
] | null | null | null |
import pygame
from pygame import event
import sys
class Screen:
NUM_BLOCKS_X = 9
NUM_BLOCKS_Y = 17
block_size = 30
WIDTH, HEIGHT = block_size * NUM_BLOCKS_X, block_size * NUM_BLOCKS_Y
FPS = 32
window=pygame.display.set_mode((WIDTH,HEIGHT))
bg=pygame.image.load('pictures\\background.png')
bird=pygame.image.load('pictures\\bird.png')
platorm=pygame.image.load('pictures\\base.png')
class Bird:
Co_X=0
Co_Y=0
bird=pygame.image.load('pictures\\bird.png')
def bg_win():
Screen.window.blit(Screen.bg, (0,0))
Screen.window.blit(Screen.bird, (Screen.WIDTH//2, Screen.HEIGHT//2))
Screen.window.blit(Screen.platorm, (0, 4*Screen.HEIGHT//5))
pygame.display.update()
def Game_loop():
for event in pygame.event.get():
if event.type==pygame.QUIT:
run=False
sys.exit()
keys=pygame.key.get_pressed()
if keys[pygame.K_SPACE] or keys[pygame.K_UP]:
pass
def main():
pygame.display.init()
pygame.font.init()
pygame.display.set_caption("Flappy Bird")
pygame.display.set_mode((Screen.WIDTH, Screen.HEIGHT))
bg_win()
pygame.time.delay(50)
run=True
while run:
Game_loop()
pygame.quit()
if __name__=='__main__':
main()
| 20.507937 | 72 | 0.643189 |
0a95585cb90415a4450e8f3af559e5c8fea63e15
| 1,391 |
py
|
Python
|
demos/mlp/vae_train.py
|
Ryandry1st/vampyre
|
43bd6198ee0cbe0d3270d0c674127c7cbbb4c95e
|
[
"MIT"
] | 59 |
2017-01-27T22:36:38.000Z
|
2021-12-08T04:16:13.000Z
|
demos/mlp/vae_train.py
|
Ryandry1st/vampyre
|
43bd6198ee0cbe0d3270d0c674127c7cbbb4c95e
|
[
"MIT"
] | 10 |
2017-01-11T15:16:11.000Z
|
2021-02-17T10:43:51.000Z
|
demos/mlp/vae_train.py
|
Ryandry1st/vampyre
|
43bd6198ee0cbe0d3270d0c674127c7cbbb4c95e
|
[
"MIT"
] | 18 |
2017-01-11T14:58:32.000Z
|
2021-05-03T16:34:53.000Z
|
"""
vae_train.py: Trains the VAE using the MNIST dataset.
"""
from __future__ import division
from __future__ import print_function
import vae
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import argparse
"""
Parse arguments from command line
"""
parser = argparse.ArgumentParser(description='Trains a VAE model for the MNIST data')
parser.add_argument('-nsteps',action='store',default=20000,type=int,\
help='total number of steps')
parser.add_argument('-param_fn',action='store',default='param.p',type=str,\
help='file name for the parameter file')
parser.add_argument('-restore', dest='restore', action='store_true',\
help="Continue from previous run")
parser.set_defaults(restore=False)
args = parser.parse_args()
nsteps = args.nsteps
restore = args.restore
param_fn = args.param_fn
# Dimensions of the layers
enc_dim = [784,400,20]
dec_dim = [20,400,784]
# Load MNIST
if not 'mnist' in locals():
mnist = input_data.read_data_sets('MNIST')
# Build the VAE
#vae_net = vae.VAE(enc_dim, dec_dim, n_steps=int(20000))
vae_net = vae.VAE(enc_dim, dec_dim, n_steps=int(nsteps))
vae_net.build_graph()
# Train the model
vae_net.train(mnist,restore=restore)
# Dump the matrices
with tf.Session() as sess:
    vae_net.dump_matrices(sess, param_fn)
print("Data stored in file "+param_fn)
| 26.245283 | 85 | 0.735442 |
6df4631f5aeaddf9a16f50efad515a5100494ee7
| 35,229 |
py
|
Python
|
tests/test_dates.py
|
behnam/python-babel
|
1378a3d5e7f7f111ea7828ff7429f0abbb513700
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_dates.py
|
behnam/python-babel
|
1378a3d5e7f7f111ea7828ff7429f0abbb513700
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_dates.py
|
behnam/python-babel
|
1378a3d5e7f7f111ea7828ff7429f0abbb513700
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import calendar
from datetime import date, datetime, time, timedelta
import unittest
import pytest
import pytz
from pytz import timezone
from babel import dates, Locale
from babel.dates import NO_INHERITANCE_MARKER
from babel.util import FixedOffsetTimezone
class DateTimeFormatTestCase(unittest.TestCase):
def test_quarter_format(self):
d = date(2006, 6, 8)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('2', fmt['Q'])
self.assertEqual('2nd quarter', fmt['QQQQ'])
self.assertEqual('2', fmt['q'])
self.assertEqual('2nd quarter', fmt['qqqq'])
d = date(2006, 12, 31)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('Q4', fmt['qqq'])
self.assertEqual('4', fmt['qqqqq'])
self.assertEqual('Q4', fmt['QQQ'])
self.assertEqual('4', fmt['QQQQQ'])
def test_month_context(self):
d = date(2006, 2, 8)
fmt = dates.DateTimeFormat(d, locale='mt_MT')
self.assertEqual(u'F', fmt['MMMMM']) # narrow format
fmt = dates.DateTimeFormat(d, locale='mt_MT')
self.assertEqual(u'Fr', fmt['LLLLL']) # narrow standalone
def test_abbreviated_month_alias(self):
d = date(2006, 3, 8)
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual(u'Mär', fmt['LLL'])
def test_week_of_year_first(self):
d = date(2006, 1, 8)
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('1', fmt['w'])
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('02', fmt['ww'])
def test_week_of_year_first_with_year(self):
d = date(2006, 1, 1)
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('52', fmt['w'])
self.assertEqual('2005', fmt['YYYY'])
def test_week_of_year_last(self):
d = date(2006, 12, 26)
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('52', fmt['w'])
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('52', fmt['w'])
def test_week_of_year_last_us_extra_week(self):
d = date(2005, 12, 26)
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('52', fmt['w'])
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('53', fmt['w'])
def test_week_of_month_first(self):
d = date(2006, 1, 8)
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('1', fmt['W'])
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('2', fmt['W'])
def test_week_of_month_last(self):
d = date(2006, 1, 29)
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('4', fmt['W'])
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('5', fmt['W'])
def test_day_of_year(self):
d = date(2007, 4, 1)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('91', fmt['D'])
def test_day_of_year_works_with_datetime(self):
d = datetime(2007, 4, 1)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('91', fmt['D'])
def test_day_of_year_first(self):
d = date(2007, 1, 1)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('001', fmt['DDD'])
def test_day_of_year_last(self):
d = date(2007, 12, 31)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('365', fmt['DDD'])
def test_day_of_week_in_month(self):
d = date(2007, 4, 15)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('3', fmt['F'])
def test_day_of_week_in_month_first(self):
d = date(2007, 4, 1)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('1', fmt['F'])
def test_day_of_week_in_month_last(self):
d = date(2007, 4, 29)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('5', fmt['F'])
def test_local_day_of_week(self):
d = date(2007, 4, 1) # a sunday
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('7', fmt['e']) # monday is first day of week
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('01', fmt['ee']) # sunday is first day of week
fmt = dates.DateTimeFormat(d, locale='bn_BD')
self.assertEqual('03', fmt['ee']) # friday is first day of week
d = date(2007, 4, 2) # a monday
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('1', fmt['e']) # monday is first day of week
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('02', fmt['ee']) # sunday is first day of week
fmt = dates.DateTimeFormat(d, locale='bn_BD')
self.assertEqual('04', fmt['ee']) # friday is first day of week
def test_local_day_of_week_standalone(self):
d = date(2007, 4, 1) # a sunday
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('7', fmt['c']) # monday is first day of week
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('1', fmt['c']) # sunday is first day of week
fmt = dates.DateTimeFormat(d, locale='bn_BD')
self.assertEqual('3', fmt['c']) # friday is first day of week
d = date(2007, 4, 2) # a monday
fmt = dates.DateTimeFormat(d, locale='de_DE')
self.assertEqual('1', fmt['c']) # monday is first day of week
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('2', fmt['c']) # sunday is first day of week
fmt = dates.DateTimeFormat(d, locale='bn_BD')
self.assertEqual('4', fmt['c']) # friday is first day of week
def test_pattern_day_of_week(self):
dt = datetime(2016, 2, 6)
fmt = dates.DateTimeFormat(dt, locale='en_US')
self.assertEqual('7', fmt['c'])
self.assertEqual('Sat', fmt['ccc'])
self.assertEqual('Saturday', fmt['cccc'])
self.assertEqual('S', fmt['ccccc'])
self.assertEqual('Sa', fmt['cccccc'])
self.assertEqual('7', fmt['e'])
self.assertEqual('07', fmt['ee'])
self.assertEqual('Sat', fmt['eee'])
self.assertEqual('Saturday', fmt['eeee'])
self.assertEqual('S', fmt['eeeee'])
self.assertEqual('Sa', fmt['eeeeee'])
self.assertEqual('Sat', fmt['E'])
self.assertEqual('Sat', fmt['EE'])
self.assertEqual('Sat', fmt['EEE'])
self.assertEqual('Saturday', fmt['EEEE'])
self.assertEqual('S', fmt['EEEEE'])
self.assertEqual('Sa', fmt['EEEEEE'])
fmt = dates.DateTimeFormat(dt, locale='uk')
self.assertEqual('6', fmt['c'])
self.assertEqual('6', fmt['e'])
self.assertEqual('06', fmt['ee'])
def test_fractional_seconds(self):
t = time(8, 3, 9, 799)
fmt = dates.DateTimeFormat(t, locale='en_US')
self.assertEqual('0', fmt['S'])
t = time(8, 3, 1, 799)
fmt = dates.DateTimeFormat(t, locale='en_US')
self.assertEqual('0008', fmt['SSSS'])
t = time(8, 3, 1, 34567)
fmt = dates.DateTimeFormat(t, locale='en_US')
self.assertEqual('0346', fmt['SSSS'])
t = time(8, 3, 1, 345678)
fmt = dates.DateTimeFormat(t, locale='en_US')
self.assertEqual('345678', fmt['SSSSSS'])
t = time(8, 3, 1, 799)
fmt = dates.DateTimeFormat(t, locale='en_US')
self.assertEqual('00080', fmt['SSSSS'])
def test_fractional_seconds_zero(self):
t = time(15, 30, 0)
fmt = dates.DateTimeFormat(t, locale='en_US')
self.assertEqual('0000', fmt['SSSS'])
def test_milliseconds_in_day(self):
t = time(15, 30, 12, 345000)
fmt = dates.DateTimeFormat(t, locale='en_US')
self.assertEqual('55812345', fmt['AAAA'])
def test_milliseconds_in_day_zero(self):
d = time(0, 0, 0)
fmt = dates.DateTimeFormat(d, locale='en_US')
self.assertEqual('0000', fmt['AAAA'])
def test_timezone_rfc822(self):
tz = timezone('Europe/Berlin')
t = tz.localize(datetime(2015, 1, 1, 15, 30))
fmt = dates.DateTimeFormat(t, locale='de_DE')
self.assertEqual('+0100', fmt['Z'])
def test_timezone_gmt(self):
tz = timezone('Europe/Berlin')
t = tz.localize(datetime(2015, 1, 1, 15, 30))
fmt = dates.DateTimeFormat(t, locale='de_DE')
self.assertEqual('GMT+01:00', fmt['ZZZZ'])
def test_timezone_name(self):
tz = timezone('Europe/Paris')
dt = tz.localize(datetime(2007, 4, 1, 15, 30))
fmt = dates.DateTimeFormat(dt, locale='fr_FR')
self.assertEqual('heure : France', fmt['v'])
def test_timezone_location_format(self):
tz = timezone('Europe/Paris')
dt = datetime(2007, 4, 1, 15, 30, tzinfo=tz)
fmt = dates.DateTimeFormat(dt, locale='fr_FR')
self.assertEqual('heure : France', fmt['VVVV'])
def test_timezone_walltime_short(self):
tz = timezone('Europe/Paris')
t = time(15, 30, tzinfo=tz)
fmt = dates.DateTimeFormat(t, locale='fr_FR')
self.assertEqual('heure : France', fmt['v'])
def test_timezone_walltime_long(self):
tz = timezone('Europe/Paris')
t = time(15, 30, tzinfo=tz)
fmt = dates.DateTimeFormat(t, locale='fr_FR')
self.assertEqual(u'heure d\u2019Europe centrale', fmt['vvvv'])
def test_hour_formatting(self):
l = 'en_US'
t = time(0, 0, 0)
self.assertEqual(dates.format_time(t, 'h a', locale=l), '12 AM')
self.assertEqual(dates.format_time(t, 'H', locale=l), '0')
self.assertEqual(dates.format_time(t, 'k', locale=l), '24')
self.assertEqual(dates.format_time(t, 'K a', locale=l), '0 AM')
t = time(12, 0, 0)
self.assertEqual(dates.format_time(t, 'h a', locale=l), '12 PM')
self.assertEqual(dates.format_time(t, 'H', locale=l), '12')
self.assertEqual(dates.format_time(t, 'k', locale=l), '12')
self.assertEqual(dates.format_time(t, 'K a', locale=l), '0 PM')
class FormatDateTestCase(unittest.TestCase):
def test_with_time_fields_in_pattern(self):
self.assertRaises(AttributeError, dates.format_date, date(2007, 4, 1),
"yyyy-MM-dd HH:mm", locale='en_US')
def test_with_time_fields_in_pattern_and_datetime_param(self):
self.assertRaises(AttributeError, dates.format_date,
datetime(2007, 4, 1, 15, 30),
"yyyy-MM-dd HH:mm", locale='en_US')
def test_with_day_of_year_in_pattern_and_datetime_param(self):
# format_date should work on datetimes just as well (see #282)
d = datetime(2007, 4, 1)
self.assertEqual('14', dates.format_date(d, 'w', locale='en_US'))
class FormatDatetimeTestCase(unittest.TestCase):
def test_with_float(self):
d = datetime(2012, 4, 1, 15, 30, 29, tzinfo=timezone('UTC'))
epoch = float(calendar.timegm(d.timetuple()))
formatted_string = dates.format_datetime(epoch, format='long', locale='en_US')
self.assertEqual(u'April 1, 2012 at 3:30:29 PM UTC', formatted_string)
def test_timezone_formats(self):
dt = datetime(2016, 1, 13, 7, 8, 35)
tz = dates.get_timezone('America/Los_Angeles')
dt = tz.localize(dt)
formatted_string = dates.format_datetime(dt, 'z', locale='en')
self.assertEqual(u'PST', formatted_string)
formatted_string = dates.format_datetime(dt, 'zz', locale='en')
self.assertEqual(u'PST', formatted_string)
formatted_string = dates.format_datetime(dt, 'zzz', locale='en')
self.assertEqual(u'PST', formatted_string)
formatted_string = dates.format_datetime(dt, 'zzzz', locale='en')
self.assertEqual(u'Pacific Standard Time', formatted_string)
formatted_string = dates.format_datetime(dt, 'Z', locale='en')
self.assertEqual(u'-0800', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZ', locale='en')
self.assertEqual(u'-0800', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZZ', locale='en')
self.assertEqual(u'-0800', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZZZ', locale='en')
self.assertEqual(u'GMT-08:00', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZZZZ', locale='en')
self.assertEqual(u'-08:00', formatted_string)
formatted_string = dates.format_datetime(dt, 'OOOO', locale='en')
self.assertEqual(u'GMT-08:00', formatted_string)
formatted_string = dates.format_datetime(dt, 'VV', locale='en')
self.assertEqual(u'America/Los_Angeles', formatted_string)
formatted_string = dates.format_datetime(dt, 'VVV', locale='en')
self.assertEqual(u'Los Angeles', formatted_string)
formatted_string = dates.format_datetime(dt, 'X', locale='en')
self.assertEqual(u'-08', formatted_string)
formatted_string = dates.format_datetime(dt, 'XX', locale='en')
self.assertEqual(u'-0800', formatted_string)
formatted_string = dates.format_datetime(dt, 'XXX', locale='en')
self.assertEqual(u'-08:00', formatted_string)
formatted_string = dates.format_datetime(dt, 'XXXX', locale='en')
self.assertEqual(u'-0800', formatted_string)
formatted_string = dates.format_datetime(dt, 'XXXXX', locale='en')
self.assertEqual(u'-08:00', formatted_string)
formatted_string = dates.format_datetime(dt, 'x', locale='en')
self.assertEqual(u'-08', formatted_string)
formatted_string = dates.format_datetime(dt, 'xx', locale='en')
self.assertEqual(u'-0800', formatted_string)
formatted_string = dates.format_datetime(dt, 'xxx', locale='en')
self.assertEqual(u'-08:00', formatted_string)
formatted_string = dates.format_datetime(dt, 'xxxx', locale='en')
self.assertEqual(u'-0800', formatted_string)
formatted_string = dates.format_datetime(dt, 'xxxxx', locale='en')
self.assertEqual(u'-08:00', formatted_string)
dt = datetime(2016, 1, 13, 7, 8, 35)
tz = dates.get_timezone('UTC')
dt = tz.localize(dt)
formatted_string = dates.format_datetime(dt, 'Z', locale='en')
self.assertEqual(u'+0000', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZ', locale='en')
self.assertEqual(u'+0000', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZZ', locale='en')
self.assertEqual(u'+0000', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZZZ', locale='en')
self.assertEqual(u'GMT+00:00', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZZZZ', locale='en')
self.assertEqual(u'Z', formatted_string)
formatted_string = dates.format_datetime(dt, 'OOOO', locale='en')
self.assertEqual(u'GMT+00:00', formatted_string)
formatted_string = dates.format_datetime(dt, 'VV', locale='en')
self.assertEqual(u'Etc/UTC', formatted_string)
formatted_string = dates.format_datetime(dt, 'VVV', locale='en')
self.assertEqual(u'UTC', formatted_string)
formatted_string = dates.format_datetime(dt, 'X', locale='en')
self.assertEqual(u'Z', formatted_string)
formatted_string = dates.format_datetime(dt, 'XX', locale='en')
self.assertEqual(u'Z', formatted_string)
formatted_string = dates.format_datetime(dt, 'XXX', locale='en')
self.assertEqual(u'Z', formatted_string)
formatted_string = dates.format_datetime(dt, 'XXXX', locale='en')
self.assertEqual(u'Z', formatted_string)
formatted_string = dates.format_datetime(dt, 'XXXXX', locale='en')
self.assertEqual(u'Z', formatted_string)
formatted_string = dates.format_datetime(dt, 'x', locale='en')
self.assertEqual(u'+00', formatted_string)
formatted_string = dates.format_datetime(dt, 'xx', locale='en')
self.assertEqual(u'+0000', formatted_string)
formatted_string = dates.format_datetime(dt, 'xxx', locale='en')
self.assertEqual(u'+00:00', formatted_string)
formatted_string = dates.format_datetime(dt, 'xxxx', locale='en')
self.assertEqual(u'+0000', formatted_string)
formatted_string = dates.format_datetime(dt, 'xxxxx', locale='en')
self.assertEqual(u'+00:00', formatted_string)
dt = datetime(2016, 1, 13, 7, 8, 35)
tz = dates.get_timezone('Asia/Kolkata')
dt = tz.localize(dt)
formatted_string = dates.format_datetime(dt, 'zzzz', locale='en')
self.assertEqual(u'India Standard Time', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZZZ', locale='en')
self.assertEqual(u'GMT+05:30', formatted_string)
formatted_string = dates.format_datetime(dt, 'ZZZZZ', locale='en')
self.assertEqual(u'+05:30', formatted_string)
formatted_string = dates.format_datetime(dt, 'OOOO', locale='en')
self.assertEqual(u'GMT+05:30', formatted_string)
formatted_string = dates.format_datetime(dt, 'VV', locale='en')
self.assertEqual(u'Asia/Calcutta', formatted_string)
formatted_string = dates.format_datetime(dt, 'VVV', locale='en')
self.assertEqual(u'Kolkata', formatted_string)
formatted_string = dates.format_datetime(dt, 'X', locale='en')
self.assertEqual(u'+0530', formatted_string)
formatted_string = dates.format_datetime(dt, 'XX', locale='en')
self.assertEqual(u'+0530', formatted_string)
formatted_string = dates.format_datetime(dt, 'XXX', locale='en')
self.assertEqual(u'+05:30', formatted_string)
formatted_string = dates.format_datetime(dt, 'XXXX', locale='en')
self.assertEqual(u'+0530', formatted_string)
formatted_string = dates.format_datetime(dt, 'XXXXX', locale='en')
self.assertEqual(u'+05:30', formatted_string)
formatted_string = dates.format_datetime(dt, 'x', locale='en')
self.assertEqual(u'+0530', formatted_string)
formatted_string = dates.format_datetime(dt, 'xx', locale='en')
self.assertEqual(u'+0530', formatted_string)
formatted_string = dates.format_datetime(dt, 'xxx', locale='en')
self.assertEqual(u'+05:30', formatted_string)
formatted_string = dates.format_datetime(dt, 'xxxx', locale='en')
self.assertEqual(u'+0530', formatted_string)
formatted_string = dates.format_datetime(dt, 'xxxxx', locale='en')
self.assertEqual(u'+05:30', formatted_string)
class FormatTimeTestCase(unittest.TestCase):
def test_with_naive_datetime_and_tzinfo(self):
string = dates.format_time(datetime(2007, 4, 1, 15, 30),
'long', tzinfo=timezone('US/Eastern'),
locale='en')
self.assertEqual('11:30:00 AM EDT', string)
def test_with_float(self):
d = datetime(2012, 4, 1, 15, 30, 29, tzinfo=timezone('UTC'))
epoch = float(calendar.timegm(d.timetuple()))
formatted_time = dates.format_time(epoch, format='long', locale='en_US')
self.assertEqual(u'3:30:29 PM UTC', formatted_time)
def test_with_date_fields_in_pattern(self):
self.assertRaises(AttributeError, dates.format_time, date(2007, 4, 1),
"yyyy-MM-dd HH:mm", locale='en_US')
def test_with_date_fields_in_pattern_and_datetime_param(self):
self.assertRaises(AttributeError, dates.format_time,
datetime(2007, 4, 1, 15, 30),
"yyyy-MM-dd HH:mm", locale='en_US')
class FormatTimedeltaTestCase(unittest.TestCase):
def test_zero_seconds(self):
string = dates.format_timedelta(timedelta(seconds=0), locale='en')
self.assertEqual('0 seconds', string)
string = dates.format_timedelta(timedelta(seconds=0), locale='en',
format='short')
self.assertEqual('0 sec', string)
string = dates.format_timedelta(timedelta(seconds=0),
granularity='hour', locale='en')
self.assertEqual('0 hours', string)
string = dates.format_timedelta(timedelta(seconds=0),
granularity='hour', locale='en',
format='short')
self.assertEqual('0 hr', string)
def test_small_value_with_granularity(self):
string = dates.format_timedelta(timedelta(seconds=42),
granularity='hour', locale='en')
self.assertEqual('1 hour', string)
string = dates.format_timedelta(timedelta(seconds=42),
granularity='hour', locale='en',
format='short')
self.assertEqual('1 hr', string)
def test_direction_adding(self):
string = dates.format_timedelta(timedelta(hours=1),
locale='en',
add_direction=True)
self.assertEqual('in 1 hour', string)
string = dates.format_timedelta(timedelta(hours=-1),
locale='en',
add_direction=True)
self.assertEqual('1 hour ago', string)
def test_format_narrow(self):
string = dates.format_timedelta(timedelta(hours=1),
locale='en', format='narrow')
self.assertEqual('1h', string)
string = dates.format_timedelta(timedelta(hours=-2),
locale='en', format='narrow')
self.assertEqual('2h', string)
def test_format_invalid(self):
self.assertRaises(TypeError, dates.format_timedelta,
timedelta(hours=1), format='')
self.assertRaises(TypeError, dates.format_timedelta,
timedelta(hours=1), format='bold italic')
self.assertRaises(TypeError, dates.format_timedelta,
timedelta(hours=1), format=None)
class TimeZoneAdjustTestCase(unittest.TestCase):
def _utc(self):
class EvilFixedOffsetTimezone(FixedOffsetTimezone):
def localize(self, dt, is_dst=False):
raise NotImplementedError()
UTC = EvilFixedOffsetTimezone(0, 'UTC')
# This is important to trigger the actual bug (#257)
self.assertEqual(False, hasattr(UTC, 'normalize'))
return UTC
def test_can_format_time_with_non_pytz_timezone(self):
# regression test for #257
utc = self._utc()
t = datetime(2007, 4, 1, 15, 30, tzinfo=utc)
formatted_time = dates.format_time(t, 'long', tzinfo=utc, locale='en')
self.assertEqual('3:30:00 PM UTC', formatted_time)
def test_get_period_names():
assert dates.get_period_names(locale='en_US')['am'] == u'AM'
def test_get_day_names():
assert dates.get_day_names('wide', locale='en_US')[1] == u'Tuesday'
assert dates.get_day_names('short', locale='en_US')[1] == u'Tu'
assert dates.get_day_names('abbreviated', locale='es')[1] == u'mar.'
de = dates.get_day_names('narrow', context='stand-alone', locale='de_DE')
assert de[1] == u'D'
def test_get_month_names():
assert dates.get_month_names('wide', locale='en_US')[1] == u'January'
assert dates.get_month_names('abbreviated', locale='es')[1] == u'ene.'
de = dates.get_month_names('narrow', context='stand-alone', locale='de_DE')
assert de[1] == u'J'
def test_get_quarter_names():
assert dates.get_quarter_names('wide', locale='en_US')[1] == u'1st quarter'
assert dates.get_quarter_names('abbreviated', locale='de_DE')[1] == u'Q1'
assert dates.get_quarter_names('narrow', locale='de_DE')[1] == u'1'
def test_get_era_names():
assert dates.get_era_names('wide', locale='en_US')[1] == u'Anno Domini'
assert dates.get_era_names('abbreviated', locale='de_DE')[1] == u'n. Chr.'
def test_get_date_format():
us = dates.get_date_format(locale='en_US')
assert us.pattern == u'MMM d, y'
de = dates.get_date_format('full', locale='de_DE')
assert de.pattern == u'EEEE, d. MMMM y'
def test_get_datetime_format():
assert dates.get_datetime_format(locale='en_US') == u'{1}, {0}'
def test_get_time_format():
assert dates.get_time_format(locale='en_US').pattern == u'h:mm:ss a'
assert (dates.get_time_format('full', locale='de_DE').pattern ==
u'HH:mm:ss zzzz')
def test_get_timezone_gmt():
dt = datetime(2007, 4, 1, 15, 30)
assert dates.get_timezone_gmt(dt, locale='en') == u'GMT+00:00'
assert dates.get_timezone_gmt(dt, locale='en', return_z=True) == 'Z'
assert dates.get_timezone_gmt(dt, locale='en', width='iso8601_short') == u'+00'
tz = timezone('America/Los_Angeles')
dt = tz.localize(datetime(2007, 4, 1, 15, 30))
assert dates.get_timezone_gmt(dt, locale='en') == u'GMT-07:00'
assert dates.get_timezone_gmt(dt, 'short', locale='en') == u'-0700'
assert dates.get_timezone_gmt(dt, locale='en', width='iso8601_short') == u'-07'
assert dates.get_timezone_gmt(dt, 'long', locale='fr_FR') == u'UTC-07:00'
def test_get_timezone_location():
tz = timezone('America/St_Johns')
assert (dates.get_timezone_location(tz, locale='de_DE') ==
u"Kanada (St. John\u2019s) Zeit")
assert (dates.get_timezone_location(tz, locale='en') ==
u'Canada (St. John’s) Time')
assert (dates.get_timezone_location(tz, locale='en', return_city=True) ==
u'St. John’s')
tz = timezone('America/Mexico_City')
assert (dates.get_timezone_location(tz, locale='de_DE') ==
u'Mexiko (Mexiko-Stadt) Zeit')
tz = timezone('Europe/Berlin')
assert (dates.get_timezone_location(tz, locale='de_DE') ==
u'Deutschland (Berlin) Zeit')
def test_get_timezone_name():
dt = time(15, 30, tzinfo=timezone('America/Los_Angeles'))
assert (dates.get_timezone_name(dt, locale='en_US') ==
u'Pacific Standard Time')
assert (dates.get_timezone_name(dt, locale='en_US', return_zone=True) ==
u'America/Los_Angeles')
assert dates.get_timezone_name(dt, width='short', locale='en_US') == u'PST'
tz = timezone('America/Los_Angeles')
assert dates.get_timezone_name(tz, locale='en_US') == u'Pacific Time'
assert dates.get_timezone_name(tz, 'short', locale='en_US') == u'PT'
tz = timezone('Europe/Berlin')
assert (dates.get_timezone_name(tz, locale='de_DE') ==
u'Mitteleurop\xe4ische Zeit')
assert (dates.get_timezone_name(tz, locale='pt_BR') ==
u'Hor\xe1rio da Europa Central')
tz = timezone('America/St_Johns')
assert dates.get_timezone_name(tz, locale='de_DE') == u'Neufundland-Zeit'
tz = timezone('America/Los_Angeles')
assert dates.get_timezone_name(tz, locale='en', width='short',
zone_variant='generic') == u'PT'
assert dates.get_timezone_name(tz, locale='en', width='short',
zone_variant='standard') == u'PST'
assert dates.get_timezone_name(tz, locale='en', width='short',
zone_variant='daylight') == u'PDT'
assert dates.get_timezone_name(tz, locale='en', width='long',
zone_variant='generic') == u'Pacific Time'
assert dates.get_timezone_name(tz, locale='en', width='long',
zone_variant='standard') == u'Pacific Standard Time'
assert dates.get_timezone_name(tz, locale='en', width='long',
zone_variant='daylight') == u'Pacific Daylight Time'
localnow = datetime.utcnow().replace(tzinfo=timezone('UTC')).astimezone(dates.LOCALTZ)
assert (dates.get_timezone_name(None, locale='en_US') ==
dates.get_timezone_name(localnow, locale='en_US'))
assert (dates.get_timezone_name('Europe/Berlin', locale='en_US') == "Central European Time")
assert (dates.get_timezone_name(1400000000, locale='en_US', width='short') == "Unknown Region (UTC) Time")
assert (dates.get_timezone_name(time(16, 20), locale='en_US', width='short') == "UTC")
def test_format_date():
d = date(2007, 4, 1)
assert dates.format_date(d, locale='en_US') == u'Apr 1, 2007'
assert (dates.format_date(d, format='full', locale='de_DE') ==
u'Sonntag, 1. April 2007')
assert (dates.format_date(d, "EEE, MMM d, ''yy", locale='en') ==
u"Sun, Apr 1, '07")
def test_format_datetime():
dt = datetime(2007, 4, 1, 15, 30)
assert (dates.format_datetime(dt, locale='en_US') ==
u'Apr 1, 2007, 3:30:00 PM')
full = dates.format_datetime(dt, 'full', tzinfo=timezone('Europe/Paris'),
locale='fr_FR')
assert full == (u'dimanche 1 avril 2007 à 17:30:00 heure '
u'd\u2019\xe9t\xe9 d\u2019Europe centrale')
custom = dates.format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz",
tzinfo=timezone('US/Eastern'), locale='en')
assert custom == u'2007.04.01 AD at 11:30:00 EDT'
def test_format_time():
t = time(15, 30)
assert dates.format_time(t, locale='en_US') == u'3:30:00 PM'
assert dates.format_time(t, format='short', locale='de_DE') == u'15:30'
assert (dates.format_time(t, "hh 'o''clock' a", locale='en') ==
u"03 o'clock PM")
t = datetime(2007, 4, 1, 15, 30)
tzinfo = timezone('Europe/Paris')
t = tzinfo.localize(t)
fr = dates.format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR')
assert fr == u'15:30:00 heure d\u2019\xe9t\xe9 d\u2019Europe centrale'
custom = dates.format_time(t, "hh 'o''clock' a, zzzz",
tzinfo=timezone('US/Eastern'), locale='en')
assert custom == u"09 o'clock AM, Eastern Daylight Time"
t = time(15, 30)
paris = dates.format_time(t, format='full',
tzinfo=timezone('Europe/Paris'), locale='fr_FR')
assert paris == u'15:30:00 heure normale d\u2019Europe centrale'
us_east = dates.format_time(t, format='full',
tzinfo=timezone('US/Eastern'), locale='en_US')
assert us_east == u'3:30:00 PM Eastern Standard Time'
def test_format_skeleton():
dt = datetime(2007, 4, 1, 15, 30)
assert (dates.format_skeleton('yMEd', dt, locale='en_US') == u'Sun, 4/1/2007')
assert (dates.format_skeleton('yMEd', dt, locale='th') == u'อา. 1/4/2007')
assert (dates.format_skeleton('EHm', dt, locale='en') == u'Sun 15:30')
assert (dates.format_skeleton('EHm', dt, tzinfo=timezone('Asia/Bangkok'), locale='th') == u'อา. 22:30 น.')
def test_format_timedelta():
assert (dates.format_timedelta(timedelta(weeks=12), locale='en_US')
== u'3 months')
assert (dates.format_timedelta(timedelta(seconds=1), locale='es')
== u'1 segundo')
assert (dates.format_timedelta(timedelta(hours=3), granularity='day',
locale='en_US')
== u'1 day')
assert (dates.format_timedelta(timedelta(hours=23), threshold=0.9,
locale='en_US')
== u'1 day')
assert (dates.format_timedelta(timedelta(hours=23), threshold=1.1,
locale='en_US')
== u'23 hours')
def test_parse_date():
assert dates.parse_date('4/1/04', locale='en_US') == date(2004, 4, 1)
assert dates.parse_date('01.04.2004', locale='de_DE') == date(2004, 4, 1)
def test_parse_time():
assert dates.parse_time('15:30:00', locale='en_US') == time(15, 30)
def test_datetime_format_get_week_number():
format = dates.DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE'))
assert format.get_week_number(6) == 1
format = dates.DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US'))
assert format.get_week_number(6) == 2
def test_parse_pattern():
assert dates.parse_pattern("MMMMd").format == u'%(MMMM)s%(d)s'
assert (dates.parse_pattern("MMM d, yyyy").format ==
u'%(MMM)s %(d)s, %(yyyy)s')
assert (dates.parse_pattern("H:mm' Uhr 'z").format ==
u'%(H)s:%(mm)s Uhr %(z)s')
assert dates.parse_pattern("hh' o''clock'").format == u"%(hh)s o'clock"
def test_lithuanian_long_format():
assert (
dates.format_date(date(2015, 12, 10), locale='lt_LT', format='long') ==
u'2015 m. gruodžio 10 d.'
)
def test_zh_TW_format():
# Refs GitHub issue #378
assert dates.format_time(datetime(2016, 4, 8, 12, 34, 56), locale='zh_TW') == u'\u4e0b\u534812:34:56'
def test_format_current_moment(monkeypatch):
import datetime as datetime_module
frozen_instant = datetime.utcnow()
class frozen_datetime(datetime):
@classmethod
def utcnow(cls):
return frozen_instant
# Freeze time! Well, some of it anyway.
monkeypatch.setattr(datetime_module, "datetime", frozen_datetime)
assert dates.format_datetime(locale="en_US") == dates.format_datetime(frozen_instant, locale="en_US")
@pytest.mark.all_locales
def test_no_inherit_metazone_marker_never_in_output(locale):
# See: https://github.com/python-babel/babel/issues/428
tz = pytz.timezone('America/Los_Angeles')
t = tz.localize(datetime(2016, 1, 6, 7))
assert NO_INHERITANCE_MARKER not in dates.format_time(t, format='long', locale=locale)
assert NO_INHERITANCE_MARKER not in dates.get_timezone_name(t, width='short', locale=locale)
def test_no_inherit_metazone_formatting():
# See: https://github.com/python-babel/babel/issues/428
tz = pytz.timezone('America/Los_Angeles')
t = tz.localize(datetime(2016, 1, 6, 7))
assert dates.format_time(t, format='long', locale='en_US') == "7:00:00 AM PST"
assert dates.format_time(t, format='long', locale='en_GB') == "07:00:00 Pacific Standard Time"
assert dates.get_timezone_name(t, width='short', locale='en_US') == "PST"
assert dates.get_timezone_name(t, width='short', locale='en_GB') == "Pacific Standard Time"
def test_russian_week_numbering():
# See https://github.com/python-babel/babel/issues/485
v = date(2017, 1, 1)
assert dates.format_date(v, format='YYYY-ww',locale='ru_RU') == '2016-52' # This would have returned 2017-01 prior to CLDR 32
assert dates.format_date(v, format='YYYY-ww',locale='de_DE') == '2016-52'
| 44.481061 | 130 | 0.626416 |
dd5536af6c031bbc5a89817e4396567c81c14d6d
| 5,184 |
py
|
Python
|
tests/test_pytest_runner.py
|
dshkuratov/dibctl
|
0467ed862c021315210ce76380feaa3cc09251d8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pytest_runner.py
|
dshkuratov/dibctl
|
0467ed862c021315210ce76380feaa3cc09251d8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pytest_runner.py
|
dshkuratov/dibctl
|
0467ed862c021315210ce76380feaa3cc09251d8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import mock
import pytest
import os
import inspect
import sys
from mock import sentinel
import paramiko
@pytest.fixture
def pytest_runner():
from dibctl import pytest_runner
return pytest_runner
@pytest.fixture
def ssh():
from dibctl import ssh
return ssh
@pytest.fixture
def prepare_os():
from dibctl import prepare_os
return prepare_os
@pytest.fixture
def dcp(pytest_runner, ssh):
tos = mock.MagicMock()
tos.ip = '192.168.0.1'
tos.os_instance.interface_list.return_value = [sentinel.iface1, sentinel.iface2]
tos.flavor.return_value.get_keys.return_value = {'name': 'value'}
tos.key_name = 'foo-key-name'
tos.os_key_private_file = 'private-file'
tos.ips.return_value = [sentinel.ip1, sentinel.ip2]
tos.ips_by_version.return_value = [sentinel.ip3, sentinel.ip4]
tos.get_image_info.return_value = sentinel.image_info
tos.image = sentinel.image
tos.os_instance.get_console_output.return_value = sentinel.console_out
s = ssh.SSH('192.168.0.1', 'root', 'secret')
dcp = pytest_runner.DibCtlPlugin(s, tos, {})
return dcp
@pytest.mark.parametrize("code, status", [
[0, True],
[-1, False],
[1, False]
])
def test_runner_status(pytest_runner, code, status):
with mock.patch.object(pytest_runner, "DibCtlPlugin"):
with mock.patch.object(pytest_runner.pytest, "main", return_value=code):
assert pytest_runner.runner(
sentinel.path,
sentinel.ssh,
sentinel.tos,
sentinel.environment_variables,
sentinel.timeout_val,
False,
) == status
def test_runner_status_cont_on_fail_true(pytest_runner):
with mock.patch.object(pytest_runner, "DibCtlPlugin"):
with mock.patch.object(pytest_runner.pytest, "main", return_value=-1) as mock_main:
pytest_runner.runner(
sentinel.path,
sentinel.ssh,
sentinel.tos,
sentinel.environment_variables,
sentinel.timeout_val,
False,
)
assert '-x' in mock_main.call_args[0][0]
def test_runner_status_cont_on_fail_false(pytest_runner):
with mock.patch.object(pytest_runner, "DibCtlPlugin"):
with mock.patch.object(pytest_runner.pytest, "main", return_value=-1) as mock_main:
pytest_runner.runner(
sentinel.path,
sentinel.ssh,
sentinel.tos,
sentinel.environment_variables,
sentinel.timeout_val,
True
)
assert '-x' not in mock_main.call_args[0][0]
def test_DibCtlPlugin_init_soft_import(dcp):
assert dcp.testinfra
def test_DibCtlPlugin_init_no_testinfra(pytest_runner):
with mock.patch.dict(sys.modules, {'testinfra': None}):
dcp = pytest_runner.DibCtlPlugin(
sentinel.ssh,
mock.MagicMock(),
{}
)
assert dcp.testinfra is None
with pytest.raises(ImportError):
dcp.ssh_backend(mock.MagicMock())
def test_DibCtlPlugin_flavor_fixture(dcp):
assert dcp.flavor(sentinel.request)
def test_DibCtlPlugin_flavor_meta_fixture(dcp):
assert dcp.flavor_meta(sentinel.request) == {'name': 'value'}
def test_DibCtlPlugin_instance_fixture(dcp):
assert dcp.instance(sentinel.request)
def test_DibCtlPlugin_network_fixture(dcp):
assert dcp.network(sentinel.request) == [sentinel.iface1, sentinel.iface2]
def test_DibCtlPlugin_wait_for_port_fixture(dcp):
dcp.wait_for_port(sentinel.request)()
assert dcp.tos.wait_for_port.call_args == mock.call(22, 60)
def test_DibCtlPlugin_ips_fixture(dcp):
assert dcp.ips(sentinel.request) == [sentinel.ip1, sentinel.ip2]
def test_DibCtlPlugin_ips_v4_fixture(dcp):
assert dcp.ips_v4(sentinel.request) == [sentinel.ip3, sentinel.ip4]
def test_DibCtlPlugin_main_ip_fixture(dcp):
assert dcp.main_ip(sentinel.request) == '192.168.0.1'
def test_DibCtlPlugin_image_info_fixture(dcp):
assert dcp.image_info(sentinel.request) == sentinel.image_info
def test_DibCtlPlugin_image_config_fixture(dcp):
assert dcp.image_config(sentinel.request) == sentinel.image
def test_DibCtlPlugin_console_output_fixture(dcp):
assert dcp.console_output(sentinel.request) == sentinel.console_out
def test_DibCtlPlugin_ssh_client_fixture(dcp):
assert isinstance(dcp.ssh_client(), paramiko.client.SSHClient)
@pytest.mark.parametrize('key, value', [
['ip', '192.168.0.1'],
['username', 'root']
])
def test_DibCtlPlugin_ssh_fixture(dcp, key, value):
ssh = dcp.ssh(sentinel.request)
assert ssh[key] == value
if __name__ == "__main__":
ourfilename = os.path.abspath(inspect.getfile(inspect.currentframe()))
currentdir = os.path.dirname(ourfilename)
parentdir = os.path.dirname(currentdir)
file_to_test = os.path.join(
parentdir,
os.path.basename(parentdir),
os.path.basename(ourfilename).replace("test_", '', 1)
)
pytest.main([
"-vv",
"--cov", file_to_test,
"--cov-report", "term-missing"
] + sys.argv)
| 28.640884 | 91 | 0.67689 |
3cf84acef2fb4408c72cbb12dd25e045a5f82917
| 934 |
py
|
Python
|
examples/utils/test_throughput.py
|
case547/acconeer-python-exploration
|
e92de2c3bc8b60939276128e1ddca47486cdfb54
|
[
"BSD-3-Clause-Clear"
] | 117 |
2018-09-19T14:31:55.000Z
|
2022-03-21T05:14:53.000Z
|
examples/utils/test_throughput.py
|
case547/acconeer-python-exploration
|
e92de2c3bc8b60939276128e1ddca47486cdfb54
|
[
"BSD-3-Clause-Clear"
] | 100 |
2019-03-11T04:54:54.000Z
|
2022-03-23T12:40:05.000Z
|
examples/utils/test_throughput.py
|
case547/acconeer-python-exploration
|
e92de2c3bc8b60939276128e1ddca47486cdfb54
|
[
"BSD-3-Clause-Clear"
] | 52 |
2019-06-16T13:35:29.000Z
|
2022-03-07T14:29:13.000Z
|
import acconeer.exptool as et
def main():
args = et.utils.ExampleArgumentParser().parse_args()
et.utils.config_logging(args)
if args.socket_addr:
client = et.SocketClient(args.socket_addr)
elif args.spi:
client = et.SPIClient()
else:
port = args.serial_port or et.utils.autodetect_serial_port()
client = et.UARTClient(port)
config = et.configs.IQServiceConfig()
config.sensor = args.sensors
config.range_interval = [0.2, 0.6]
config.update_rate = 50
info = client.start_session(config)
interrupt_handler = et.utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
fc = et.utils.FreqCounter(num_bits=(4 * 8 * info["data_length"]))
while not interrupt_handler.got_signal:
info, data = client.get_next()
fc.tick()
print("\nDisconnecting...")
client.disconnect()
if __name__ == "__main__":
main()
| 24.578947 | 69 | 0.662741 |
4b52347adcacd02db42f7e4166319cae9bcad209
| 3,918 |
py
|
Python
|
isi_sdk_8_0_1/isi_sdk_8_0_1/models/id_resolution_path.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24 |
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_0_1/isi_sdk_8_0_1/models/id_resolution_path.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46 |
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_0_1/isi_sdk_8_0_1/models/id_resolution_path.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29 |
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class IdResolutionPath(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'lin': 'str',
'path': 'str'
}
attribute_map = {
'lin': 'lin',
'path': 'path'
}
def __init__(self, lin=None, path=None): # noqa: E501
"""IdResolutionPath - a model defined in Swagger""" # noqa: E501
self._lin = None
self._path = None
self.discriminator = None
if lin is not None:
self.lin = lin
if path is not None:
self.path = path
@property
def lin(self):
"""Gets the lin of this IdResolutionPath. # noqa: E501
Logical Inode Number (LIN). A 64-bit number which uniquely identifies a file throughout its life. # noqa: E501
:return: The lin of this IdResolutionPath. # noqa: E501
:rtype: str
"""
return self._lin
@lin.setter
def lin(self, lin):
"""Sets the lin of this IdResolutionPath.
Logical Inode Number (LIN). A 64-bit number which uniquely identifies a file throughout its life. # noqa: E501
:param lin: The lin of this IdResolutionPath. # noqa: E501
:type: str
"""
self._lin = lin
@property
def path(self):
"""Gets the path of this IdResolutionPath. # noqa: E501
The full path associated with the lin. null if the lin cannot be resolved to a path. # noqa: E501
:return: The path of this IdResolutionPath. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this IdResolutionPath.
The full path associated with the lin. null if the lin cannot be resolved to a path. # noqa: E501
:param path: The path of this IdResolutionPath. # noqa: E501
:type: str
"""
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IdResolutionPath):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.398601 | 119 | 0.55998 |
d1f94738e664179ac2649184bb7be9e6c1fe1a88
| 1,291 |
py
|
Python
|
final/100401053/client/client.py
|
hasan-se/blm304
|
893d15282497a426ff96b0c8b6c77d57c406742e
|
[
"Unlicense"
] | 1 |
2021-05-04T21:46:08.000Z
|
2021-05-04T21:46:08.000Z
|
final/100401053/client/client.py
|
hasan-se/blm304
|
893d15282497a426ff96b0c8b6c77d57c406742e
|
[
"Unlicense"
] | null | null | null |
final/100401053/client/client.py
|
hasan-se/blm304
|
893d15282497a426ff96b0c8b6c77d57c406742e
|
[
"Unlicense"
] | null | null | null |
import socket
import sys,os,time
from datetime import datetime
#Ahmet Orbay 100401053
def setComputerTime(totalTime):
command=datetime.fromtimestamp(totalTime)
os.system("sudo date --s '%s'" % command)
def ConnectionServerTime(server,port):
try:
server_address = (server, int(port))
print('connecting to %s port %s' % server_address)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(server_address)
sock.settimeout(5)
except socket.error as error:
print("baglanti saglanamadi",error)
sys.exit()
try:
SendMessageTime=time.time()
data = sock.recv(1024)
timeSpent=(time.time()-SendMessageTime)*1000
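        # timeSpent is the measured round-trip time in milliseconds; it is added to the
        # server-reported timestamp and the sum is converted to seconds before setting the clock.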
msSplit=(data.decode()).split(" ")
ms=msSplit[0].split(".")
totalTime=(float(ms[0])+float(timeSpent))/1000
setComputerTime(totalTime)
print("server response : ",data.decode())
print("Converted Time : %s" %datetime.fromtimestamp(totalTime),msSplit[1])
except socket.error as error:
print(error)
finally:
sock.close()
if __name__ == '__main__':
server=input("ip adresini giriniz : ")
port=input("port numarasini giriniz : ")
ConnectionServerTime(server,port)
| 29.340909 | 82 | 0.63749 |
1d2af1023baccf44a83df198205873c5a037c772
| 4,596 |
py
|
Python
|
tests/forecast/test_forecast_ensemble.py
|
mbignotti/Merlion
|
195b6828d7c147c42fc62a59c97076b597bd590d
|
[
"BSD-3-Clause"
] | 2,215 |
2021-09-21T18:11:36.000Z
|
2022-03-31T20:21:42.000Z
|
tests/forecast/test_forecast_ensemble.py
|
mbignotti/Merlion
|
195b6828d7c147c42fc62a59c97076b597bd590d
|
[
"BSD-3-Clause"
] | 56 |
2021-09-22T14:45:50.000Z
|
2022-03-29T22:28:19.000Z
|
tests/forecast/test_forecast_ensemble.py
|
mbignotti/Merlion
|
195b6828d7c147c42fc62a59c97076b597bd590d
|
[
"BSD-3-Clause"
] | 181 |
2021-09-22T09:04:22.000Z
|
2022-03-29T16:04:43.000Z
|
#
# Copyright (c) 2022 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import logging
from os.path import abspath, dirname, join
import sys
import unittest
import numpy as np
from merlion.models.ensemble.forecast import ForecasterEnsemble, ForecasterEnsembleConfig
from merlion.models.ensemble.combine import ModelSelector, Mean
from merlion.evaluate.forecast import ForecastMetric
from merlion.models.automl.autoprophet import AutoProphet, AutoProphetConfig, PeriodicityStrategy
from merlion.models.forecast.arima import Arima, ArimaConfig
from merlion.models.factory import ModelFactory
from merlion.transform.base import Identity
from merlion.transform.resample import TemporalResample
from merlion.utils.data_io import csv_to_time_series
logger = logging.getLogger(__name__)
rootdir = dirname(dirname(dirname(abspath(__file__))))
class TestForecastEnsemble(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.csv_name = join(rootdir, "data", "example.csv")
self.test_len = 2048
data = csv_to_time_series(self.csv_name, timestamp_unit="ms", data_cols=["kpi"])[::10]
self.vals_train = data[: -self.test_len]
self.vals_test = data[-self.test_len :].univariates[data.names[0]]
model0 = Arima(ArimaConfig(order=(6, 1, 2), max_forecast_steps=50, transform=TemporalResample("1h")))
model1 = Arima(ArimaConfig(order=(24, 1, 0), transform=TemporalResample("10min"), max_forecast_steps=50))
model2 = AutoProphet(
config=AutoProphetConfig(transform=Identity(), periodicity_strategy=PeriodicityStrategy.Max)
)
self.ensemble = ForecasterEnsemble(
models=[model0, model1, model2], config=ForecasterEnsembleConfig(combiner=Mean(abs_score=False))
)
def test_mean(self):
print("-" * 80)
self.expected_smape = 37
self.ensemble.models[0].config.max_forecast_steps = None
self.ensemble.models[1].config.max_forecast_steps = None
logger.info("test_mean\n" + "-" * 80 + "\n")
self.run_test()
def test_selector(self):
print("-" * 80)
self.expected_smape = 35
logger.info("test_selector\n" + "-" * 80 + "\n")
self.ensemble.config.combiner = ModelSelector(metric=ForecastMetric.sMAPE)
self.run_test()
# We expect the model selector to select Prophet because it gets the lowest validation sMAPE
valid_smapes = np.asarray(self.ensemble.combiner.metric_values)
self.assertAlmostEqual(np.max(np.abs(valid_smapes - [34.32, 40.66, 30.71])), 0, delta=0.5)
self.assertSequenceEqual(self.ensemble.models_used, [False, False, True])
def run_test(self):
logger.info("Training model...")
self.ensemble.train(self.vals_train)
        # generate forecasts for the test sequence using the ensemble
        # this will return an aggregated forecast from all the models inside the ensemble
yhat, _ = self.ensemble.forecast(self.vals_test.time_stamps)
yhat = yhat.univariates[yhat.names[0]].np_values
logger.info("forecast looks like " + str(yhat[:3]))
self.assertEqual(len(yhat), len(self.vals_test))
logger.info("Testing save/load...")
self.ensemble.save(join(rootdir, "tmp", "forecast_ensemble"), save_only_used_models=True)
ensemble = ForecasterEnsemble.load(join(rootdir, "tmp", "forecast_ensemble"))
loaded_yhat = ensemble.forecast(self.vals_test.time_stamps)[0]
loaded_yhat = loaded_yhat.univariates[loaded_yhat.names[0]].np_values
self.assertSequenceEqual(list(yhat), list(loaded_yhat))
# serialize and deserialize
obj = self.ensemble.to_bytes()
ensemble = ModelFactory.load_bytes(obj)
loaded_yhat = ensemble.forecast(self.vals_test.time_stamps)[0]
loaded_yhat = loaded_yhat.univariates[loaded_yhat.names[0]].np_values
self.assertSequenceEqual(list(yhat), list(loaded_yhat))
# test sMAPE
y = self.vals_test.np_values
smape = np.mean(200.0 * np.abs((y - yhat) / (np.abs(y) + np.abs(yhat))))
logger.info(f"sMAPE = {smape:.4f}")
self.assertAlmostEqual(smape, self.expected_smape, delta=1)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", stream=sys.stdout, level=logging.DEBUG
)
unittest.main()
| 45.058824 | 119 | 0.699086 |
385a19024d8770c1df32dd3538b1925706a04134
| 4,770 |
py
|
Python
|
examples/nas/search_space_zoo/enas_micro_example.py
|
tblanchart/nni
|
bbb9137c323316d6de04e15d42cf6dc47a889fcc
|
[
"MIT"
] | 2 |
2020-08-11T14:04:21.000Z
|
2020-09-28T05:39:57.000Z
|
examples/nas/search_space_zoo/enas_micro_example.py
|
ajeets1978/nni
|
41312de5dcbe8ba1f59c08a7b62fd5207f623804
|
[
"MIT"
] | 21 |
2020-11-13T19:01:01.000Z
|
2022-02-27T09:12:51.000Z
|
examples/nas/search_space_zoo/enas_micro_example.py
|
johnarthur1/nni
|
2d026a13f97612bc7961d5a55f5088a169de9ee9
|
[
"MIT"
] | null | null | null |
import torch
import logging
import torch.nn as nn
import torch.nn.functional as F
from argparse import ArgumentParser
from torchvision import transforms
from torchvision.datasets import CIFAR10
from nni.nas.pytorch import enas
from utils import accuracy, reward_accuracy
from nni.nas.pytorch.callbacks import (ArchitectureCheckpoint,
LRSchedulerCallback)
from nni.nas.pytorch.search_space_zoo import ENASMicroLayer
logger = logging.getLogger('nni')
def get_dataset(cls):
MEAN = [0.49139968, 0.48215827, 0.44653124]
STD = [0.24703233, 0.24348505, 0.26158768]
transf = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip()
]
normalize = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
train_transform = transforms.Compose(transf + normalize)
valid_transform = transforms.Compose(normalize)
if cls == "cifar10":
dataset_train = CIFAR10(root="./data", train=True, download=True, transform=train_transform)
dataset_valid = CIFAR10(root="./data", train=False, download=True, transform=valid_transform)
else:
raise NotImplementedError
return dataset_train, dataset_valid
class MicroNetwork(nn.Module):
def __init__(self, num_layers=2, num_nodes=5, out_channels=24, in_channels=3, num_classes=10,
dropout_rate=0.0):
super().__init__()
self.num_layers = num_layers
self.stem = nn.Sequential(
nn.Conv2d(in_channels, out_channels * 3, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels * 3)
)
pool_distance = self.num_layers // 3
pool_layers = [pool_distance, 2 * pool_distance + 1]
self.dropout = nn.Dropout(dropout_rate)
self.layers = nn.ModuleList()
c_pp = c_p = out_channels * 3
c_cur = out_channels
for layer_id in range(self.num_layers + 2):
reduction = False
if layer_id in pool_layers:
c_cur, reduction = c_p * 2, True
self.layers.append(ENASMicroLayer(self.layers, num_nodes, c_pp, c_p, c_cur, reduction))
if reduction:
c_pp = c_p = c_cur
c_pp, c_p = c_p, c_cur
self.gap = nn.AdaptiveAvgPool2d(1)
self.dense = nn.Linear(c_cur, num_classes)
self.reset_parameters()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
def forward(self, x):
bs = x.size(0)
prev = cur = self.stem(x)
# aux_logits = None
for layer in self.layers:
prev, cur = layer(prev, cur)
cur = self.gap(F.relu(cur)).view(bs, -1)
cur = self.dropout(cur)
logits = self.dense(cur)
# if aux_logits is not None:
# return logits, aux_logits
return logits
if __name__ == "__main__":
parser = ArgumentParser("enas")
parser.add_argument("--batch-size", default=128, type=int)
parser.add_argument("--log-frequency", default=10, type=int)
# parser.add_argument("--search-for", choices=["macro", "micro"], default="macro")
parser.add_argument("--epochs", default=None, type=int, help="Number of epochs (default: macro 310, micro 150)")
parser.add_argument("--visualization", default=False, action="store_true")
args = parser.parse_args()
dataset_train, dataset_valid = get_dataset("cifar10")
model = MicroNetwork(num_layers=6, out_channels=20, num_nodes=5, dropout_rate=0.1)
num_epochs = args.epochs or 150
mutator = enas.EnasMutator(model, tanh_constant=1.1, cell_exit_extra_step=True)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), 0.05, momentum=0.9, weight_decay=1.0E-4)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=0.001)
trainer = enas.EnasTrainer(model,
loss=criterion,
metrics=accuracy,
reward_function=reward_accuracy,
optimizer=optimizer,
callbacks=[LRSchedulerCallback(lr_scheduler), ArchitectureCheckpoint("./checkpoints")],
batch_size=args.batch_size,
num_epochs=num_epochs,
dataset_train=dataset_train,
dataset_valid=dataset_valid,
log_frequency=args.log_frequency,
mutator=mutator)
if args.visualization:
trainer.enable_visualization()
trainer.train()
| 36.136364 | 118 | 0.621593 |
12dd1a66998ef6595073e613c4b246e981e7cfbe
| 12,729 |
py
|
Python
|
nevergrad/benchmark/xpbase.py
|
mehrdad-shokri/nevergrad
|
7b68b00c158bf60544bc45997560edf733fb5812
|
[
"MIT"
] | 2 |
2021-04-13T12:14:46.000Z
|
2021-07-07T14:37:50.000Z
|
nevergrad/benchmark/xpbase.py
|
mehrdad-shokri/nevergrad
|
7b68b00c158bf60544bc45997560edf733fb5812
|
[
"MIT"
] | 1 |
2020-09-25T10:45:06.000Z
|
2020-09-25T11:51:13.000Z
|
nevergrad/benchmark/xpbase.py
|
mehrdad-shokri/nevergrad
|
7b68b00c158bf60544bc45997560edf733fb5812
|
[
"MIT"
] | 1 |
2021-04-07T10:34:20.000Z
|
2021-04-07T10:34:20.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import time
import random
import numbers
import warnings
import traceback
import typing as tp
import numpy as np
from nevergrad.parametrization import parameter as p
from ..common import decorators
from ..functions.rl.agents import torch # import includes pytorch fix
from ..functions import base as fbase
from ..optimization import base as obase
from ..optimization.optimizerlib import registry as optimizer_registry # import from optimizerlib so as to fill it
from . import execution
registry: decorators.Registry[tp.Callable[..., tp.Iterator['Experiment']]] = decorators.Registry()
# pylint: disable=unused-argument
def _assert_monoobjective_callback(optimizer: obase.Optimizer, candidate: p.Parameter, loss: float) -> None:
if optimizer.num_tell <= 1 and not isinstance(loss, numbers.Number):
raise TypeError(f"Cannot process loss {loss} of type {type(loss)}.\n"
"For multiobjective functions, did you forget to specify 'func.multiobjective_upper_bounds'?")
class OptimizerSettings:
"""Handle for optimizer settings (name, num_workers etc)
Optimizers can be instantiated through this class, providing the optimization space dimension.
Note
----
Eventually, this class should be moved to be directly used for defining experiments.
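    Example
    -------
    A minimal illustrative sketch (the optimizer name and parametrization are placeholders)::
        settings = OptimizerSettings(optimizer="OnePlusOne", budget=100, num_workers=2)
        optimizer = settings.instantiate(parametrization=my_parametrization)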
"""
def __init__(
self,
optimizer: tp.Union[str, obase.ConfiguredOptimizer],
budget: int,
num_workers: int = 1,
batch_mode: bool = True
) -> None:
self._setting_names = [x for x in locals() if x != "self"]
if isinstance(optimizer, str):
assert optimizer in optimizer_registry, f"{optimizer} is not registered"
self.optimizer = optimizer
self.budget = budget
self.num_workers = num_workers
self.executor = execution.MockedTimedExecutor(batch_mode)
@property
def name(self) -> str:
return self.optimizer if isinstance(self.optimizer, str) else repr(self.optimizer)
@property
def batch_mode(self) -> bool:
return self.executor.batch_mode
def __repr__(self) -> str:
return f"Experiment: {self.name}<budget={self.budget}, num_workers={self.num_workers}, batch_mode={self.batch_mode}>"
def _get_factory(self) -> tp.Union[tp.Type[obase.Optimizer], obase.ConfiguredOptimizer]:
return optimizer_registry[self.optimizer] if isinstance(self.optimizer, str) else self.optimizer
@property
def is_incoherent(self) -> bool:
"""Flags settings which are known to be impossible to process.
Currently, this means we flag:
- no_parallelization optimizers for num_workers > 1
"""
# flag no_parallelization when num_workers greater than 1
return self._get_factory().no_parallelization and bool(self.num_workers > 1)
def instantiate(self, parametrization: p.Parameter) -> obase.Optimizer:
"""Instantiate an optimizer, providing the optimization space dimension
"""
return self._get_factory()(parametrization=parametrization, budget=self.budget, num_workers=self.num_workers)
def get_description(self) -> tp.Dict[str, tp.Any]:
"""Returns a dictionary describing the optimizer settings
"""
descr = {x: getattr(self, x) for x in self._setting_names if x != "optimizer"}
descr["optimizer_name"] = self.name
return descr
def __eq__(self, other: tp.Any) -> bool:
if isinstance(other, self.__class__):
for attr in self._setting_names:
x, y = (getattr(settings, attr) for settings in [self, other])
if x != y:
return False
return True
return False
def create_seed_generator(seed: tp.Optional[int]) -> tp.Iterator[tp.Optional[int]]:
"""Create a stream of seeds, independent from the standard random stream.
    This is designed to be used in experiment plan generators, for reproducibility.
Parameter
---------
seed: int or None
the initial seed
Yields
------
int or None
potential new seeds, or None if the initial seed was None
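    Example
    -------
    Illustrative sketch: drawing a few reproducible seeds from the stream::
        seed_gen = create_seed_generator(12)
        seeds = [next(seed_gen) for _ in range(3)]  # same three ints on every run with seed 12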
"""
generator = None if seed is None else np.random.RandomState(seed=seed)
while True:
yield None if generator is None else generator.randint(2**32, dtype=np.uint32)
class Experiment:
"""Specifies an experiment which can be run in benchmarks.
Parameters
----------
function: ExperimentFunction
the function to run the experiment on. It must inherit from ExperimentFunction to implement
necessary functionalities (parametrization, descriptors, evaluation_function, pseudotime etc)
Note
----
- "run" method catches error but forwards stderr so that errors are not completely hidden
- "run" method outputs the description of the experiment, which is a set of figures/names from the functions
settings (dimension, etc...), the optimization settings (budget, etc...) and the results (loss, etc...)
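    Example
    -------
    Illustrative sketch (``func`` stands for any ExperimentFunction instance and the
    optimizer name is a placeholder)::
        xp = Experiment(func, optimizer="OnePlusOne", budget=300, num_workers=1)
        summary = xp.run()  # dict with "loss", "elapsed_budget", "elapsed_time", "error", ...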
"""
# pylint: disable=too-many-arguments
def __init__(self, function: fbase.ExperimentFunction,
optimizer: tp.Union[str, obase.ConfiguredOptimizer], budget: int, num_workers: int = 1,
batch_mode: bool = True, seed: tp.Optional[int] = None,
) -> None:
assert isinstance(function, fbase.ExperimentFunction), ("All experiment functions should "
"derive from ng.functions.ExperimentFunction")
assert function.dimension, "Nothing to optimize"
self.function = function
self.seed = seed # depending on the inner workings of the function, the experiment may not be repeatable
self.optimsettings = OptimizerSettings(optimizer=optimizer, num_workers=num_workers, budget=budget, batch_mode=batch_mode)
self.result = {"loss": np.nan, "elapsed_budget": np.nan, "elapsed_time": np.nan, "error": ""}
self.recommendation: tp.Optional[p.Parameter] = None
self._optimizer: tp.Optional[obase.Optimizer] = None # to be able to restore stopped/checkpointed optimizer
# make sure the random_state of the base function is created, so that spawning copy does not
# trigger a seed for the base function, but only for the copied function
self.function.parametrization.random_state # pylint: disable=pointless-statement
def __repr__(self) -> str:
return f"Experiment: {self.optimsettings} (dim={self.function.dimension}) on {self.function} with seed {self.seed}"
@property
def is_incoherent(self) -> bool:
"""Flags settings which are known to be impossible to process.
Currently, this means we flag:
- no_parallelization optimizers for num_workers > 1
"""
return self.optimsettings.is_incoherent
def run(self) -> tp.Dict[str, tp.Any]:
"""Run an experiment with the provided settings
Returns
-------
dict
A dict containing all the information about the experiments (optimizer/function settings + results)
Note
----
        This function catches errors (but forwards stderr). It fills up the "error" ("" if no error, else the error name),
"loss", "elapsed_time" and "elapsed_budget" of the experiment.
"""
try:
self._run_with_error()
except fbase.ExperimentFunctionCopyError as c_e:
raise c_e
except Exception as e: # pylint: disable=broad-except
# print the case and the traceback
self.result["error"] = e.__class__.__name__
print(f"Error when applying {self}:", file=sys.stderr)
traceback.print_exc()
print("\n", file=sys.stderr)
return self.get_description()
def _log_results(self, pfunc: fbase.ExperimentFunction, t0: float, num_calls: int) -> None:
"""Internal method for logging results before handling the error
"""
self.result["elapsed_time"] = time.time() - t0
self.result["pseudotime"] = self.optimsettings.executor.time
# make a final evaluation with oracle (no noise, but function may still be stochastic)
assert self.recommendation is not None
reco = self.recommendation
assert self._optimizer is not None
if self._optimizer._hypervolume_pareto is None:
# ExperimentFunction can directly override this if need be
self.result["loss"] = pfunc.evaluation_function(*reco.args, **reco.kwargs)
else:
# in multiobjective case, use best hypervolume so far
self.result["loss"] = -self._optimizer._hypervolume_pareto._best_volume
self.result["elapsed_budget"] = num_calls
if num_calls > self.optimsettings.budget:
raise RuntimeError(f"Too much elapsed budget {num_calls} for {self.optimsettings.name} on {self.function}")
def _run_with_error(self, callbacks: tp.Optional[tp.Dict[str, obase._OptimCallBack]] = None) -> None:
"""Run an experiment with the provided artificial function and optimizer
Parameter
---------
callbacks: dict
a dictionary of callbacks to register on the optimizer with key "ask" and/or "tell" (see base Optimizer class).
This is only for easier debugging.
"""
if self.seed is not None and self._optimizer is None:
# Note: when resuming a job (if optimizer is not None), seeding is pointless (reproducibility is lost)
np.random.seed(self.seed) # seeds both functions and parametrization (for which random state init is lazy)
random.seed(self.seed)
torch.manual_seed(self.seed) # type: ignore
pfunc = self.function.copy()
# check constraints are propagated
assert len(pfunc.parametrization._constraint_checkers) == len(self.function.parametrization._constraint_checkers)
# optimizer instantiation can be slow and is done only here to make xp iterators very fast
if self._optimizer is None:
self._optimizer = self.optimsettings.instantiate(parametrization=pfunc.parametrization)
if pfunc.multiobjective_upper_bounds is not None:
self._optimizer.tell(p.MultiobjectiveReference(), pfunc.multiobjective_upper_bounds)
else:
self._optimizer.register_callback("tell", _assert_monoobjective_callback)
if callbacks is not None:
for name, func in callbacks.items():
self._optimizer.register_callback(name, func)
assert self._optimizer.budget is not None, "A budget must be provided"
t0 = time.time()
executor = self.optimsettings.executor
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=obase.InefficientSettingsWarning) # benchmark do not need to be efficient
try:
                # call the actual Optimizer.minimize method because overloaded versions could alter the workflow
# and provide unfair comparisons (especially for parallelized settings)
self.recommendation = obase.Optimizer.minimize(
self._optimizer,
pfunc,
batch_mode=executor.batch_mode,
executor=executor,
)
except Exception as e: # pylint: disable=broad-except
self.recommendation = self._optimizer.provide_recommendation() # get the recommendation anyway
self._log_results(pfunc, t0, self._optimizer.num_ask)
raise e
self._log_results(pfunc, t0, self._optimizer.num_ask)
def get_description(self) -> tp.Dict[str, tp.Union[str, float, bool]]:
"""Return the description of the experiment, as a dict.
"run" must be called beforehand in order to have non-nan values for the loss.
"""
summary = dict(self.result, seed=-1 if self.seed is None else self.seed)
summary.update(self.function.descriptors)
summary.update(self.optimsettings.get_description())
return summary
def __eq__(self, other: tp.Any) -> bool:
if not isinstance(other, Experiment):
return False
same_seed = other.seed is None if self.seed is None else other.seed == self.seed
return same_seed and self.function.equivalent_to(other.function) and self.optimsettings == other.optimsettings
| 46.97048 | 130 | 0.668238 |
b5d2b6c3ec84a6930c59daaa47643def19c125ea
| 12,635 |
py
|
Python
|
config/settings/base.py
|
BuildForSDGCohort2/disposify
|
1fad6644e4693eb8a31119430e4129fc4cdf09f5
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
BuildForSDGCohort2/disposify
|
1fad6644e4693eb8a31119430e4129fc4cdf09f5
|
[
"MIT"
] | 8 |
2020-10-01T11:29:44.000Z
|
2020-10-07T08:55:49.000Z
|
config/settings/base.py
|
BuildForSDGCohort2/wasteline
|
1fad6644e4693eb8a31119430e4129fc4cdf09f5
|
[
"MIT"
] | 1 |
2020-10-03T15:17:20.000Z
|
2020-10-03T15:17:20.000Z
|
"""
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# disposify/
APPS_DIR = ROOT_DIR / "disposify"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
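# Illustrative .env contents (hypothetical values), only read when
# DJANGO_READ_DOT_ENV_FILE=True is exported in the environment:
# DATABASE_URL=postgres://user:password@localhost:5432/disposify
# DJANGO_DEBUG=True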
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "Africa/Lagos"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL", default="postgres:///disposify")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.facebook",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"tailwind",
"widget_tweaks",
]
LOCAL_APPS = [
"theme",
"disposify.users.apps.UsersConfig",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "disposify.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# https://django-allauth.readthedocs.io/en/latest/providers.html#facebook
SOCIALACCOUNT_PROVIDERS = {
"facebook": {
"METHOD": "oauth2",
"SCOPE": ["email", "public_profile"],
"AUTH_PARAMS": {"auth_type": "reauthenticate"},
"INIT_PARAMS": {"cookie": True},
"FIELDS": [
"id",
"first_name",
"last_name",
"middle_name",
"name",
"name_format",
"picture",
"short_name",
],
"EXCHANGE_TOKEN": True,
"LOCALE_FUNC": lambda request: "en_US",
"VERIFIED_EMAIL": False,
"VERSION": "v7.0",
}
}
# facebook
SOCIAL_AUTH_FACEBOOK_KEY = "secret!" # App ID
SOCIAL_AUTH_FACEBOOK_SECRET = "secret!" # app key
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "theme/static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "theme/templates"), str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"disposify.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""JAY""", "[email protected]")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "disposify.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "disposify.users.adapters.SocialAccountAdapter"
# my custom signup form
ACCOUNT_FORMS = {
"signup": "disposify.users.forms.disposifySignUpForm",
}
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
CORS_URLS_REGEX = r"^/api/.*$"
# Your stuff...
# ------------------------------------------------------------------------------
TAILWIND_APP_NAME = "theme"
| 39.361371 | 93 | 0.628255 |
16e76517e485cf0a5c4f9b643ffb5874b8327122
| 83,447 |
py
|
Python
|
lib/extern/closure-linter/gflags.py
|
moonshadowmobile/cast
|
5acaabdb711c208aadeb74c96dfdd663c0aac1c2
|
[
"Apache-2.0"
] | 7 |
2015-07-15T20:11:05.000Z
|
2021-05-22T16:07:07.000Z
|
lib/extern/closure-linter/gflags.py
|
moonshadowmobile/cast
|
5acaabdb711c208aadeb74c96dfdd663c0aac1c2
|
[
"Apache-2.0"
] | 4 |
2018-02-13T17:20:10.000Z
|
2018-02-13T17:20:41.000Z
|
lib/extern/closure-linter/gflags.py
|
moonshadowmobile/cast
|
5acaabdb711c208aadeb74c96dfdd663c0aac1c2
|
[
"Apache-2.0"
] | 1 |
2015-04-14T12:54:31.000Z
|
2015-04-14T12:54:31.000Z
|
#!/usr/bin/env python
# Copyright (c) 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each python
module defines flags that are useful to it. When one python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments and
pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as attributes of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the
command line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
take a name, default value, help-string, and optional 'short' name
(one-letter name). Some flags have other arguments, which are described
with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_bool or
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and upper_bound;
if the number specified on the command line is out of
range, it will raise a FlagError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
Example: --myspacesepflag "foo bar baz"
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints), even if
the flag is only on the command line once.
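As an illustrative sketch (hypothetical flag names), list-style flags are
declared like any other flag and are read back as python lists:
  gflags.DEFINE_list('files', [], 'Comma-separated list of input files')
  gflags.DEFINE_multistring('tag', [], 'May be repeated on the command line')
  # --files=a.txt,b.txt --tag=alpha --tag=beta gives
  # FLAGS.files == ['a.txt', 'b.txt'] and FLAGS.tag == ['alpha', 'beta']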
SPECIAL FLAGS: There are a few flags that have special meaning:
--help prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all key flags (see below).
--helpxml prints a list of all flags, in XML format. DO NOT parse
the output of --help and --helpshort. Instead, parse
the output of --helpxml. As we add new flags, we may
add new XML elements. Hence, make sure your parser
does not crash when it encounters new XML elements.
--flagfile=foo read flags from foo.
--undefok=f1,f2 ignore unrecognized option errors for f1,f2.
For boolean flags, you should use --undefok=boolflag, and
--boolflag and --noboolflag will be accepted. Do not use
--undefok=noboolflag.
-- as in getopt(), terminates flag-processing
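A hypothetical invocation combining the special flags above could look like:
  myscript.py --flagfile=flags.cfg --undefok=legacy_opt -- input.txt output.txt
Everything after the bare -- is no longer treated as a flag and is left for
the application to interpret.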
NOTE ON --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag per
line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag '--flagfile=somefile'.
You CAN recursively nest flagfile= tokens OR use multiple files on the
command line. Lines beginning with a single hash '#' or a double slash
'//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested files
in a different dir than they are executing out of. Relative path names
are always from CWD, not from the directory of the parent include
flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
import gflags
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string('name', 'Mr. President', 'your name')
gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print 'You are a %s, who is %d years old' % (FLAGS.gender, FLAGS.age)
if __name__ == '__main__':
main(sys.argv)
KEY FLAGS:
As we already explained, each module gains access to all flags defined
by all the other modules it transitively imports. In the case of
non-trivial scripts, this means a lot of flags ... For documentation
purposes, it is good to identify the flags that are key (i.e., really
important) to a module. Clearly, the concept of "key flag" is a
subjective one. When trying to determine whether a flag is key to a
module or not, assume that you are trying to explain your module to a
potential user: which flags would you really like to mention first?
We'll describe shortly how to declare which flags are key to a module.
For the moment, assume we know the set of key flags for each module.
Then, if you use the app.py module, you can use the --helpshort flag to
print only the help for the flags that are key to the main module, in a
human-readable format.
NOTE: If you need to parse the flag help, do NOT use the output of
--help / --helpshort. That output is meant for human consumption, and
may be changed in the future. Instead, use --helpxml; flags that are
key for the main module are marked there with a <key>yes</key> element.
The set of key flags for a module M is composed of:
1. Flags defined by module M by calling a DEFINE_* function.
  2. Flags that module M explicitly declares as key by using the function
DECLARE_key_flag(<flag_name>)
3. Key flags of other modules that M specifies by using the function
ADOPT_module_key_flags(<other_module>)
This is a "bulk" declaration of key flags: each flag that is key for
<other_module> becomes key for the current module too.
Notice that if you do not use the functions described at points 2 and 3
above, then --helpshort prints information only about the flags defined
by the main module of our script. In many cases, this behavior is good
enough. But if you move part of the main module code (together with the
related flags) into a different module, then it is nice to use
DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
lists all relevant flags (otherwise, your code refactoring may confuse
your users).
Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
pluses and minuses: DECLARE_key_flag is more targeted and may lead to a
more focused --helpshort documentation. ADOPT_module_key_flags is good
for cases when an entire module is considered key to the current script.
Also, it does not require updates to client scripts when a new flag is
added to the module.
EXAMPLE USAGE 2 (WITH KEY FLAGS):
Consider an application that contains the following three files (two
auxiliary modules and a main module):
File libfoo.py:
import gflags
gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
... some code ...
File libbar.py:
import gflags
gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
'Path to the GFS files for libbar.')
gflags.DEFINE_string('email_for_bar_errors', '[email protected]',
'Email address for bug reports about module libbar.')
gflags.DEFINE_boolean('bar_risky_hack', False,
'Turn on an experimental and buggy optimization.')
... some code ...
File myscript.py:
import gflags
import libfoo
import libbar
gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
# Declare that all flags that are key for libfoo are
# key for this module too.
gflags.ADOPT_module_key_flags(libfoo)
# Declare that the flag --bar_gfs_path (defined in libbar) is key
# for this module.
gflags.DECLARE_key_flag('bar_gfs_path')
... some code ...
When myscript is invoked with the flag --helpshort, the resulted help
message lists information about all the key flags for myscript:
--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path (in
addition to the special flags --help and --helpshort).
Of course, myscript uses all the flags declared by it (in this case,
just --num_replicas) or by any of the modules it transitively imports
(e.g., the modules libfoo, libbar). E.g., it can access the value of
FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
flag for myscript.
"""
import cgi
import getopt
import os
import re
import string
import sys
# Are we running at least python 2.2?
try:
if tuple(sys.version_info[:3]) < (2,2,0):
raise NotImplementedError("requires python 2.2.0 or later")
except AttributeError: # a very old python, that lacks sys.version_info
raise NotImplementedError("requires python 2.2.0 or later")
# If we're not running at least python 2.2.1, define True, False, and bool.
# Thanks, Guido, for the code.
try:
True, False, bool
except NameError:
False = 0
True = 1
def bool(x):
if x:
return True
else:
return False
# Are we running under pychecker?
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
def _GetCallingModule():
"""Returns the name of the module that's calling into this module.
We generally use this function to get the name of the module calling a
DEFINE_foo... function.
"""
# Walk down the stack to find the first globals dict that's not ours.
for depth in range(1, sys.getrecursionlimit()):
if not sys._getframe(depth).f_globals is globals():
module_name = __GetModuleName(sys._getframe(depth).f_globals)
if module_name is not None:
return module_name
raise AssertionError("No module was found")
# module exceptions:
class FlagsError(Exception):
"""The base class for all flags errors."""
pass
class DuplicateFlag(FlagsError):
"""Raised if there is a flag naming conflict."""
pass
# A DuplicateFlagError conveys more information than a
# DuplicateFlag. Since there are external modules that create
# DuplicateFlags, the interface to DuplicateFlag shouldn't change.
class DuplicateFlagError(DuplicateFlag):
def __init__(self, flagname, flag_values):
self.flagname = flagname
message = "The flag '%s' is defined twice." % self.flagname
flags_by_module = flag_values.FlagsByModuleDict()
for module in flags_by_module:
for flag in flags_by_module[module]:
if flag.name == flagname or flag.short_name == flagname:
message = message + " First from " + module + ","
break
message = message + " Second from " + _GetCallingModule()
DuplicateFlag.__init__(self, message)
class IllegalFlagValue(FlagsError):
"""The flag command line argument is illegal."""
pass
class UnrecognizedFlag(FlagsError):
"""Raised if a flag is unrecognized."""
pass
# An UnrecognizedFlagError conveys more information than an
# UnrecognizedFlag. Since there are external modules that create
# DuplicateFlags, the interface to DuplicateFlag shouldn't change.
class UnrecognizedFlagError(UnrecognizedFlag):
def __init__(self, flagname):
self.flagname = flagname
UnrecognizedFlag.__init__(
self, "Unknown command line flag '%s'" % flagname)
# Global variable used by expvar
_exported_flags = {}
_help_width = 80 # width of help output
def GetHelpWidth():
"""Returns: an integer, the width of help lines that is used in TextWrap."""
return _help_width
def CutCommonSpacePrefix(text):
"""Removes a common space prefix from the lines of a multiline text.
  If the first line does not start with a space, it is left as it is and
  a common space prefix is searched for only in the remaining lines.
  That means the first line will stay untouched. This is especially
  useful to turn doc strings into help texts, because some people
  prefer to have the doc comment start right after the opening
  apostrophes and then align the following lines, while others put the
  apostrophes on a separate line.
The function also drops trailing empty lines and ignores empty lines
following the initial content line while calculating the initial
common whitespace.
Args:
text: text to work on
Returns:
the resulting text
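  Example (illustrative, hypothetical input):
    CutCommonSpacePrefix('first line\\n  indented a\\n  indented b')
    returns 'first line\\nindented a\\nindented b'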
"""
text_lines = text.splitlines()
# Drop trailing empty lines
while text_lines and not text_lines[-1]:
text_lines = text_lines[:-1]
if text_lines:
# We got some content, is the first line starting with a space?
if text_lines[0] and text_lines[0][0].isspace():
text_first_line = []
else:
text_first_line = [text_lines.pop(0)]
    # Calculate length of common leading whitespace (only over content lines)
common_prefix = os.path.commonprefix([line for line in text_lines if line])
space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
# If we have a common space prefix, drop it from all lines
if space_prefix_len:
for index in xrange(len(text_lines)):
if text_lines[index]:
text_lines[index] = text_lines[index][space_prefix_len:]
return '\n'.join(text_first_line + text_lines)
return ''
def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
"""Wraps a given text to a maximum line length and returns it.
We turn lines that only contain whitespaces into empty lines. We keep
new lines and tabs (e.g., we do not treat tabs as spaces).
Args:
text: text to wrap
length: maximum length of a line, includes indentation
if this is None then use GetHelpWidth()
indent: indent for all but first line
firstline_indent: indent for first line; if None, fall back to indent
tabs: replacement for tabs
Returns:
wrapped text
Raises:
FlagsError: if indent not shorter than length
FlagsError: if firstline_indent not shorter than length
"""
# Get defaults where callee used None
if length is None:
length = GetHelpWidth()
if indent is None:
indent = ''
if len(indent) >= length:
raise FlagsError('Indent must be shorter than length')
# In line we will be holding the current line which is to be started
# with indent (or firstline_indent if available) and then appended
# with words.
if firstline_indent is None:
firstline_indent = ''
line = indent
else:
line = firstline_indent
if len(firstline_indent) >= length:
      raise FlagsError('First line indent must be shorter than length')
# If the callee does not care about tabs we simply convert them to
# spaces If callee wanted tabs to be single space then we do that
# already here.
if not tabs or tabs == ' ':
text = text.replace('\t', ' ')
else:
tabs_are_whitespace = not tabs.strip()
line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)
# Split the text into lines and the lines with the regex above. The
# resulting lines are collected in result[]. For each split we get the
# spaces, the tabs and the next non white space (e.g. next word).
result = []
for text_line in text.splitlines():
# Store result length so we can find out whether processing the next
# line gave any new content
old_result_len = len(result)
# Process next line with line_regex. For optimization we do an rstrip().
# - process tabs (changes either line or word, see below)
# - process word (first try to squeeze on line, then wrap or force wrap)
# Spaces found on the line are ignored, they get added while wrapping as
# needed.
for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
# If tabs weren't converted to spaces, handle them now
if current_tabs:
# If the last thing we added was a space anyway then drop
# it. But let's not get rid of the indentation.
if (((result and line != indent) or
(not result and line != firstline_indent)) and line[-1] == ' '):
line = line[:-1]
# Add the tabs, if that means adding whitespace, just add it at
        # the line; the rstrip() code will shorten the line down if
# necessary
if tabs_are_whitespace:
line += tabs * len(current_tabs)
else:
# if not all tab replacement is whitespace we prepend it to the word
word = tabs * len(current_tabs) + word
# Handle the case where word cannot be squeezed onto current last line
if len(line) + len(word) > length and len(indent) + len(word) <= length:
result.append(line.rstrip())
line = indent + word
word = ''
# No space left on line or can we append a space?
if len(line) + 1 >= length:
result.append(line.rstrip())
line = indent
else:
line += ' '
# Add word and shorten it up to allowed line length. Restart next
# line with indent and repeat, or add a space if we're done (word
      # finished). This deals with words that cannot fit on one line
# (e.g. indent + word longer than allowed line length).
while len(line) + len(word) >= length:
line += word
result.append(line[:length])
word = line[length:]
line = indent
# Default case, simply append the word and a space
if word:
line += word + ' '
# End of input line. If we have content we finish the line. If the
      # current line is just the indent but we had content during this
      # original line, then we need to add an empty line.
if (result and line != indent) or (not result and line != firstline_indent):
result.append(line.rstrip())
elif len(result) == old_result_len:
result.append('')
line = indent
return '\n'.join(result)
def DocToHelp(doc):
"""Takes a __doc__ string and reformats it as help."""
# Get rid of starting and ending white space. Using lstrip() or even
# strip() could drop more than maximum of first line and right space
# of last line.
doc = doc.strip()
# Get rid of all empty lines
whitespace_only_line = re.compile('^[ \t]+$', re.M)
doc = whitespace_only_line.sub('', doc)
# Cut out common space at line beginnings
doc = CutCommonSpacePrefix(doc)
# Just like this module's comment, comments tend to be aligned somehow.
# In other words they all start with the same amount of white space
# 1) keep double new lines
# 2) keep ws after new lines if not empty line
# 3) all other new lines shall be changed to a space
# Solution: Match new lines between non white space and replace with space.
  doc = re.sub('(?<=\S)\n(?=\S)', ' ', doc)
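  # Illustrative effect on a hypothetical input: 'A wrapped\ndocstring.\n\nNew paragraph.'
  # becomes 'A wrapped docstring.\n\nNew paragraph.': single newlines between
  # non-whitespace characters are collapsed, blank lines are preserved.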
return doc
def __GetModuleName(globals_dict):
"""Given a globals dict, returns the name of the module that defines it.
Args:
globals_dict: A dictionary that should correspond to an environment
providing the values of the globals.
Returns:
A string (the name of the module) or None (if the module could not
    be identified).
"""
for name, module in sys.modules.iteritems():
if getattr(module, '__dict__', None) is globals_dict:
if name == '__main__':
return sys.argv[0]
return name
return None
def _GetMainModule():
"""Returns the name of the module from which execution started."""
for depth in range(1, sys.getrecursionlimit()):
try:
globals_of_main = sys._getframe(depth).f_globals
except ValueError:
return __GetModuleName(globals_of_main)
raise AssertionError("No module was found")
class FlagValues:
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded, the only
# way of defining and using fields is to access __dict__ directly.
# Dictionary: flag name (string) -> Flag object.
self.__dict__['__flags'] = {}
# Dictionary: module name (string) -> list of Flag objects that are defined
# by that module.
self.__dict__['__flags_by_module'] = {}
# Dictionary: module name (string) -> list of Flag objects that are
# key for that module.
self.__dict__['__key_flags_by_module'] = {}
# Set if we should use new style gnu_getopt rather than getopt when parsing
# the args. Only possible with Python 2.3+
self.UseGnuGetOpt(False)
def UseGnuGetOpt(self, use_gnu_getopt=True):
self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
def IsGnuGetOpt(self):
return self.__dict__['__use_gnu_getopt']
def FlagDict(self):
return self.__dict__['__flags']
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
def KeyFlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of key flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__key_flags_by_module']
def _RegisterFlagByModule(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
      flag: A Flag object, a flag defined by the module.
"""
flags_by_module = self.FlagsByModuleDict()
flags_by_module.setdefault(module_name, []).append(flag)
def _RegisterKeyFlagForModule(self, module_name, flag):
"""Specifies that a flag is a key flag for a module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
key_flags_by_module = self.KeyFlagsByModuleDict()
# The list of key flags for the module named module_name.
key_flags = key_flags_by_module.setdefault(module_name, [])
# Add flag, but avoid duplicates.
if flag not in key_flags:
key_flags.append(flag)
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, []))
def _GetKeyFlagsForModule(self, module):
"""Returns the list of key flags for a module.
Args:
module: A module object or a module name (a string)
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
# Any flag is a key flag for the module that defined it. NOTE:
# key_flags is a fresh list: we can update it without affecting the
# internals of this FlagValues object.
key_flags = self._GetFlagsDefinedByModule(module)
# Take into account flags explicitly declared as key for a module.
for flag in self.KeyFlagsByModuleDict().get(module, []):
if flag not in key_flags:
key_flags.append(flag)
return key_flags
def AppendFlagValues(self, flag_values):
"""Appends flags registered in another FlagValues instance.
Args:
flag_values: registry to copy from
"""
for flag_name, flag in flag_values.FlagDict().iteritems():
# Each flag with a short name appears here twice (once under its
# normal name, and again with its short name). To prevent
# problems (DuplicateFlagError) with double flag registration, we
# perform a check to make sure that the entry we're looking at is
# for its normal name.
if flag_name == flag.name:
self[flag_name] = flag
def __setitem__(self, name, flag):
"""Registers a new flag variable."""
fl = self.FlagDict()
if not isinstance(flag, Flag):
raise IllegalFlagValue(flag)
if not isinstance(name, type("")):
raise FlagsError("Flag name must be a string")
if len(name) == 0:
raise FlagsError("Flag name cannot be empty")
# If running under pychecker, duplicate keys are likely to be
# defined. Disable check for duplicate keys when pycheck'ing.
if (fl.has_key(name) and not flag.allow_override and
not fl[name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlagError(name, self)
short_name = flag.short_name
if short_name is not None:
if (fl.has_key(short_name) and not flag.allow_override and
not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlagError(short_name, self)
fl[short_name] = flag
fl[name] = flag
global _exported_flags
_exported_flags[name] = flag
def __getitem__(self, name):
"""Retrieves the Flag object for the flag --name."""
return self.FlagDict()[name]
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
fl = self.FlagDict()
if not fl.has_key(name):
raise AttributeError(name)
return fl[name].value
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
fl = self.FlagDict()
fl[name].value = value
return value
def _FlagIsRegistered(self, flag_obj):
"""Checks whether a Flag object is registered under some name.
Note: this is non-trivial: in addition to its normal name, a flag
may have a short name too. In self.FlagDict(), both the normal and
the short name are mapped to the same flag object. E.g., calling
only "del FLAGS.short_name" is not unregistering the corresponding
Flag object (it is still registered under the longer name).
Args:
flag_obj: A Flag object.
Returns:
A boolean: True iff flag_obj is registered under some name.
"""
flag_dict = self.FlagDict()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
# The flag cannot be registered under any other name, so we do not
# need to do a full search through the values of self.FlagDict().
return False
def __delattr__(self, flag_name):
"""Deletes a previously-defined flag from a flag object.
This method makes sure we can delete a flag by using
del flag_values_object.<flag_name>
E.g.,
flags.DEFINE_integer('foo', 1, 'Integer flag.')
del flags.FLAGS.foo
Args:
flag_name: A string, the name of the flag to be deleted.
Raises:
AttributeError: When there is no registered flag named flag_name.
"""
fl = self.FlagDict()
if flag_name not in fl:
raise AttributeError(flag_name)
flag_obj = fl[flag_name]
del fl[flag_name]
if not self._FlagIsRegistered(flag_obj):
# If the Flag object indicated by flag_name is no longer
# registered (please see the docstring of _FlagIsRegistered), then
# we delete the occurrences of the flag object in all our internal
# dictionaries.
self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
"""Removes a flag object from a module -> list of flags dictionary.
Args:
flags_by_module_dict: A dictionary that maps module names to lists of
flags.
flag_obj: A flag object.
"""
for unused_module, flags_in_module in flags_by_module_dict.iteritems():
# while (as opposed to if) takes care of multiple occurrences of a
# flag in the list for the same module.
while flag_obj in flags_in_module:
flags_in_module.remove(flag_obj)
def SetDefault(self, name, value):
"""Changes the default value of the named flag object."""
fl = self.FlagDict()
if not fl.has_key(name):
raise AttributeError(name)
fl[name].SetDefault(value)
def __contains__(self, name):
"""Returns True if name is a value (flag) in the dict."""
return name in self.FlagDict()
has_key = __contains__ # a synonym for __contains__()
def __iter__(self):
return self.FlagDict().iterkeys()
def __call__(self, argv):
"""Parses flags from argv; stores parsed flags into this FlagValues object.
All unparsed arguments are returned. Flags are parsed using the GNU
Program Argument Syntax Conventions, using getopt:
http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt
Args:
argv: argument list. Can be of any type that may be converted to a list.
Returns:
The list of arguments not parsed as options, including argv[0]
Raises:
FlagsError: on any parsing error
"""
# Support any sequence type that can be converted to a list
argv = list(argv)
shortopts = ""
longopts = []
fl = self.FlagDict()
# This pre-parses the argv list for --flagfile=<> options.
argv = self.ReadFlagsFromFiles(argv)
# Correct the argv to support the google style of passing boolean
# parameters. Boolean parameters may be passed by using --mybool,
# --nomybool, --mybool=(true|false|1|0). getopt does not support
# having options that may or may not have a parameter. We replace
# instances of the short form --mybool and --nomybool with their
# full forms: --mybool=(true|false).
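# For instance (illustrative flag names): ['prog', '--debug', '--nocache']
# becomes ['prog', '--debug=true', '--cache=false'] before getopt sees it.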
original_argv = list(argv) # list() makes a copy
shortest_matches = None
for name, flag in fl.items():
if not flag.boolean:
continue
if shortest_matches is None:
# Determine the smallest allowable prefix for all flag names
shortest_matches = self.ShortestUniquePrefixes(fl)
no_name = 'no' + name
prefix = shortest_matches[name]
no_prefix = shortest_matches[no_name]
# Replace all occurrences of this boolean with extended forms
for arg_idx in range(1, len(argv)):
arg = argv[arg_idx]
if arg.find('=') >= 0: continue
if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
argv[arg_idx] = ('--%s=true' % name)
elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
argv[arg_idx] = ('--%s=false' % name)
# Loop over all of the flags, building up the lists of short options
# and long options that will be passed to getopt. Short options are
# specified as a string of letters, each letter followed by a colon
# if it takes an argument. Long options are stored in an array of
# strings. Each string ends with an '=' if it takes an argument.
for name, flag in fl.items():
longopts.append(name + "=")
if len(name) == 1: # one-letter option: allow short flag type also
shortopts += name
if not flag.boolean:
shortopts += ":"
longopts.append('undefok=')
undefok_flags = []
# In case --undefok is specified, loop to pick up unrecognized
# options one by one.
unrecognized_opts = []
args = argv[1:]
while True:
try:
if self.__dict__['__use_gnu_getopt']:
optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
else:
optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
break
except getopt.GetoptError, e:
if not e.opt or e.opt in fl:
# Not an unrecognized option, reraise the exception as a FlagsError
raise FlagsError(e)
# Handle an unrecognized option.
unrecognized_opts.append(e.opt)
# Remove offender from args and try again
for arg_index in range(len(args)):
if ((args[arg_index] == '--' + e.opt) or
(args[arg_index] == '-' + e.opt) or
args[arg_index].startswith('--' + e.opt + '=')):
args = args[0:arg_index] + args[arg_index+1:]
break
else:
# We should have found the option, so we don't expect to get
# here. We could assert, but raising the original exception
# might work better.
raise FlagsError(e)
for name, arg in optlist:
if name == '--undefok':
flag_names = arg.split(',')
undefok_flags.extend(flag_names)
# For boolean flags, if --undefok=boolflag is specified, then we should
# also accept --noboolflag, in addition to --boolflag.
# Since we don't know the type of the undefok'd flag, this will affect
# non-boolean flags as well.
# NOTE: You shouldn't use --undefok=noboolflag, because then we will
# accept --nonoboolflag here. We are choosing not to do the conversion
# from noboolflag -> boolflag because of the ambiguity that flag names
# can start with 'no'.
undefok_flags.extend('no' + name for name in flag_names)
continue
if name.startswith('--'):
# long option
name = name[2:]
short_option = 0
else:
# short option
name = name[1:]
short_option = 1
if fl.has_key(name):
flag = fl[name]
if flag.boolean and short_option: arg = 1
flag.Parse(arg)
# If there were unrecognized options, raise an exception unless
# the options were named via --undefok.
for opt in unrecognized_opts:
if opt not in undefok_flags:
raise UnrecognizedFlagError(opt)
if unparsed_args:
if self.__dict__['__use_gnu_getopt']:
# if using gnu_getopt just return the program name + remainder of argv.
return argv[:1] + unparsed_args
else:
# unparsed_args becomes the first non-flag detected by getopt to
# the end of argv. Because argv may have been modified above,
# return original_argv for this region.
return argv[:1] + original_argv[-len(unparsed_args):]
else:
return argv[:1]
def Reset(self):
"""Resets the values to the point before FLAGS(argv) was called."""
for f in self.FlagDict().values():
f.Unparse()
def RegisteredFlags(self):
"""Returns: a list of the names and short names of all registered flags."""
return self.FlagDict().keys()
def FlagValuesDict(self):
"""Returns: a dictionary that maps flag names to flag values."""
flag_values = {}
for flag_name in self.RegisteredFlags():
flag = self.FlagDict()[flag_name]
flag_values[flag_name] = flag.value
return flag_values
def __str__(self):
"""Generates a help string for all known flags."""
return self.GetHelp()
def GetHelp(self, prefix=''):
"""Generates a help string for all known flags."""
helplist = []
flags_by_module = self.FlagsByModuleDict()
if flags_by_module:
modules = flags_by_module.keys()
modules.sort()
# Print the help for the main module first, if possible.
main_module = _GetMainModule()
if main_module in modules:
modules.remove(main_module)
modules = [main_module] + modules
for module in modules:
self.__RenderOurModuleFlags(module, helplist)
self.__RenderModuleFlags('gflags',
_SPECIAL_FLAGS.FlagDict().values(),
helplist)
else:
# Just print one long list of flags.
self.__RenderFlagList(
self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
helplist, prefix)
return '\n'.join(helplist)
def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
"""Generates a help string for a given module."""
output_lines.append('\n%s%s:' % (prefix, module))
self.__RenderFlagList(flags, output_lines, prefix + " ")
def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
"""Generates a help string for a given module."""
flags = self._GetFlagsDefinedByModule(module)
if flags:
self.__RenderModuleFlags(module, flags, output_lines, prefix)
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
"""Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line.
"""
key_flags = self._GetKeyFlagsForModule(module)
if key_flags:
self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
def MainModuleHelp(self):
"""Returns: A string describing the key flags of the main module."""
helplist = []
self.__RenderOurModuleKeyFlags(_GetMainModule(), helplist)
return '\n'.join(helplist)
def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
fl = self.FlagDict()
special_fl = _SPECIAL_FLAGS.FlagDict()
flaglist = [(flag.name, flag) for flag in flaglist]
flaglist.sort()
flagset = {}
for (name, flag) in flaglist:
# It's possible this flag got deleted or overridden since being
# registered in the per-module flaglist. Check now against the
# canonical source of current flag information, the FlagDict.
if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
# a different flag is using this name now
continue
# only print help once
if flagset.has_key(flag): continue
flagset[flag] = 1
flaghelp = ""
if flag.short_name: flaghelp += "-%s," % flag.short_name
if flag.boolean:
flaghelp += "--[no]%s" % flag.name + ":"
else:
flaghelp += "--%s" % flag.name + ":"
flaghelp += " "
if flag.help:
flaghelp += flag.help
flaghelp = TextWrap(flaghelp, indent=prefix+" ",
firstline_indent=prefix)
if flag.default_as_str:
flaghelp += "\n"
flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
indent=prefix+" ")
if flag.parser.syntactic_help:
flaghelp += "\n"
flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
indent=prefix+" ")
output_lines.append(flaghelp)
def get(self, name, default):
"""Returns the value of a flag (if not None) or a default value.
Args:
name: A string, the name of a flag.
default: Default value to use if the flag value is None.
"""
value = self.__getattr__(name)
if value is not None: # Can't do if not value, b/c value might be '0' or ""
return value
else:
return default
def ShortestUniquePrefixes(self, fl):
"""Returns: dictionary; maps flag names to their shortest unique prefix."""
# Sort the list of flag names
sorted_flags = []
for name, flag in fl.items():
sorted_flags.append(name)
if flag.boolean:
sorted_flags.append('no%s' % name)
sorted_flags.sort()
# For each name in the sorted list, determine the shortest unique
# prefix by comparing itself to the next name and to the previous
# name (the latter check uses cached info from the previous loop).
shortest_matches = {}
prev_idx = 0
for flag_idx in range(len(sorted_flags)):
curr = sorted_flags[flag_idx]
if flag_idx == (len(sorted_flags) - 1):
next = None
else:
next = sorted_flags[flag_idx+1]
next_len = len(next)
for curr_idx in range(len(curr)):
if (next is None
or curr_idx >= next_len
or curr[curr_idx] != next[curr_idx]):
# curr longer than next or no more chars in common
shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
prev_idx = curr_idx
break
else:
# curr shorter than (or equal to) next
shortest_matches[curr] = curr
prev_idx = curr_idx + 1 # next will need at least one more char
return shortest_matches
def __IsFlagFileDirective(self, flag_string):
"""Checks whether flag_string contain a --flagfile=<foo> directive."""
if isinstance(flag_string, type("")):
if flag_string.startswith('--flagfile='):
return 1
elif flag_string == '--flagfile':
return 1
elif flag_string.startswith('-flagfile='):
return 1
elif flag_string == '-flagfile':
return 1
else:
return 0
return 0
def ExtractFilename(self, flagfile_str):
"""Returns filename from a flagfile_str of form -[-]flagfile=filename.
The cases of --flagfile foo and -flagfile foo shouldn't be hitting
this function, as they are dealt with in the level above this
function.
"""
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
def __GetFlagFileLines(self, filename, parsed_file_list):
"""Returns the useful (!=comments, etc) lines from a file with flags.
Args:
filename: A string, the name of the flag file.
parsed_file_list: A list of the names of the files we have
already read. MUTATED BY THIS FUNCTION.
Returns:
List of strings. See the note below.
NOTE(springer): This function checks for a nested --flagfile=<foo>
tag and handles the lower file recursively. It returns a list of
all the lines that _could_ contain command flags. This is
EVERYTHING except whitespace lines and comments (lines starting
with '#' or '//').
"""
line_list = [] # All lines from the flagfile.
flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags.
try:
file_obj = open(filename, 'r')
except IOError, e_msg:
print e_msg
print 'ERROR:: Unable to open flagfile: %s' % (filename)
return flag_line_list
line_list = file_obj.readlines()
file_obj.close()
parsed_file_list.append(filename)
# This is where we check each line in the file we just read.
for line in line_list:
if line.isspace():
pass
# Checks for comment (a line that starts with '#').
elif line.startswith('#') or line.startswith('//'):
pass
# Checks for a nested "--flagfile=<bar>" flag in the current file.
# If we find one, recursively parse down into that file.
elif self.__IsFlagFileDirective(line):
sub_filename = self.ExtractFilename(line)
# We do a little safety check for reparsing a file we've already done.
if not sub_filename in parsed_file_list:
included_flags = self.__GetFlagFileLines(sub_filename,
parsed_file_list)
flag_line_list.extend(included_flags)
else: # Case of hitting a circularly included file.
print >>sys.stderr, ('Warning: Hit circular flagfile dependency: %s'
% sub_filename)
else:
# Any line that's not a comment or a nested flagfile should get
# copied into 2nd position. This leaves earlier arguments
# further back in the list, thus giving them higher priority.
flag_line_list.append(line.strip())
return flag_line_list
def ReadFlagsFromFiles(self, argv):
"""Processes command line args, but also allow args to be read from file.
Args:
argv: A list of strings, usually sys.argv, which may contain one
or more flagfile directives of the form --flagfile="./filename".
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
References: Global gflags.FLAG class instance.
This function should be called before the normal FLAGS(argv) call.
This function scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list between the
first item of the list and any subsequent items in the list.
Note that your application's flags are still defined the usual way
using gflags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> Flags from the command line argv _should_ always take precedence!
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be processed after the parent flag file is done.
--> For duplicate flags, first one we hit should "win".
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored.
"""
parsed_file_list = []
rest_of_args = argv
new_argv = []
while rest_of_args:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
if self.__IsFlagFileDirective(current_arg):
# This handles the case of -(-)flagfile foo. In this case the
# next arg really is part of this one.
if current_arg == '--flagfile' or current_arg == '-flagfile':
if not rest_of_args:
raise IllegalFlagValue('--flagfile with no argument')
flag_filename = os.path.expanduser(rest_of_args[0])
rest_of_args = rest_of_args[1:]
else:
# This handles the case of (-)-flagfile=foo.
flag_filename = self.ExtractFilename(current_arg)
new_argv = (new_argv[:1] +
self.__GetFlagFileLines(flag_filename, parsed_file_list) +
new_argv[1:])
else:
new_argv.append(current_arg)
return new_argv
def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ function
CommandlineFlagsIntoString from google3/base/commandlineflags.cc.
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.Serialize() + '\n'
return s
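# Illustrative output for two hypothetical flags: "--name=alice\n--nodebug\n"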
def AppendFlagsIntoFile(self, filename):
"""Appends all flags assignments from this FlagInfo object to a file.
Output will be in the format of a flagfile.
NOTE: MUST mirror the behavior of the C++ version of
AppendFlagsIntoFile from google3/base/commandlineflags.cc.
"""
out_file = open(filename, 'a')
out_file.write(self.FlagsIntoString())
out_file.close()
def WriteHelpInXMLFormat(self, outfile=None):
"""Outputs flag documentation in XML format.
NOTE: We use element names that are consistent with those used by
the C++ command-line flag library, from
google3/base/commandlineflags_reporting.cc. We also use a few new
elements (e.g., <key>), but we do not interfere / overlap with
existing XML elements used by the C++ library. Please maintain this
consistency.
Args:
outfile: File object we write to. Default None means sys.stdout.
"""
outfile = outfile or sys.stdout
outfile.write('<?xml version=\"1.0\"?>\n')
outfile.write('<AllFlags>\n')
indent = ' '
_WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
indent)
usage_doc = sys.modules['__main__'].__doc__
if not usage_doc:
usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
else:
usage_doc = usage_doc.replace('%s', sys.argv[0])
_WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)
# Get list of key flags for the main module.
key_flags = self._GetKeyFlagsForModule(_GetMainModule())
# Sort flags by declaring module name and next by flag name.
flags_by_module = self.FlagsByModuleDict()
all_module_names = list(flags_by_module.keys())
all_module_names.sort()
for module_name in all_module_names:
flag_list = [(f.name, f) for f in flags_by_module[module_name]]
flag_list.sort()
for unused_flag_name, flag in flag_list:
is_key = flag in key_flags
flag.WriteInfoInXMLFormat(outfile, module_name,
is_key=is_key, indent=indent)
outfile.write('</AllFlags>\n')
outfile.flush()
# end of FlagValues definition
# The global FlagValues instance
FLAGS = FlagValues()
def _MakeXMLSafe(s):
"""Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
s = cgi.escape(s) # Escape <, >, and &
# Remove characters that cannot appear in an XML 1.0 document
# (http://www.w3.org/TR/REC-xml/#charsets).
#
# NOTE: if there are problems with current solution, one may move to
# XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
return s
def _WriteSimpleXMLElement(outfile, name, value, indent):
"""Writes a simple XML element.
Args:
outfile: File object we write the XML element to.
name: A string, the name of XML element.
value: A Python object, whose string representation will be used
as the value of the XML element.
indent: A string, prepended to each line of generated output.
"""
value_str = str(value)
if isinstance(value, bool):
# Display boolean values as the C++ flag library does: no caps.
value_str = value_str.lower()
outfile.write('%s<%s>%s</%s>\n' %
(indent, name, _MakeXMLSafe(value_str), name))
class Flag:
"""Information about a command-line flag.
'Flag' objects define the following fields:
.name - the name for this flag
.default - the default value for this flag
.default_as_str - default value as repr'd string, e.g., "'true'" (or None)
.value - the most recent parsed value of this flag; set by Parse()
.help - a help string or None if no help is available
.short_name - the single letter alias for this flag (or None)
.boolean - if 'true', this flag does not accept arguments
.present - true if this flag was parsed from command line flags.
.parser - an ArgumentParser object
.serializer - an ArgumentSerializer object
.allow_override - the flag may be redefined without raising an error
The only public method of a 'Flag' object is Parse(), but it is
typically only called by a 'FlagValues' object. The Parse() method is
a thin wrapper around the 'ArgumentParser' Parse() method. The parsed
value is saved in .value, and the .present attribute is updated. If
this flag was already present, a FlagsError is raised.
Parse() is also called during __init__ to parse the default value and
initialize the .value attribute. This enables other python modules to
safely use flags even if the __main__ module neglects to parse the
command line arguments. The .present attribute is cleared after
__init__ parsing. If the default value is set to None, then the
__init__ parsing step is skipped and the .value attribute is
initialized to None.
Note: The default value is also presented to the user in the help
string, so it is important that it be a legal value for this flag.
"""
def __init__(self, parser, serializer, name, default, help_string,
short_name=None, boolean=0, allow_override=0):
self.name = name
if not help_string:
help_string = '(no help available)'
self.help = help_string
self.short_name = short_name
self.boolean = boolean
self.present = 0
self.parser = parser
self.serializer = serializer
self.allow_override = allow_override
self.value = None
self.SetDefault(default)
def __GetParsedValueAsString(self, value):
if value is None:
return None
if self.serializer:
return repr(self.serializer.Serialize(value))
if self.boolean:
if value:
return repr('true')
else:
return repr('false')
return repr(str(value))
def Parse(self, argument):
try:
self.value = self.parser.Parse(argument)
except ValueError, e: # recast ValueError as IllegalFlagValue
raise IllegalFlagValue("flag --%s: %s" % (self.name, e))
self.present += 1
def Unparse(self):
if self.default is None:
self.value = None
else:
self.Parse(self.default)
self.present = 0
def Serialize(self):
if self.value is None:
return ''
if self.boolean:
if self.value:
return "--%s" % self.name
else:
return "--no%s" % self.name
else:
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))
def SetDefault(self, value):
"""Changes the default value (and current value too) for this Flag."""
# We can't allow a None override because it may end up not being
# passed to C++ code when we're overriding C++ flags. So we
# cowardly bail out until someone fixes the semantics of trying to
# pass None to a C++ flag. See swig_flags.Init() for details on
# this behavior.
if value is None and self.allow_override:
raise DuplicateFlag(self.name)
self.default = value
self.Unparse()
self.default_as_str = self.__GetParsedValueAsString(self.value)
def Type(self):
"""Returns: a string that describes the type of this Flag."""
# NOTE: we use strings, and not the types.*Type constants because
# our flags can have more exotic types, e.g., 'comma separated list
# of strings', 'whitespace separated list of strings', etc.
return self.parser.Type()
def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
"""Writes common info about this flag, in XML format.
This is information that is relevant to all flags (e.g., name,
meaning, etc.). If you defined a flag that has some other pieces of
info, then please override _WriteCustomInfoInXMLFormat.
Please do NOT override this method.
Args:
outfile: File object we write to.
module_name: A string, the name of the module that defines this flag.
is_key: A boolean, True iff this flag is key for main module.
indent: A string that is prepended to each generated line.
"""
outfile.write(indent + '<flag>\n')
inner_indent = indent + ' '
if is_key:
_WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
_WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
# Print flag features that are relevant for all flags.
_WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
if self.short_name:
_WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
inner_indent)
if self.help:
_WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
_WriteSimpleXMLElement(outfile, 'default', self.default, inner_indent)
_WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
_WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
# Print extra flag features this flag may have.
self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
outfile.write(indent + '</flag>\n')
def _WriteCustomInfoInXMLFormat(self, outfile, indent):
"""Writes extra info about this flag, in XML format.
"Extra" means "not already printed by WriteInfoInXMLFormat above."
Args:
outfile: File object we write to.
indent: A string that is prepended to each generated line.
"""
# Usually, the parser knows the extra details about the flag, so
# we just forward the call to it.
self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
# End of Flag definition
class ArgumentParser:
"""Base class used to parse and convert arguments.
The Parse() method checks to make sure that the string argument is a
legal value and converts it to a native type. If the value cannot be
converted, it should throw a 'ValueError' exception with a human
readable explanation of why the value is illegal.
Subclasses should also define a syntactic_help string which may be
presented to the user to describe the form of the legal values.
"""
syntactic_help = ""
def Parse(self, argument):
"""Default implementation: always returns its argument unmodified."""
return argument
def Type(self):
return 'string'
def WriteCustomInfoInXMLFormat(self, outfile, indent):
pass
class ArgumentSerializer:
"""Base class for generating string representations of a flag value."""
def Serialize(self, value):
return str(value)
class ListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def Serialize(self, value):
return self.list_sep.join([str(x) for x in value])
# The DEFINE functions are explained in more detail in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
**args):
"""Registers a generic Flag object.
NOTE: in the docstrings of all DEFINE* functions, "registers" is short
for "creates a new flag and registers it".
Auxiliary function: clients should use the specialized DEFINE_<type>
function instead.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object the flag will be registered with.
serializer: ArgumentSerializer that serializes the flag value.
args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
DEFINE_flag(Flag(parser, serializer, name, default, help, **args),
flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
"""Registers a 'Flag' object with a 'FlagValues' object.
By default, the global FLAGS 'FlagValue' object is used.
Typical users will use one of the more specialized DEFINE_xxx
functions, such as DEFINE_string or DEFINE_integer. But developers
who need to create Flag objects themselves should use this function
to register their flags.
"""
# copying the reference to flag_values prevents pychecker warnings
fv = flag_values
fv[flag.name] = flag
# Tell flag_values who's defining the flag.
if isinstance(flag_values, FlagValues):
# Regarding the above isinstance test: some users pass funny
# values of flag_values (e.g., {}) in order to avoid the flag
# registration (in the past, there used to be a flag_values ==
# FLAGS test here) and redefine flags with the same name (e.g.,
# debug). To avoid breaking their code, we perform the
# registration only if flag_values is a real FlagValues object.
flag_values._RegisterFlagByModule(_GetCallingModule(), flag)
def _InternalDeclareKeyFlags(flag_names, flag_values=FLAGS):
"""Declares a flag as key for the calling module.
Internal function. User code should call DECLARE_key_flag or
ADOPT_module_key_flags instead.
Args:
flag_names: A list of strings that are names of already-registered
Flag objects.
flag_values: A FlagValues object. This should almost never need
to be overridden.
Raises:
UnrecognizedFlagError: when we refer to a flag that was not
defined yet.
"""
module = _GetCallingModule()
for flag_name in flag_names:
if flag_name not in flag_values:
raise UnrecognizedFlagError(flag_name)
flag = flag_values.FlagDict()[flag_name]
flag_values._RegisterKeyFlagForModule(module, flag)
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
"""Declares one flag as key to the current module.
Key flags are flags that are deemed really important for a module.
They are important when listing help messages; e.g., if the
--helpshort command-line flag is used, then only the key flags of the
main module are listed (instead of all flags, as in the case of
--help).
Sample usage:
flags.DECLARE_key_flag('flag_1')
Args:
flag_name: A string, the name of an already declared flag.
(Redeclaring flags as key, including flags implicitly key
because they were declared in this module, is a no-op.)
flag_values: A FlagValues object. This should almost never
need to be overridden.
"""
_InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
def ADOPT_module_key_flags(module, flag_values=FLAGS):
"""Declares that all flags key to a module are key to the current module.
Args:
module: A module object.
flag_values: A FlagValues object. This should almost never need
to be overridden.
Raises:
FlagsError: When given an argument that is a module name (a
string), instead of a module object.
"""
# NOTE(salcianu): an even better test would be if not
# isinstance(module, types.ModuleType) but I didn't want to import
# types for such a tiny use.
if isinstance(module, str):
raise FlagsError('Received module name %s; expected a module object.'
% module)
_InternalDeclareKeyFlags(
[f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
flag_values=flag_values)
#
# STRING FLAGS
#
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value can be any string."""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
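# Hedged usage sketch (flag name and values are made up):
#   DEFINE_string('server', 'localhost', 'Host to connect to.')
#   # after FLAGS(sys.argv), the value is available as FLAGS.server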
#
# BOOLEAN FLAGS
#
# and the special HELP flags.
class BooleanParser(ArgumentParser):
"""Parser of boolean values."""
def Convert(self, argument):
"""Converts the argument to a boolean; raise ValueError on errors."""
if type(argument) == str:
if argument.lower() in ['true', 't', '1']:
return True
elif argument.lower() in ['false', 'f', '0']:
return False
bool_argument = bool(argument)
if argument == bool_argument:
# The argument is a valid boolean (True, False, 0, or 1), and not just
# something that always converts to bool (list, string, int, etc.).
return bool_argument
raise ValueError('Non-boolean argument to boolean flag', argument)
def Parse(self, argument):
val = self.Convert(argument)
return val
def Type(self):
return 'bool'
class BooleanFlag(Flag):
"""Basic boolean flag.
Boolean flags do not take any arguments, and their value is either
True (1) or False (0). The false value is specified on the command
line by prepending the word 'no' to either the long or the short flag
name.
For example, if a Boolean flag was created whose long name was
'update' and whose short name was 'x', then this flag could be
explicitly unset through either --noupdate or --nox.
"""
def __init__(self, name, default, help, short_name=None, **args):
p = BooleanParser()
Flag.__init__(self, p, None, name, default, help, short_name, 1, **args)
if not self.help: self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
"""Registers a boolean flag.
Such a boolean flag does not take an argument. If a user wants to
specify a false value explicitly, the long option beginning with 'no'
must be used: i.e. --noflag
This flag will have a value of None, True or False. None is possible
if default=None and the user does not specify the flag on the command
line.
"""
DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)
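# Hedged usage sketch: DEFINE_boolean('debug', False, 'Produce debug output.')
# accepts --debug, --nodebug, or --debug=true|false|1|0 on the command line.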
# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
class HelpFlag(BooleanFlag):
"""
HelpFlag is a special boolean flag that prints usage information and
raises a SystemExit exception if it is ever found in the command
line arguments. Note this is called with allow_override=1, so other
apps can define their own --help flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "help", 0, "show this help",
short_name="?", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = str(FLAGS)
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
class HelpXMLFlag(BooleanFlag):
"""Similar to HelpFlag, but generates output in XML format."""
def __init__(self):
BooleanFlag.__init__(self, 'helpxml', False,
'like --help, but generates XML output',
allow_override=1)
def Parse(self, arg):
if arg:
FLAGS.WriteHelpInXMLFormat(sys.stdout)
sys.exit(1)
class HelpshortFlag(BooleanFlag):
"""
HelpshortFlag is a special boolean flag that prints usage
information for the "main" module, and rasies a SystemExit exception
if it is ever found in the command line arguments. Note this is
called with allow_override=1, so other apps can define their own
--helpshort flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "helpshort", 0,
"show usage only for this module", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = FLAGS.MainModuleHelp()
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
#
# FLOAT FLAGS
#
class FloatParser(ArgumentParser):
"""Parser of floating point values.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = "a"
number_name = "number"
syntactic_help = " ".join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound != None and upper_bound != None:
sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
elif lower_bound == 1:
sh = "a positive %s" % self.number_name
elif upper_bound == -1:
sh = "a negative %s" % self.number_name
elif lower_bound == 0:
sh = "a non-negative %s" % self.number_name
elif upper_bound != None:
sh = "%s <= %s" % (self.number_name, upper_bound)
elif lower_bound != None:
sh = "%s >= %s" % (self.number_name, lower_bound)
self.syntactic_help = sh
def Convert(self, argument):
"""Converts argument to a float; raises ValueError on errors."""
return float(argument)
def Parse(self, argument):
val = self.Convert(argument)
if ((self.lower_bound != None and val < self.lower_bound) or
(self.upper_bound != None and val > self.upper_bound)):
raise ValueError("%s is not %s" % (val, self.syntactic_help))
return val
def Type(self):
return 'float'
def WriteCustomInfoInXMLFormat(self, outfile, indent):
if self.lower_bound is not None:
_WriteSimpleXMLElement(outfile, 'lower_bound', self.lower_bound, indent)
if self.upper_bound is not None:
_WriteSimpleXMLElement(outfile, 'upper_bound', self.upper_bound, indent)
# End of FloatParser
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value must be a float.
If lower_bound or upper_bound are set, then this flag must be
within the given range.
"""
parser = FloatParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
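# Hedged usage sketch: DEFINE_float('rate', 0.5, 'Sampling rate.',
# lower_bound=0.0, upper_bound=1.0) rejects --rate=2.0 at parse time.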
#
# INTEGER FLAGS
#
class IntegerParser(FloatParser):
"""Parser of an integer value.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = "an"
number_name = "integer"
syntactic_help = " ".join((number_article, number_name))
def Convert(self, argument):
__pychecker__ = 'no-returnvalues'
if type(argument) == str:
base = 10
if len(argument) > 2 and argument[0] == "0" and argument[1] == "x":
base = 16
try:
return int(argument, base)
# ValueError is thrown when argument is a string, and overflows an int.
except ValueError:
return long(argument, base)
else:
try:
return int(argument)
# OverflowError is thrown when argument is numeric, and overflows an int.
except OverflowError:
return long(argument)
def Type(self):
return 'int'
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value must be an integer.
If lower_bound, or upper_bound are set, then this flag must be
within the given range.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
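# Hedged usage sketch: DEFINE_integer('port', 8080, 'Port to listen on.');
# IntegerParser also accepts hex strings such as --port=0x1F90.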
#
# ENUM FLAGS
#
class EnumParser(ArgumentParser):
"""Parser of a string enum value (a string value from a given set).
If enum_values (see below) is not specified, any string is allowed.
"""
def __init__(self, enum_values=None):
self.enum_values = enum_values
def Parse(self, argument):
if self.enum_values and argument not in self.enum_values:
raise ValueError("value should be one of <%s>" %
"|".join(self.enum_values))
return argument
def Type(self):
return 'string enum'
class EnumFlag(Flag):
"""Basic enum flag; its value can be any string from list of enum_values."""
def __init__(self, name, default, help, enum_values=None,
short_name=None, **args):
enum_values = enum_values or []
p = EnumParser(enum_values)
g = ArgumentSerializer()
Flag.__init__(self, p, g, name, default, help, short_name, **args)
if not self.help: self.help = "an enum string"
self.help = "<%s>: %s" % ("|".join(enum_values), self.help)
def _WriteCustomInfoInXMLFormat(self, outfile, indent):
for enum_value in self.parser.enum_values:
_WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
**args):
"""Registers a flag whose value can be any string from enum_values."""
DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
flag_values)
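# Hedged usage sketch:
#   DEFINE_enum('color', 'red', ['red', 'green', 'blue'], 'Favorite color.')
#   # --color=purple raises IllegalFlagValue at parse time.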
#
# LIST FLAGS
#
class BaseListParser(ArgumentParser):
"""Base class for a parser of lists of strings.
To extend, inherit from this class; from the subclass __init__, call
BaseListParser.__init__(self, token, name)
where token is a character used to tokenize, and name is a description
of the separator.
"""
def __init__(self, token=None, name=None):
assert name
self._token = token
self._name = name
self.syntactic_help = "a %s separated list" % self._name
def Parse(self, argument):
if isinstance(argument, list):
return argument
elif argument == '':
return []
else:
return [s.strip() for s in argument.split(self._token)]
def Type(self):
return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
"""Parser for a comma-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, ',', 'comma')
def WriteCustomInfoInXMLFormat(self, outfile, indent):
BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
_WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
class WhitespaceSeparatedListParser(BaseListParser):
"""Parser for a whitespace-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, None, 'whitespace')
def WriteCustomInfoInXMLFormat(self, outfile, indent):
BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
separators = list(string.whitespace)
separators.sort()
for ws_char in string.whitespace:
_WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a comma-separated list of strings."""
parser = ListParser()
serializer = ListSerializer(',')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
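# Hedged usage sketch: DEFINE_list('users', ['alice'], 'Users to notify.');
# --users=alice,bob parses to ['alice', 'bob'].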
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a whitespace-separated list of strings.
Any whitespace can be used as a separator.
"""
parser = WhitespaceSeparatedListParser()
serializer = ListSerializer(' ')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
#
# MULTI FLAGS
#
class MultiFlag(Flag):
"""A flag that can appear multiple time on the command-line.
The value of such a flag is a list that contains the individual values
from all the appearances of that flag on the command-line.
See the __doc__ for Flag for most behavior of this class. Only
differences in behavior are described here:
* The default value may be either a single value or a list of values.
A single value is interpreted as the [value] singleton list.
* The value of the flag is always a list, even if the option was
only supplied once, and even if the default value is a single
value
"""
def __init__(self, *args, **kwargs):
Flag.__init__(self, *args, **kwargs)
self.help += ';\n repeat this option to specify a list of values'
def Parse(self, arguments):
"""Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item.
"""
if not isinstance(arguments, list):
# Default value may be a list of values. Most other arguments
# will not be, so convert them into a single-item list to make
# processing simpler below.
arguments = [arguments]
if self.present:
# keep a backup reference to list of previously supplied option values
values = self.value
else:
# "erase" the defaults with an empty list
values = []
for item in arguments:
# have Flag superclass parse argument, overwriting self.value reference
Flag.Parse(self, item) # also increments self.present
values.append(self.value)
# put list of option values back in the 'value' attribute
self.value = values
def Serialize(self):
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
if self.value is None:
return ''
s = ''
multi_value = self.value
for self.value in multi_value:
if s: s += ' '
s += Flag.Serialize(self)
self.value = multi_value
return s
def Type(self):
return 'multi ' + self.parser.Type()
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
**args):
"""Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
"""
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of any strings.
Use the flag on the command line multiple times to place multiple
string values into the list. The 'default' may be a single string
(which will be converted into a single-element list) or a list of
strings.
"""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
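# Hedged usage sketch: with DEFINE_multistring('tag', [], 'Tags to apply.'),
# passing --tag=a --tag=b yields FLAGS.tag == ['a', 'b'].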
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of arbitrary integers.
Use the flag on the command line multiple times to place multiple
integer values into the list. The 'default' may be a single integer
(which will be converted into a single-element list) or a list of
integers.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
DEFINE_flag(HelpXMLFlag())
# Define special flags here so that help may be generated for them.
_SPECIAL_FLAGS = FlagValues()
DEFINE_string(
'flagfile', "",
"Insert flag definitions from the given file into the command line.",
_SPECIAL_FLAGS)
DEFINE_string(
'undefok', "",
"comma-separated list of flag names that it is okay to specify "
"on the command line even if the program does not define a flag "
"with that name. IMPORTANT: flags in this list that have "
"arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
| 36.092993 | 80 | 0.684854 |
4c9ac512b465522f228ef340e17f6559998f9fc2
| 800 |
py
|
Python
|
journal/models/__init__.py
|
kevinlee12/cas
|
1284d5a05731e441d523a4894a28e8a194c491f0
|
[
"Apache-2.0"
] | null | null | null |
journal/models/__init__.py
|
kevinlee12/cas
|
1284d5a05731e441d523a4894a28e8a194c491f0
|
[
"Apache-2.0"
] | 3 |
2015-04-19T03:00:57.000Z
|
2015-04-19T03:02:20.000Z
|
journal/models/__init__.py
|
kevinlee12/cas
|
1284d5a05731e441d523a4894a28e8a194c491f0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2015 The iU Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Place all imports here, in alphabetical order
# Naming convention: <model name>_models.py
from .activity_models import *
from .entry_models import *
from .users_models import *
| 34.782609 | 74 | 0.76125 |
0db21852b121da5fd8bbaccc2686f59fa3926e57
| 6,559 |
py
|
Python
|
pythonModules.py
|
ahmedelg/pythonModules
|
fe5779a7aea4224fc5658a4557c4be92b8343fe8
|
[
"MIT"
] | null | null | null |
pythonModules.py
|
ahmedelg/pythonModules
|
fe5779a7aea4224fc5658a4557c4be92b8343fe8
|
[
"MIT"
] | null | null | null |
pythonModules.py
|
ahmedelg/pythonModules
|
fe5779a7aea4224fc5658a4557c4be92b8343fe8
|
[
"MIT"
] | null | null | null |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import csv
from urllib.error import URLError
from urllib.error import HTTPError
# button-group--pagination
# unstyled
# https://pypi.org/search/?c=Topic+%3A%3A+Software+Development+%3A%3A+Libraries+%3A%3A+Python+Modules&page=3
# https://pypi.org/search/?c=Topic+%3A%3A+Software+Development+%3A%3A+Libraries+%3A%3A+Python+Modules&page=500
# url = 'https://pypi.org/search/?c=Topic+%3A%3A+Software+Development+%3A%3A+Libraries+%3A%3A+Python+Modules'
# path = 'full_python_modules.csv'
def module_info(res):
# Early draft; extractModules() below does the full extraction.
modules_cnt = res.find(class_='unstyled').find_all('li')
return modules_cnt
def new_bs(url):
try:
res = urlopen(url)
except URLError as e:
print('server error!', e)
return None
except HTTPError as e:
print('response error!', e)
return None
html = res.read()
return BeautifulSoup(html, 'html.parser')
# bs = new_bs('https://pypi.org/search/?c=Topic+%3A%3A+Software+Development+%3A%3A+Libraries+%3A%3A+Python+Modules&page=500')
# module_info(bs)
def storePyModule(path, url):
# Early draft kept for reference; the main flow uses storePageModulesCsv() below.
bs = new_bs(url)
if bs is None:
print('error in url: ', url)
return
data = module_info(bs)
with open(path, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
for module in data:
info = get_module_data(module)
csv_writer.writerow([info['name'], info['version'],
info['released_time'], info['description'], info['link']])
# Get number of latest-page
def latest_page_number():
return 2
# Get number of first-page
def first_page_number():
return 1
# extract modules-data
# name, version, time, description, link
# [ {name:, version:, time: ,desc:, link}, {same} ]
# get name of module
def module_name(module):
try:
name = module.find(
class_='package-snippet__title').find(class_='package-snippet__name').get_text().strip()
return name
except AttributeError:
return "doesn't has a name!"
def module_version(module):
try:
version = module.find(
class_='package-snippet__title').find(class_='package-snippet__version').get_text().strip()
return version
except AttributeError:
return "doesn't has a version!"
def module_time(module):
try:
released_time = module.find(
class_='package-snippet__title').find(class_='package-snippet__released')
# if released_time.has_attr('title'):
# released_time = released_time.attrs['title']
# else:
# released_time = released_time.get_text()
# print(released_time.attrs['title'])
return released_time.get_text().strip('\n').strip()
except AttributeError:
return "doesn't has a released time!"
def module_desc(module):
try:
description = module.find(
class_='package-snippet__description').get_text().strip()
return description
except AttributeError:
return "doesn't has a description!"
def module_link(module):
link = 'https://pypi.org/'
module_link = module.find('a', {'class': 'package-snippet'})
if(module_link != None):
if module_link.has_attr('href'):
link += module_link.attrs['href']
else:
link = 'none!'
return link
else:
return "doesn't has link!"
def get_module_data(module):
name = module_name(module)
version = module_version(module)
time = module_time(module)
desc = module_desc(module)
link = module_link(module)
module_data = {'name': name, 'version': version,
'released_time': time, 'description': desc, 'link': link}
# print('module_data: ')
# print('')
return module_data
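# Illustrative return value (field values are made up):
# {'name': 'requests', 'version': '2.31.0', 'released_time': 'Jan 1, 2024',
#  'description': 'HTTP for Humans.', 'link': 'https://pypi.org/project/requests/'}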
def extractModules(modulesSoup):
# modules-data
modules_data = []
# fetch all modules in page
modules_cnt = modulesSoup.find(
class_='unstyled').find_all('li') # <li> tag
# calc number of modules in page
numberFModules = len(modules_cnt)
# Extract each module one by one
for module in modules_cnt:
# get module-date
module_data = get_module_data(module) # {}
# insert module-data
modules_data.append(module_data)
return modules_data
# Save page-modules-data
def storePageModulesCsv(modules_data):
# print(modules_data[0]['name'])
# notificationcenter
# wunderpy2
# entry-logger-sanic
path = 'python_libraries.csv'
try:
with open(path, 'a', newline='') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
for module in modules_data:
module_row = [module['name'], module['version'],
module['released_time'], module['description'], module['link']]
csv_writer.writerow(module_row)
except OSError:
return {'modules_num': len(modules_data), 'success': False}
return {'modules_num': len(modules_data), 'success': True}
def pythonModules():
standardUrl = 'https://pypi.org/search/?c=Topic+%3A%3A+Software+Development+%3A%3A+Libraries+%3A%3A+Python+Modules&page='
firstPage = first_page_number() # 1
latestPage = latest_page_number() # 500
while firstPage <= latestPage:
# handle URL
url = standardUrl+str(firstPage)
# ready bs
modulesSoup = new_bs(url)
if modulesSoup != None:
# extract modules-info from url
modules_data = extractModules(modulesSoup)
# store modules-data in csv file
csv_store_results = storePageModulesCsv(modules_data)
# print(csv_store_results)
# print('------------')
print('> url: ', url)
# print(modules_data)
print('------------')
print('> number of modules: ', csv_store_results['modules_num'])
if csv_store_results['success']:
print('success store!')
else:
print('failed store!')
print('------------')
else:
print('------------')
print('error in url: ', url)
print('------------')
firstPage += 1
pythonModules()
# print("first_name,last_name,age".split(","))
# # print('https://pypi.org/search/?c=Topic+%3A%3A+Software+Development+%3A%3A+Libraries+%3A%3A+Python+Modules&page=',str(1))
# # print(len('https://pypi.org/search/?c=Topic+%3A%3A+Software+Development+%3A%3A+Libraries+%3A%3A+Python+Modules&page='))
# strr = '''https://pypi.org/search/?c=Topic+%3A%3A+Software+Development+%3A%3A+Libraries+%3A%3A+Python+Modules&page='''
# # print(strr[0])
# # strr[len(strr)+1]='1'
# # print(strr.split())
# i =1
# print(strr+str(i))
| 29.813636 | 125 | 0.620369 |
6f0d03b83b8ac780e90760fe850e49d8e9cd7251
| 28,034 |
py
|
Python
|
s3fm/ui/filepane.py
|
kazhala/s3fm
|
99fb41427b502dcd292e413f5e9fc18675ce7066
|
[
"MIT"
] | null | null | null |
s3fm/ui/filepane.py
|
kazhala/s3fm
|
99fb41427b502dcd292e413f5e9fc18675ce7066
|
[
"MIT"
] | null | null | null |
s3fm/ui/filepane.py
|
kazhala/s3fm
|
99fb41427b502dcd292e413f5e9fc18675ce7066
|
[
"MIT"
] | null | null | null |
"""Module contains the main filepane which is used as the left/right pane."""
import asyncio
import math
from functools import wraps
from pathlib import Path
from typing import Awaitable, Callable, Dict, List, Optional, Tuple
from prompt_toolkit.filters.base import Condition
from prompt_toolkit.layout.containers import (
ConditionalContainer,
FloatContainer,
HSplit,
VSplit,
Window,
)
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.dimension import LayoutDimension
from s3fm.api.config import AppConfig, LineModeConfig, SpinnerConfig
from s3fm.api.fs import FS, S3, File
from s3fm.api.fuzzy import match_exact
from s3fm.api.history import History
from s3fm.enums import ErrorType, FileType, Pane, PaneMode
from s3fm.exceptions import ClientError, Notification
from s3fm.ui.spinner import Spinner
from s3fm.utils import get_dimension
def hist_dir(func: Callable[..., Awaitable[None]]):
"""Decorate a :class:`~s3fm.ui.filepane.FilePane` method to store the path history.
Args:
func: Function to be wrapped to store path information.
Returns:
Decorated function.
Examples:
>>> @hist_dir
... async def _(filepane: FilePane):
... pass
"""
@wraps(func)
async def executable(*args, **kwargs):
curr_path = str(
args[0]._fs._path if args[0]._mode == PaneMode.fs else args[0]._s3._path
)
curr_result = args[0]._history._directory.get(curr_path, 0)
args[0]._history._directory[curr_path] = args[0]._selected_file_index
await func(*args, **kwargs)
new_path = str(
args[0]._fs._path if args[0]._mode == PaneMode.fs else args[0]._s3._path
)
if new_path == curr_path:
args[0]._history._directory[curr_path] = curr_result
else:
args[0]._selected_file_index = args[0]._history._directory.get(new_path, 0)
return executable
def spin_spinner(func: Callable[..., Awaitable[None]]):
"""Decorate a :class:`~s3fm.ui.filepane.FilePane` method to start and stop spinner.
Args:
func: Function to be wrapped to start/stop spinner.
Returns:
Decorated function.
Examples:
>>> @spin_spinner
... async def _(filepane: FilePane):
... pass
"""
@wraps(func)
async def executable(*args, **kwargs):
if not args[0].loading:
args[0].loading = True
try:
await func(*args, **kwargs)
except:
raise
finally:
args[0].loading = False
return executable
def file_action(func: Callable[..., Awaitable[None]]):
"""Decorate a method related to file action.
On loading time, :attr:`~s3fm.ui.filepane.FilePane.current_selection`
may not exist and raise :obj:`IndexError`. Using this decorator
to perform additional checks.
Args:
func: The function to decorate.
Returns:
Updated function with checks.
Examples:
>>> @file_action
... async def _(filepane: FilePane):
... pass
"""
@wraps(func)
async def executable(*args, **kwargs):
if not args[0].current_selection:
return
await func(*args, **kwargs)
return executable
class FilePane(ConditionalContainer):
"""Main file pane of the app.
FilePane has 2 modes to operate: `PaneMode.s3` and `PaneMode.fs`. The default
mode is the s3 mode. The mode value at the moment cannot be configured via the
:class:`~s3fm.api.config.Config` class; the value is stored in the history via
:class:`~s3fm.api.history.History` and is restored the next time the app is opened.
Args:
pane_id: A :class:`~s3fm.enums.Pane` indicating whether this pane
is the left pane or right pane. This is used to detect current app focus.
spinner_config: :class:`~s3fm.api.config.Spinner` configuration.
linemode_config: :class:`~s3fm.api.config.LineModeConfig` instance.
app_config: :class:`~s3fm.api.config.AppConfig` instance.
redraw: A callable that should be provided by :class:`~s3fm.app.App` which can force
a UI update on the app.
layout_single: A :class:`prompt_toolkit.filters.Condition` that can be used to check
if the current :class:`~s3fm.app.App` is single layout.
layout_vertical: A :class:`prompt_toolkit.filters.Condition` that can be used to check
if the current :class:`~s3fm.app.App` is vertical layout.
focus: A function to be provided by :class:`~s3fm.app.App` to be used to get current
app focus.
history: :class:`~s3fm.api.history.History` instance.
set_error: A callable to be provided by :class:`~s3fm.app.App` to set error for the application.
"""
def __init__(
self,
pane_id: Pane,
spinner_config: SpinnerConfig,
linemode_config: LineModeConfig,
app_config: AppConfig,
redraw: Callable[[], None],
layout_single: Condition,
layout_vertical: Condition,
focus: Callable[[], Pane],
history: History,
set_error: Callable[[Optional[Notification]], None],
) -> None:
self._s3 = S3()
self._fs = FS()
self._mode = PaneMode.s3
self._loaded = False
self._files: List[File] = []
self._filtered_files: List[File] = []
self._searched_indices: Optional[Dict[int, List[int]]] = None
self._loading = True
self._dimension_offset = 0 if not app_config.border else 2
self._padding = app_config.padding
self._cycle = app_config.cycle
self._id: Pane = pane_id
self._single_mode = layout_single
self._vertical_mode = layout_vertical
self._focus = Condition(lambda: focus() == self._id)
self._selected_file_index = 0
self._width = 0
self._linemode = linemode_config
self._display_hidden = True
self._first_line = 0
self._last_line = self._get_height() - self._first_line
self._history = history
self._set_error = set_error
self._spinner = Spinner(
loading=Condition(lambda: self._loading),
pattern=spinner_config.pattern,
redraw=redraw,
delay=spinner_config.delay,
top=spinner_config.top,
bottom=spinner_config.bottom,
left=spinner_config.left,
right=spinner_config.right,
text=spinner_config.text,
)
super().__init__(
content=FloatContainer(
content=VSplit(
[
Window(
content=FormattedTextControl(" "),
width=LayoutDimension.exact(self._padding),
),
HSplit(
[
Window(
content=FormattedTextControl(self._get_pane_info),
height=LayoutDimension.exact(1),
),
Window(
content=FormattedTextControl(
self._get_formatted_files,
focusable=True,
show_cursor=False,
),
width=self._get_width_dimension,
),
]
),
Window(
content=FormattedTextControl(" "),
width=LayoutDimension.exact(self._padding),
),
],
height=self._get_height_dimension,
),
floats=[self._spinner],
),
filter=~self._single_mode | self._focus,
)
def _get_pane_info(self) -> List[Tuple[str, str]]:
"""Get the top panel info of the current pane.
This will be used to display some information at the top of
the filepane.
Returns:
A list of tuples which can be parsed as
:class:`prompt_toolkit.formatted_text.FormattedText`.
Raises:
Notification: When pane mode is not recognized.
"""
if not self._loaded:
return []
display_info = []
color_class = (
"class:filepane.focus_path"
if self._focus()
else "class:filepane.unfocus_path"
)
if self._mode == PaneMode.s3:
display_info.append((color_class, self._s3.uri))
elif self._mode == PaneMode.fs:
display_info.append(
(
color_class,
str(self._fs.path).replace(str(Path("~").expanduser()), "~"),
)
)
else:
self._mode = PaneMode.fs
self.set_error(
Notification("Unexpected pane mode.", error_type=ErrorType.warning)
)
return self._get_pane_info()
return display_info
def _get_formatted_files(self) -> List[Tuple[str, str]]:
"""Get content in `formatted_text` format to display.
This function will only try to return the necessary files to display
to optimise performance.
The files/height will be calculated dynamically based on certain conditions.
Returns:
A list of tuples which can be parsed as
:class:`prompt_toolkit.formatted_text.FormattedText`.
"""
display_files = []
if self.file_count == 0:
return display_files
height = self._get_height()
if self._selected_file_index < 0:
self._selected_file_index = 0
elif self._selected_file_index >= self.file_count:
self._selected_file_index = self.file_count - 1
if (self._last_line - self._first_line) < min(self.file_count, height):
self._last_line = min(self.file_count, height)
self._first_line = self._last_line - min(self.file_count, height)
if self._selected_file_index <= self._first_line:
self._first_line = self._selected_file_index
self._last_line = self._first_line + min(height, self.file_count)
elif self._selected_file_index >= self._last_line:
self._last_line = self._selected_file_index + 1
self._first_line = self._last_line - min(height, self.file_count)
if self._last_line > self.file_count:
self._last_line = self.file_count
self._first_line = self._last_line - min(height, self.file_count)
if self._first_line < 0:
self._first_line = 0
self._last_line = self._first_line + min(height, self.file_count)
for index in range(self._first_line, self._last_line):
file = self.files[index]
file_style, icon, name, info = self._get_file_info(file)
style_class = "class:filepane.other_line"
if index == self._selected_file_index and self._focus():
style_class = "class:filepane.current_line"
display_files.append(("[SetCursorPosition]", ""))
style_class += " %s" % file_style
display_files.append((style_class, icon))
if self._searched_indices is not None and index in self._searched_indices:
for j in range(len(name)):
if j in self._searched_indices[index]:
display_files.append(("class:filepane.searched", name[j]))
else:
display_files.append((style_class, name[j]))
else:
display_files.append((style_class, name))
display_files.append(
(
style_class,
" " * (self._width - len(icon) - len(name) - len(info)),
)
)
display_files.append((style_class, info))
display_files.append(("", "\n"))
if display_files:
display_files.pop()
return display_files
def _get_file_info(self, file: File) -> Tuple[str, str, str, str]:
"""Get the file info to display.
This is used internally by :meth:`FilePane._get_formatted_files`.
Args:
file: A :class:`~s3fm.id.File` instance.
Returns:
A tuple representing the style, icon, file_name and file_info.
Raises:
ClientError: When custom linemode does not return 4 values which caused
the unpack function to raise error.
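Examples:
A hypothetical custom process function (illustrative sketch; the style
class and icon returned below are assumptions, not part of the documented API):
>>> def my_linemode(file):
...     return "class:filepane.other_line", " ", file.name, file.info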
"""
style_class = ""
icon = ""
file_name = file.name
file_info = file.info
for func in self._linemode.process:
result = func(file)
if isinstance(result, Tuple):
try:
style_class, icon, file_name, file_info = result
return style_class, icon, file_name, file_info
except ValueError:
raise ClientError(
"linemode process function should return a tuple of total 4 values (style_class, icon, file_name, file_info)."
)
if file.type in self._linemode.filetype_maps:
icon = self._linemode.filetype_maps[file.type]
if file.name in self._linemode.exact_maps:
icon = self._linemode.exact_maps[file.name]
ext = Path(file.name).suffix
if ext in self._linemode.extension_maps:
icon = self._linemode.extension_maps[ext]
style_class = self._linemode.style_maps[file.type]
return style_class, icon, file_name, file_info
def _get_width_dimension(self) -> LayoutDimension:
"""Retrieve the width dimension dynamically.
Returns:
:class:`prompt_toolkit.layout.Dimension` instance.
"""
width, _ = get_dimension(offset=self._dimension_offset + (self._padding * 2))
if self._vertical_mode():
if self._id == Pane.left:
width = math.ceil((width - (self._padding * 2)) / 2)
elif self._id == Pane.right:
width = math.floor((width - (self._padding * 2)) / 2)
self._width = width
return LayoutDimension(preferred=width)
def _get_height_dimension(self) -> LayoutDimension:
"""Retrieve the height dimension dynamically.
Returns:
:class:`prompt_toolkit.layout.Dimension` instance.
"""
return LayoutDimension(preferred=self._get_height() + 1)
def _get_height(self) -> int:
"""Obtain the total available height for file display.
Returns:
The available height to display files.
"""
if self._vertical_mode() or self._single_mode():
_, height = get_dimension(offset=self._dimension_offset + 2)
else:
_, height = get_dimension(offset=self._dimension_offset + 1)
if self._id == Pane.left:
height = math.ceil(height / 2) - 1
elif self._id == Pane.right:
height = math.floor(height / 2) - 1
return height
def scroll_down(
self, value: int = 1, page: bool = False, bottom: bool = False
) -> None:
"""Move current selection down.
Args:
value: Number of lines to scroll down.
page: Scroll half a page down.
bottom: Scroll to bottom.
"""
if bottom:
self._selected_file_index = self.file_count - 1
return
if page:
value = self._get_height() // 2
if self._cycle and value == 1:
self._selected_file_index = (
self._selected_file_index + 1
) % self.file_count
else:
self._selected_file_index += value
if self._selected_file_index >= self.file_count:
self._selected_file_index = self.file_count - 1
def scroll_up(self, value: int = 1, page: bool = False, top: bool = False) -> None:
"""Move current selection up.
Args:
value: Number of lines to scroll down.
page: Scroll half a page down.
top: Scroll to top.
"""
if top:
self._selected_file_index = 0
return
if page:
value = self._get_height() // 2
if self._cycle and value == 1:
self._selected_file_index = (
self._selected_file_index - 1
) % self.file_count
else:
self._selected_file_index -= value
if self._selected_file_index < 0:
self._selected_file_index = 0
def page_up(self, value: int = 1) -> None:
"""Scroll page up.
Slightly different scroll behavior than :meth:`FilePane.scroll_up`, similar
to vim "c-y".
Args:
value: Number of lines to scroll.
"""
if self._selected_file_index - value < 0:
self._selected_file_index = 0
return
self._first_line -= value
self._last_line -= value
self._selected_file_index -= value
def page_down(self, value: int = 1) -> None:
"""Scroll page down.
Slightly different scroll behavior than :meth:`FilePane.scroll_down`, similar
to vim "c-e".
Args:
value: Number of lines to scroll.
"""
if self._selected_file_index + value >= self.file_count:
self._selected_file_index = self.file_count - 1
return
self._first_line += value
self._last_line += value
self._selected_file_index += value
@hist_dir
@spin_spinner
@file_action
async def forward(self) -> None:
"""Handle the forward action on the current file based on filetype."""
current_path = self.path
if self._mode == PaneMode.fs:
if self.current_selection.type == FileType.dir:
try:
self._files = await self._fs.cd(Path(self.current_selection.name))
except Exception as e:
self.set_error(Notification(str(e)))
self._files = await self._fs.cd(Path(current_path))
elif self._mode == PaneMode.s3:
if (
self.current_selection.type == FileType.dir
or self.current_selection.type == FileType.bucket
):
try:
self._files = await self._s3.cd(self.current_selection.name)
except Exception as e:
self.set_error(Notification(str(e)))
self._files = await self._fs.cd(Path(current_path))
else:
self._mode = PaneMode.fs
self.set_error(
Notification("Unexpected pane mode.", error_type=ErrorType.warning)
)
return await self.forward()
await self.filter_files()
@hist_dir
@spin_spinner
async def backword(self) -> None:
"""Handle the backword action.
Raises:
Notification: Unexpected pane mode.
"""
if self._mode == PaneMode.fs:
self._files = await self._fs.cd()
elif self._mode == PaneMode.s3:
self._files = await self._s3.cd()
else:
self._mode = PaneMode.fs
self.set_error(
Notification("Unexpected pane mode.", error_type=ErrorType.warning)
)
return await self.filter_files()
await self.filter_files()
@spin_spinner
async def filter_files(self) -> None:
"""Shift up/down taking consideration of hidden status.
When the filepane change its hidden display status, if the current
highlight is a hidden file, the app will lost its highlighted line.
Use this method to shift down until it found a file thats not hidden.
"""
if self._display_hidden:
self._filtered_files = self._files
else:
self._filtered_files = list(
filter(lambda file: not file.hidden, self._files)
)
@spin_spinner
async def load_data(self) -> None:
"""Load the data into filepane.
Raises:
Notification: Current pane mode is not recognized.
"""
self._files = []
if self._mode == PaneMode.s3:
try:
self._files += await self._s3.get_paths()
except:
self.set_error(
Notification(
message="Target S3 path %s is not valid or you do not have sufficent permissions."
% self._s3.path
)
)
self._s3.path = Path("")
self._files += await self._s3.get_paths()
elif self._mode == PaneMode.fs:
try:
self._files += await self._fs.get_paths()
except:
self.set_error(
Notification(
message="Target path %s is not a valid directory."
% self._fs.path
)
)
self._fs.path = Path("").resolve()
self._files += await self._fs.get_paths()
else:
self._mode = PaneMode.fs
self.set_error(
Notification("Unexpected pane mode.", error_type=ErrorType.warning)
)
return await self.load_data()
await self.filter_files()
self._loaded = True
def set_error(self, notification: Notification = None) -> None:
"""Set error notification for the application.
This should only be used to set non-application error.
Args:
notification: A :class:`~s3fm.exceptions.Notification` instance.
"""
self._set_error(notification)
async def pane_toggle_hidden_files(self, value: bool = None) -> None:
"""Toggle the current focused pane display hidden file status.
Use this method to either instruct the current focused pane to show
hidden files or hide hidden files.
If the currently highlighted file is hidden and the focused pane
is instructed to hide hidden files, the highlight will shift down until
it reaches a non-hidden file.
Args:
value: Optional bool value to indicate show/hide.
If not provided, it will toggle the hidden file status.
"""
self.display_hidden_files = value or not self.display_hidden_files
await self.filter_files()
async def pane_switch_mode(self, mode: PaneMode = None) -> None:
"""Switch the pane operation mode from one to another.
If the pane is currently in local file system mode, switch to s3 file system mode, and vice versa.
Args:
mode: PaneMode for the current focus pane to switch to.
If not provided, it will switch to the alternative mode.
"""
if mode is not None:
self.mode = mode
else:
self.mode = PaneMode.s3 if self.mode == PaneMode.fs else PaneMode.fs
await self.load_data()
self.selected_file_index = 0
async def search_text(self, text: str) -> None:
"""Search text in all visible files.
Args:
text: Text to search.
"""
if not text:
self._searched_indices = None
else:
await asyncio.sleep(self._calculate_wait_time())
self._searched_indices = await match_exact(self.files, text)
def _calculate_wait_time(self) -> float:
"""Calculate wait time to smoother the application on big data set.
Using digit of the choices lengeth to get wait time.
For digit greater than 6, using formula 2^(digit - 5) * 0.3 to increase the wait_time.
Still experimenting, require improvements.
"""
wait_table = {
2: 0.05,
3: 0.1,
4: 0.2,
5: 0.3,
}
digit = 1
if self.file_count > 0:
digit = int(math.log10(self.file_count)) + 1
if digit < 2:
return 0.0
if digit in wait_table:
return wait_table[digit]
return wait_table[5] * (2 ** (digit - 5))
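# Worked example of the docstring formula above (illustrative): with 1,000,000
# files the digit count is int(log10(1_000_000)) + 1 = 7, which is not in
# wait_table, so the wait time is wait_table[5] * 2 ** (7 - 5) = 0.3 * 4 = 1.2s.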
@property
def file_count(self) -> int:
"""int: Total file count."""
return len(self._filtered_files)
@property
def spinner(self) -> Spinner:
""":class:`~s3fm.ui.spinner.Spinner`: :class:`FilePane` spinner instance."""
return self._spinner
@property
def id(self) -> Pane:
"""Pane: Get the id indicating purpose of the filepane."""
return self._id
@id.setter
def id(self, value: Pane) -> None:
self._id = value
@property
def files(self) -> List[File]:
"""Iterable[File]: All available files to display."""
return self._filtered_files
@property
def current_selection(self) -> Optional[File]:
"""File: Get current file selection.
On filepane initialisation, if `current_selection` is requested,
return `None`.
"""
try:
return self._files[self._filtered_files[self._selected_file_index].index]
except IndexError:
return None
@property
def display_hidden_files(self) -> bool:
"""bool: Hidden file display status."""
return self._display_hidden
@display_hidden_files.setter
def display_hidden_files(self, value: bool) -> None:
self._display_hidden = value
@property
def loading(self) -> bool:
"""bool: Loading status of the pane."""
return self._loading
@loading.setter
def loading(self, value: bool) -> None:
self._loading = value
if value:
asyncio.create_task(self.spinner.start())
@property
def mode(self) -> PaneMode:
"""PaneMode: Current pane mode."""
return self._mode
@mode.setter
def mode(self, value: PaneMode) -> None:
self._mode = value
@property
def selected_file_index(self) -> int:
"""int: Current selection index."""
return self._selected_file_index
@selected_file_index.setter
def selected_file_index(self, value: int) -> None:
"""int: Current selection index."""
self._selected_file_index = value
@property
def path(self) -> str:
"""str: Current filepath."""
if self.mode == PaneMode.s3:
return str(self._s3.path)
elif self.mode == PaneMode.fs:
return str(self._fs.path)
else:
self._mode = PaneMode.fs
self.set_error(
Notification("Unexpected pane mode.", error_type=ErrorType.warning)
)
return self.path
@path.setter
def path(self, value) -> None:
if self.mode == PaneMode.s3:
self._s3.path = Path(value)
elif self.mode == PaneMode.fs:
self._fs.path = Path(value)
else:
self._mode = PaneMode.fs
self.set_error(
Notification("Unexpected pane mode.", error_type=ErrorType.warning)
)
self._fs.path = Path(value)
@property
def searched_indices(self) -> Optional[Dict[int, List[int]]]:
"""Optional[Dict[int, List[int]]]: Dictionary of current seach result index and matching indices."""
return self._searched_indices
@searched_indices.setter
def searched_indices(self, value) -> None:
self._searched_indices = value
| 35.218593 | 134 | 0.574445 |
1fe93ba9555bd08b02e2aefd416fba705baac027 | 3,639 | py | Python | taotao-cloud-python/taotao-cloud-oldboy/day13-python-list-tuple-dict/s2.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | ["Apache-2.0"] | 47 | 2021-04-13T10:32:13.000Z | 2022-03-31T10:30:30.000Z | taotao-cloud-python/taotao-cloud-oldboy/day13-python-list-tuple-dict/s2.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | ["Apache-2.0"] | 1 | 2021-11-01T07:41:04.000Z | 2021-11-01T07:41:10.000Z | taotao-cloud-python/taotao-cloud-oldboy/day13-python-list-tuple-dict/s2.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | ["Apache-2.0"] | 21 | 2021-04-13T10:32:17.000Z | 2022-03-26T07:43:22.000Z |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# l = [11,22,33,44,55,66,77,88,99,90]
# result = {}
# # {'k1': [77, 88, 99, 90], 'k2': [11, 22, 33, 44, 55]}
#
# for item in l:
# if item < 66:
# # result.update({"k1": item}) # {'k1':11}
# # {'k1': [11,22]}
# if "k1" not in result:
# result['k1'] = [item, ] # {'k1': [11,]}
# else:
# result['k1'].append(item)
# elif item > 66:
# if "k2" not in result:
# result['k2'] = [item, ] # {'k1': [11,]}
# else:
# result['k2'].append(item)
# for i in l:
# if i < 66:
# a.append(i)
# if i > 66:
# b.append(i)
# print({'k1':b,'k2':a})
# There are two lists
# l1 = [11,22,33]
# l2 = [22,33,44]
# a. Get the list of elements that appear in both
# b. Get the list of elements that are in l1 but not in l2
# c. Get the list of elements that are in l2 but not in l1
# d. Get the elements that appear in only one of l1 and l2
# l1 = [11,22,33]
# l2 = [22,33,44]
# for i in l2:
# if i not in l1:
# print(i)
# for i in l1:
# if i not in l2:
# print(i)
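# A possible sketch for parts a and d of the exercise above (illustrative, left
# commented out like the rest of this practice file):
# same = [i for i in l1 if i in l2]                  # a. elements present in both lists
# only_one = [i for i in l1 + l2 if i not in same]   # d. elements found in just one list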
# count =0
#
# for i in range(1, 9):
# for v in range(1, 9):
# if i !=v:
# count += 1
# # count = count -8
# print(count)
#
# count =0
# ls = [3,5,8,9]
# for i in range(0, len(ls)):
# for v in range(0, len(ls)):
# if i !=v:
# count += 1
# # count = count -8
# print(count)
# 9x9 multiplication table
# for i in range(1,10):
# string = ""
# for j in range(1,i+1):
# string +=str(j) + " * "+str(i) + " = " +str(i*j)+"\t"
# print(string)
# print("aaa",end="")
# print("bbb")
# for i in range(1,10):
# for j in range(1,i+1):
# print(str(j) + " * "+str(i) + " = " +str(i*j)+"\t",end="")
# print("\n",end="")
# for i in range(1,10):
# for j in range(1,i+1):
#
# \n
# print('alex', end='SB')
# print('alex')
# print('alex','haifeng','gangniang',sep='搞')
# How many distinct two-number combinations can be made from these digits, with no repeats
# li = [1,2,3,4]
# l =len(li)
# for i in range(0,l-1):
# for v in range(i+1,l):
# print(li[i],li[v])
# for x in range(1,100//5):
# for y in range(1,100//3):
# for z in range(1,100):
# if x + y + z == 100 and 5*x + 3*y + z/3 == 100:
# print(x,y,z)
# li = ['alex','eric',123]
# li[2] = str(li[2])
#
#
# v = "_".join(li)
# print(v)
# tu = ('alex', 'eric', 'rain')
# print(len(tu))
# print(tu[2])
# print(tu[1:])
# #step 为正
# print(tu[1:90])
# for elem in tu:
# print(elem)
# for idx in range(len(tu)):
# print(idx)
# for idx, elem in enumerate(tu, 10):
# print(idx, elem)
# tu =(
# "alex",
# [
# 11,
# 22,
# {
# "k1":'v1',
# "k2":["age","name"],
# "k3":(11,22,33)
# },
# 44
# ]
# )
# tu[1][2]["k2"].append("")
# nums = [2,7,11,15,1,8,7]
# a =[]
# for i in range(len(nums)):
# for j in range(len(nums)):
# if nums[i] + nums[j] ==9:
# a.append((i,j,))
# print(a)
# li = ["sdsdsd"]
# print (len(li))
# li.append("dsad")
# li.insert(0,"dsad")
# li.remove("eric")
# v = li.pop(1)
# print(li,v)
#
# v = li.reverse()
# for i in range(len(li)):
# print(i)
# for i,q in enumerate(li,100):
# print(i,q)
# for i in li:
# print(i)
# user_list = [
# ]
# for i in range(1,302):
# temp = {'name': 'alex'+str(i), 'email': '[email protected]' + str(i), 'pwd': 'pwd'+str(i)}
# user_list.append(temp)
#
# while True:
# # Show 10 records per page
# s = input("Please enter a page number 1,2,3-31:")
# s = int(s)
# # user_list[0,10] 1
# # user_list[10,20] 2
# # user_list[20,30] 3
# start = (s-1) * 10
# end = s * 10
# result = user_list[start: end]
# for item in result:
# print(item,type(item))
| 20.559322 | 91 | 0.446826 |
7543cabdf06b0b9412148757c651935af94961fd | 9,981 | py | Python | research/cv/faster_rcnn_dcn/src/FasterRcnn/bbox_assign_sample_stage2.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | ["Apache-2.0"] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/faster_rcnn_dcn/src/FasterRcnn/bbox_assign_sample_stage2.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | ["Apache-2.0"] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/faster_rcnn_dcn/src/FasterRcnn/bbox_assign_sample_stage2.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | ["Apache-2.0"] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FasterRcnn-DCN tpositive and negative sample screening for Rcnn."""
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
class BboxAssignSampleForRcnn(nn.Cell):
"""
Bbox assigner and sampler definition.
Args:
config (dict): Config.
batch_size (int): Batchsize.
num_bboxes (int): The anchor nums.
add_gt_as_proposals (bool): add gt bboxes as proposals flag.
Returns:
Tensor, output tensor.
bbox_targets: bbox location, (batch_size, num_bboxes, 4)
bbox_weights: bbox weights, (batch_size, num_bboxes, 1)
labels: label for every bboxes, (batch_size, num_bboxes, 1)
label_weights: label weight for every bboxes, (batch_size, num_bboxes, 1)
Examples:
BboxAssignSampleForRcnn(config, 2, 1024, True)
"""
def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
super(BboxAssignSampleForRcnn, self).__init__()
cfg = config
self.dtype = np.float32
self.ms_type = mstype.float32
self.batch_size = batch_size
self.neg_iou_thr = cfg.neg_iou_thr_stage2
self.pos_iou_thr = cfg.pos_iou_thr_stage2
self.min_pos_iou = cfg.min_pos_iou_stage2
self.num_gts = cfg.num_gts
self.num_bboxes = num_bboxes
self.num_expected_pos = cfg.num_expected_pos_stage2
self.num_expected_neg = cfg.num_expected_neg_stage2
self.num_expected_total = cfg.num_expected_total_stage2
self.add_gt_as_proposals = add_gt_as_proposals
self.label_inds = Tensor(np.arange(1, self.num_gts + 1).astype(np.int32))
self.add_gt_as_proposals_valid = Tensor(np.full(self.num_gts, self.add_gt_as_proposals, dtype=np.int32))
self.concat = P.Concat(axis=0)
self.max_gt = P.ArgMaxWithValue(axis=0)
self.max_anchor = P.ArgMaxWithValue(axis=1)
self.sum_inds = P.ReduceSum()
self.iou = P.IOU()
self.greaterequal = P.GreaterEqual()
self.greater = P.Greater()
self.select = P.Select()
self.gatherND = P.GatherNd()
self.squeeze = P.Squeeze()
self.cast = P.Cast()
self.logicaland = P.LogicalAnd()
self.less = P.Less()
self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
self.reshape = P.Reshape()
self.equal = P.Equal()
self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(0.1, 0.1, 0.2, 0.2))
self.concat_axis1 = P.Concat(axis=1)
self.logicalnot = P.LogicalNot()
self.tile = P.Tile()
# Check
self.check_gt_one = Tensor(np.full((self.num_gts, 4), -1, dtype=self.dtype))
self.check_anchor_two = Tensor(np.full((self.num_bboxes, 4), -2, dtype=self.dtype))
# Init tensor
self.assigned_gt_inds = Tensor(np.full(num_bboxes, -1, dtype=np.int32))
self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))
self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))
self.assigned_gt_ignores = Tensor(np.full(num_bboxes, -1, dtype=np.int32))
self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))
self.gt_ignores = Tensor(np.full(self.num_gts, -1, dtype=np.int32))
self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(self.dtype))
self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos), dtype=np.bool))
self.bboxs_neg_mask = Tensor(np.zeros((self.num_expected_neg, 4), dtype=self.dtype))
self.labels_neg_mask = Tensor(np.array(np.zeros(self.num_expected_neg), dtype=np.uint8))
self.reshape_shape_pos = (self.num_expected_pos, 1)
self.reshape_shape_neg = (self.num_expected_neg, 1)
self.scalar_zero = Tensor(0.0, dtype=self.ms_type)
self.scalar_neg_iou_thr = Tensor(self.neg_iou_thr, dtype=self.ms_type)
self.scalar_pos_iou_thr = Tensor(self.pos_iou_thr, dtype=self.ms_type)
self.scalar_min_pos_iou = Tensor(self.min_pos_iou, dtype=self.ms_type)
def construct(self, gt_bboxes_i, gt_labels_i, valid_mask, bboxes, gt_valids):
"""construct"""
gt_bboxes_i = self.select(self.cast(self.tile(self.reshape(self.cast(gt_valids, mstype.int32), \
(self.num_gts, 1)), (1, 4)), mstype.bool_), \
gt_bboxes_i, self.check_gt_one)
bboxes = self.select(self.cast(self.tile(self.reshape(self.cast(valid_mask, mstype.int32), \
(self.num_bboxes, 1)), (1, 4)), mstype.bool_), \
bboxes, self.check_anchor_two)
overlaps = self.iou(bboxes, gt_bboxes_i)
max_overlaps_w_gt_index, max_overlaps_w_gt = self.max_gt(overlaps)
_, max_overlaps_w_ac = self.max_anchor(overlaps)
neg_sample_iou_mask = self.logicaland(self.greaterequal(max_overlaps_w_gt,
self.scalar_zero),
self.less(max_overlaps_w_gt,
self.scalar_neg_iou_thr))
assigned_gt_inds2 = self.select(neg_sample_iou_mask, self.assigned_gt_zeros, self.assigned_gt_inds)
pos_sample_iou_mask = self.greaterequal(max_overlaps_w_gt, self.scalar_pos_iou_thr)
assigned_gt_inds3 = self.select(pos_sample_iou_mask, \
max_overlaps_w_gt_index + self.assigned_gt_ones, assigned_gt_inds2)
for j in range(self.num_gts):
max_overlaps_w_ac_j = max_overlaps_w_ac[j:j+1:1]
overlaps_w_ac_j = overlaps[j:j+1:1, ::]
temp1 = self.greaterequal(max_overlaps_w_ac_j, self.scalar_min_pos_iou)
temp2 = self.squeeze(self.equal(overlaps_w_ac_j, max_overlaps_w_ac_j))
pos_mask_j = self.logicaland(temp1, temp2)
assigned_gt_inds3 = self.select(pos_mask_j, (j+1)*self.assigned_gt_ones, assigned_gt_inds3)
assigned_gt_inds5 = self.select(valid_mask, assigned_gt_inds3, self.assigned_gt_ignores)
bboxes = self.concat((gt_bboxes_i, bboxes))
label_inds_valid = self.select(gt_valids, self.label_inds, self.gt_ignores)
label_inds_valid = label_inds_valid * self.add_gt_as_proposals_valid
assigned_gt_inds5 = self.concat((label_inds_valid, assigned_gt_inds5))
# Get pos index
pos_index, valid_pos_index = self.random_choice_with_mask_pos(self.greater(assigned_gt_inds5, 0))
pos_check_valid = self.cast(self.greater(assigned_gt_inds5, 0), self.ms_type)
pos_check_valid = self.sum_inds(pos_check_valid, -1)
valid_pos_index = self.less(self.range_pos_size, pos_check_valid)
pos_index = pos_index * self.reshape(self.cast(valid_pos_index, mstype.int32), (self.num_expected_pos, 1))
num_pos = self.sum_inds(self.cast(self.logicalnot(valid_pos_index), self.ms_type), -1)
valid_pos_index = self.cast(valid_pos_index, mstype.int32)
pos_index = self.reshape(pos_index, self.reshape_shape_pos)
valid_pos_index = self.reshape(valid_pos_index, self.reshape_shape_pos)
pos_index = pos_index * valid_pos_index
pos_assigned_gt_index = self.gatherND(assigned_gt_inds5, pos_index) - self.assigned_pos_ones
pos_assigned_gt_index = self.reshape(pos_assigned_gt_index, self.reshape_shape_pos)
pos_assigned_gt_index = pos_assigned_gt_index * valid_pos_index
pos_gt_labels = self.gatherND(gt_labels_i, pos_assigned_gt_index)
# Get neg index
neg_index, valid_neg_index = self.random_choice_with_mask_neg(self.equal(assigned_gt_inds5, 0))
unvalid_pos_index = self.less(self.range_pos_size, num_pos)
valid_neg_index = self.logicaland(self.concat((self.check_neg_mask, unvalid_pos_index)), valid_neg_index)
neg_index = self.reshape(neg_index, self.reshape_shape_neg)
valid_neg_index = self.cast(valid_neg_index, mstype.int32)
valid_neg_index = self.reshape(valid_neg_index, self.reshape_shape_neg)
neg_index = neg_index * valid_neg_index
pos_bboxes_ = self.gatherND(bboxes, pos_index)
neg_bboxes_ = self.gatherND(bboxes, neg_index)
pos_assigned_gt_index = self.reshape(pos_assigned_gt_index, self.reshape_shape_pos)
pos_gt_bboxes_ = self.gatherND(gt_bboxes_i, pos_assigned_gt_index)
pos_bbox_targets_ = self.bounding_box_encode(pos_bboxes_, pos_gt_bboxes_)
total_bboxes = self.concat((pos_bboxes_, neg_bboxes_))
total_deltas = self.concat((pos_bbox_targets_, self.bboxs_neg_mask))
total_labels = self.concat((pos_gt_labels, self.labels_neg_mask))
valid_pos_index = self.reshape(valid_pos_index, self.reshape_shape_pos)
valid_neg_index = self.reshape(valid_neg_index, self.reshape_shape_neg)
total_mask = self.concat((valid_pos_index, valid_neg_index))
return total_bboxes, total_deltas, total_labels, total_mask
| 50.409091 | 117 | 0.684901 |
935de52bc6bb9c2fe214f434a06015f22ca604b3 | 1,882 | py | Python | tests/models/test_token.py | cfogg/python-client | 40e6891c8240e6b2acd5df538e622e9f15de43d6 | ["Apache-2.0"] | 13 | 2017-03-17T15:15:20.000Z | 2022-03-14T22:24:10.000Z | tests/models/test_token.py | cfogg/python-client | 40e6891c8240e6b2acd5df538e622e9f15de43d6 | ["Apache-2.0"] | 81 | 2017-01-12T23:06:48.000Z | 2022-02-21T18:20:23.000Z | tests/models/test_token.py | cfogg/python-client | 40e6891c8240e6b2acd5df538e622e9f15de43d6 | ["Apache-2.0"] | 14 | 2017-05-25T10:49:13.000Z | 2021-12-27T16:39:20.000Z |
"""Split model tests module."""
from splitio.models import token
from splitio.models.grammar.condition import Condition
class TokenTests(object):
"""Token model tests."""
raw_false = {'pushEnabled': False}
def test_from_raw_false(self):
"""Test token model parsing."""
parsed = token.from_raw(self.raw_false)
assert parsed == None
raw_empty = {
'pushEnabled': True,
'token': '',
}
def test_from_raw_empty(self):
"""Test token model parsing."""
parsed = token.from_raw(self.raw_empty)
assert parsed == None
raw_ok = {
'pushEnabled': True,
'token': 'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk56TTJNREk1TXpjMF9NVGd5TlRnMU1UZ3dOZz09X3NlZ21lbnRzXCI6W1wic3Vic2NyaWJlXCJdLFwiTnpNMk1ESTVNemMwX01UZ3lOVGcxTVRnd05nPT1fc3BsaXRzXCI6W1wic3Vic2NyaWJlXCJdLFwiY29udHJvbF9wcmlcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXSxcImNvbnRyb2xfc2VjXCI6W1wic3Vic2NyaWJlXCIsXCJjaGFubmVsLW1ldGFkYXRhOnB1Ymxpc2hlcnNcIl19IiwieC1hYmx5LWNsaWVudElkIjoiY2xpZW50SWQiLCJleHAiOjE2MDIwODgxMjcsImlhdCI6MTYwMjA4NDUyN30.5_MjWonhs6yoFhw44hNJm3H7_YMjXpSW105DwjjppqE',
}
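# The iat, exp and channel values asserted in the test below are the claims
# decoded from the x-ably-capability payload of the JWT literal above.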
def test_from_raw(self):
"""Test token model parsing."""
parsed = token.from_raw(self.raw_ok)
assert isinstance(parsed, token.Token)
assert parsed.push_enabled == True
assert parsed.iat == 1602084527
assert parsed.exp == 1602088127
assert parsed.channels['NzM2MDI5Mzc0_MTgyNTg1MTgwNg==_segments'] == ['subscribe']
assert parsed.channels['NzM2MDI5Mzc0_MTgyNTg1MTgwNg==_splits'] == ['subscribe']
assert parsed.channels['control_pri'] == ['subscribe', 'channel-metadata:publishers']
assert parsed.channels['control_sec'] == ['subscribe', 'channel-metadata:publishers']
| 43.767442 | 574 | 0.74017 |
2afca2662f426152dedebff27b528eae90777478 | 9,291 | py | Python | .pc/hg-updates.diff/Lib/ctypes/test/test_numbers.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | ["PSF-2.0"] | 1,872 | 2015-01-02T18:56:47.000Z | 2022-03-31T07:34:39.000Z | .pc/hg-updates.diff/Lib/ctypes/test/test_numbers.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | ["PSF-2.0"] | 675 | 2015-02-27T09:01:01.000Z | 2022-03-31T14:03:25.000Z | .pc/hg-updates.diff/Lib/ctypes/test/test_numbers.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | ["PSF-2.0"] | 278 | 2015-01-02T03:48:20.000Z | 2022-03-29T20:40:44.000Z |
from ctypes import *
import unittest
import struct
def valid_ranges(*types):
# given a sequence of numeric types, collect their _type_
# attribute, which is a single format character compatible with
# the struct module, use the struct module to calculate the
# minimum and maximum value allowed for this format.
# Returns a list of (min, max) values.
result = []
for t in types:
fmt = t._type_
size = struct.calcsize(fmt)
a = struct.unpack(fmt, (b"\x00"*32)[:size])[0]
b = struct.unpack(fmt, (b"\xFF"*32)[:size])[0]
c = struct.unpack(fmt, (b"\x7F"+b"\x00"*32)[:size])[0]
d = struct.unpack(fmt, (b"\x80"+b"\xFF"*32)[:size])[0]
result.append((min(a, b, c, d), max(a, b, c, d)))
return result
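# For example, valid_ranges(c_ubyte, c_byte) evaluates to [(0, 255), (-128, 127)].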
ArgType = type(byref(c_int(0)))
unsigned_types = [c_ubyte, c_ushort, c_uint, c_ulong]
signed_types = [c_byte, c_short, c_int, c_long, c_longlong]
bool_types = []
float_types = [c_double, c_float]
try:
c_ulonglong
c_longlong
except NameError:
pass
else:
unsigned_types.append(c_ulonglong)
signed_types.append(c_longlong)
try:
c_bool
except NameError:
pass
else:
bool_types.append(c_bool)
unsigned_ranges = valid_ranges(*unsigned_types)
signed_ranges = valid_ranges(*signed_types)
bool_values = [True, False, 0, 1, -1, 5000, 'test', [], [1]]
################################################################
class NumberTestCase(unittest.TestCase):
def test_default_init(self):
# default values are set to zero
for t in signed_types + unsigned_types + float_types:
self.assertEqual(t().value, 0)
def test_unsigned_values(self):
# the value given to the constructor is available
# as the 'value' attribute
for t, (l, h) in zip(unsigned_types, unsigned_ranges):
self.assertEqual(t(l).value, l)
self.assertEqual(t(h).value, h)
def test_signed_values(self):
# see above
for t, (l, h) in zip(signed_types, signed_ranges):
self.assertEqual(t(l).value, l)
self.assertEqual(t(h).value, h)
def test_bool_values(self):
from operator import truth
for t, v in zip(bool_types, bool_values):
self.assertEqual(t(v).value, truth(v))
def test_typeerror(self):
# Only numbers are allowed in the constructor,
# otherwise TypeError is raised
for t in signed_types + unsigned_types + float_types:
self.assertRaises(TypeError, t, "")
self.assertRaises(TypeError, t, None)
@unittest.skip('test disabled')
def test_valid_ranges(self):
# invalid values of the correct type
# raise ValueError (not OverflowError)
for t, (l, h) in zip(unsigned_types, unsigned_ranges):
self.assertRaises(ValueError, t, l-1)
self.assertRaises(ValueError, t, h+1)
def test_from_param(self):
# the from_param class method attribute always
# returns PyCArgObject instances
for t in signed_types + unsigned_types + float_types:
self.assertEqual(ArgType, type(t.from_param(0)))
def test_byref(self):
# calling byref returns also a PyCArgObject instance
for t in signed_types + unsigned_types + float_types + bool_types:
parm = byref(t())
self.assertEqual(ArgType, type(parm))
def test_floats(self):
# c_float and c_double can be created from
# Python int and float
class FloatLike(object):
def __float__(self):
return 2.0
f = FloatLike()
for t in float_types:
self.assertEqual(t(2.0).value, 2.0)
self.assertEqual(t(2).value, 2.0)
self.assertEqual(t(2).value, 2.0)
self.assertEqual(t(f).value, 2.0)
def test_integers(self):
class FloatLike(object):
def __float__(self):
return 2.0
f = FloatLike()
class IntLike(object):
def __int__(self):
return 2
i = IntLike()
# integers cannot be constructed from floats,
# but from integer-like objects
for t in signed_types + unsigned_types:
self.assertRaises(TypeError, t, 3.14)
self.assertRaises(TypeError, t, f)
self.assertEqual(t(i).value, 2)
def test_sizes(self):
for t in signed_types + unsigned_types + float_types + bool_types:
try:
size = struct.calcsize(t._type_)
except struct.error:
continue
# sizeof of the type...
self.assertEqual(sizeof(t), size)
# and sizeof of an instance
self.assertEqual(sizeof(t()), size)
def test_alignments(self):
for t in signed_types + unsigned_types + float_types:
code = t._type_ # the typecode
align = struct.calcsize("c%c" % code) - struct.calcsize(code)
# alignment of the type...
self.assertEqual((code, alignment(t)),
(code, align))
# and alignment of an instance
self.assertEqual((code, alignment(t())),
(code, align))
def test_int_from_address(self):
from array import array
for t in signed_types + unsigned_types:
# the array module doesn't support all format codes
# (no 'q' or 'Q')
try:
array(t._type_)
except ValueError:
continue
a = array(t._type_, [100])
# v now is an integer at an 'external' memory location
v = t.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
self.assertEqual(type(v), t)
# changing the value at the memory location changes v's value also
a[0] = 42
self.assertEqual(v.value, a[0])
def test_float_from_address(self):
from array import array
for t in float_types:
a = array(t._type_, [3.14])
v = t.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
self.assertIs(type(v), t)
a[0] = 2.3456e17
self.assertEqual(v.value, a[0])
self.assertIs(type(v), t)
def test_char_from_address(self):
from ctypes import c_char
from array import array
a = array('b', [0])
a[0] = ord('x')
v = c_char.from_address(a.buffer_info()[0])
self.assertEqual(v.value, b'x')
self.assertIs(type(v), c_char)
a[0] = ord('?')
self.assertEqual(v.value, b'?')
# array does not support c_bool / 't'
@unittest.skip('test disabled')
def test_bool_from_address(self):
from ctypes import c_bool
from array import array
a = array(c_bool._type_, [True])
v = t.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
self.assertEqual(type(v) is t)
a[0] = False
self.assertEqual(v.value, a[0])
self.assertEqual(type(v) is t)
def test_init(self):
# c_int() can be initialized from Python's int, and c_int.
# Not from c_long or so, which seems strange, abc should
# probably be changed:
self.assertRaises(TypeError, c_int, c_long(42))
def test_float_overflow(self):
import sys
big_int = int(sys.float_info.max) * 2
for t in float_types + [c_longdouble]:
self.assertRaises(OverflowError, t, big_int)
if (hasattr(t, "__ctype_be__")):
self.assertRaises(OverflowError, t.__ctype_be__, big_int)
if (hasattr(t, "__ctype_le__")):
self.assertRaises(OverflowError, t.__ctype_le__, big_int)
@unittest.skip('test disabled')
def test_perf(self):
check_perf()
from ctypes import _SimpleCData
class c_int_S(_SimpleCData):
_type_ = "i"
__slots__ = []
def run_test(rep, msg, func, arg=None):
## items = [None] * rep
items = range(rep)
from time import clock
if arg is not None:
start = clock()
for i in items:
func(arg); func(arg); func(arg); func(arg); func(arg)
stop = clock()
else:
start = clock()
for i in items:
func(); func(); func(); func(); func()
stop = clock()
print("%15s: %.2f us" % (msg, ((stop-start)*1e6/5/rep)))
def check_perf():
# Construct 5 objects
from ctypes import c_int
REP = 200000
run_test(REP, "int()", int)
run_test(REP, "int(999)", int)
run_test(REP, "c_int()", c_int)
run_test(REP, "c_int(999)", c_int)
run_test(REP, "c_int_S()", c_int_S)
run_test(REP, "c_int_S(999)", c_int_S)
# Python 2.3 -OO, win2k, P4 700 MHz:
#
# int(): 0.87 us
# int(999): 0.87 us
# c_int(): 3.35 us
# c_int(999): 3.34 us
# c_int_S(): 3.23 us
# c_int_S(999): 3.24 us
# Python 2.2 -OO, win2k, P4 700 MHz:
#
# int(): 0.89 us
# int(999): 0.89 us
# c_int(): 9.99 us
# c_int(999): 10.02 us
# c_int_S(): 9.87 us
# c_int_S(999): 9.85 us
if __name__ == '__main__':
## check_perf()
unittest.main()
| 31.927835 | 78 | 0.578194 |
d0abab780fce5c6ef0a78f8745e2f8cd1e82f7cc | 4,826 | py | Python | TIDALDL-PY/tidal_dl/lang/croatian.py | DragonightFury/Tidal-Media-Downloader | a2eb174b686eb80449de773860f27fdc0c7d06e7 | ["Apache-2.0"] | null | null | null | TIDALDL-PY/tidal_dl/lang/croatian.py | DragonightFury/Tidal-Media-Downloader | a2eb174b686eb80449de773860f27fdc0c7d06e7 | ["Apache-2.0"] | null | null | null | TIDALDL-PY/tidal_dl/lang/croatian.py | DragonightFury/Tidal-Media-Downloader | a2eb174b686eb80449de773860f27fdc0c7d06e7 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : croatian.py
@Time : 2020/08/19
@Author : Yaronzz
@Version : 1.0
@Contact : [email protected]
@Desc :
'''
class LangCroatian(object):
SETTING = "POSTAVKE"
VALUE = "VRIJEDNOST"
SETTING_DOWNLOAD_PATH = "putanja preuzimanja"
SETTING_ONLY_M4A = "Pretvori mp4 u m4a"
SETTING_ADD_EXPLICIT_TAG = "Dodaj eksplicitni znak"
SETTING_ADD_HYPHEN = "Dodaj crticu"
SETTING_ADD_YEAR = "Dodaj godinu prije imena albuma u mapi"
SETTING_USE_TRACK_NUM = "Dodaj korisnicki broj pjesme"
SETTING_AUDIO_QUALITY = "Kvaliteta zvuka"
SETTING_VIDEO_QUALITY = "Kvaliteta videozapisa"
SETTING_CHECK_EXIST = "Provjeri postoji li"
SETTING_ARTIST_BEFORE_TITLE = "Ime izvodjaca prije imena pjesme"
SETTING_ALBUMID_BEFORE_FOLDER = "ID oznaka prije imena albuma u mapi"
SETTING_INCLUDE_EP = "Ukljuci singl i EP"
SETTING_SAVE_COVERS = "Spremi ilustraciju albuma"
SETTING_LANGUAGE = "Jezik"
SETTING_USE_PLAYLIST_FOLDER = "Use playlist folder"
SETTING_MULITHREAD_DOWNLOAD = "Multi thread download"
SETTING_ALBUM_FOLDER_FORMAT = "Album folder format"
SETTING_TRACK_FILE_FORMAT = "Track file format"
SETTING_SHOW_PROGRESS = "Show progress"
SETTING_SAVE_ALBUMINFO = "Save AlbumInfo.txt"
SETTING_ADD_LYRICS = "Add lyrics"
SETTING_LYRICS_SERVER_PROXY = "Lyrics server proxy"
SETTING_PATH = "Settings path"
CHOICE = "ODABIR"
FUNCTION = "FUNKCIJA"
CHOICE_ENTER = "Ulaz"
CHOICE_ENTER_URLID = "Unesi 'Url/ID':"
CHOICE_EXIT = "Izlaz"
CHOICE_LOGIN = "Check AccessToken"
CHOICE_SETTINGS = "Postavke"
CHOICE_SET_ACCESS_TOKEN = "Postavi AccessToken"
CHOICE_DOWNLOAD_BY_URL = "Preuzmi po url-u ili ID-u"
CHOICE_LOGOUT = "Logout"
PRINT_ERR = "[ERR]"
PRINT_INFO = "[INFO]"
PRINT_SUCCESS = "[USPIJESNO]"
PRINT_ENTER_CHOICE = "Unesi odabir:"
PRINT_LATEST_VERSION = "Posljednja verzija:"
#PRINT_USERNAME = "korisnik:"
#PRINT_PASSWORD = "lozinka:"
CHANGE_START_SETTINGS = "Pokreni postavke (0'-Izlaz,'1'-Da):"
CHANGE_DOWNLOAD_PATH = "Putanja preuzimanja('0' ne mijenjaj):"
CHANGE_AUDIO_QUALITY = "Kvaliteta zvuka('0'-Normalna,'1'-Visoka,'2'-HiFi,'3'-Master):"
CHANGE_VIDEO_QUALITY = "Kvaliteta videozapisa(1080, 720, 480, 360):"
CHANGE_ONLYM4A = "Pretvori mp4 u m4a('0'-Ne,'1'-Da):"
CHANGE_ADD_EXPLICIT_TAG = "Dodaj eksplicitni znak u imeni datoteke('0'-Ne,'1'-Da):"
CHANGE_ADD_HYPHEN = "Koristi crtice umjesto razmaka u imeni datoteke ('0'-Ne,'1'-Da):"
CHANGE_ADD_YEAR = "Dodaj godinu u imenu albuma u mapi('0'-Ne,'1'-Da):"
CHANGE_USE_TRACK_NUM = "Dodaj broj pjesme prije imena pjesme u datoteci ('0'-Ne,'1'-Da):"
CHANGE_CHECK_EXIST = "Provjeri postoji li ista datoteka prije preuzimanja pjesme('0'-Ne,'1'-Da):"
CHANGE_ARTIST_BEFORE_TITLE = "Dodaj ime izvodjaca prije imena pjesme('0'-Ne,'1'-Da):"
CHANGE_INCLUDE_EP = "Ukljuci singlove i EP-ove prilikom preuzimanja albuma izvodjaca('0'-Ne,'1'-Da):"
CHANGE_ALBUMID_BEFORE_FOLDER = "Dodaj ID oznaku prije imena albuma u datoteci('0'-Ne,'1'-Da):"
CHANGE_SAVE_COVERS = "Spremi ilustracije albuma('0'-Ne,'1'-Da):"
CHANGE_LANGUAGE = "Odaberi jezik"
CHANGE_ALBUM_FOLDER_FORMAT = "Album folder format('0' not modify):"
CHANGE_TRACK_FILE_FORMAT = "Track file format('0' not modify):"
CHANGE_SHOW_PROGRESS = "Show progress('0'-No,'1'-Yes):"
CHANGE_SAVE_ALBUM_INFO = "Save AlbumInfo.txt('0'-No,'1'-Yes):"
CHANGE_ADD_LYRICS = "Add lyrics('0'-No,'1'-Yes):"
CHANGE_LYRICS_SERVER_PROXY = "Lyrics server proxy('0' not modify):"
# {} are required in these strings
AUTH_START_LOGIN = "Starting login process..."
AUTH_LOGIN_CODE = "Your login code is {}"
AUTH_NEXT_STEP = "Go to {} within the next {} to complete setup."
AUTH_WAITING = "Waiting for authorization..."
AUTH_TIMEOUT = "Operation timed out."
MSG_VALID_ACCESSTOKEN = "AccessToken good for {}."
MSG_INVAILD_ACCESSTOKEN = "Expired AccessToken. Attempting to refresh it."
MSG_PATH_ERR = "Pogreska putanje!"
MSG_INPUT_ERR = "Pogreska unosa!"
MODEL_ALBUM_PROPERTY = "ALBUM-SVOJSTVO"
MODEL_TRACK_PROPERTY = "PJESMA-SVOJSTVO"
MODEL_VIDEO_PROPERTY = "VIDEOZAPIS-SVOJSTVO"
MODEL_ARTIST_PROPERTY = "IZVODJAC-SVOJSTVO"
MODEL_PLAYLIST_PROPERTY = "PLAYLISTA-SVOJSTVO"
MODEL_TITLE = 'Naziv'
MODEL_TRACK_NUMBER = 'Broj pjesme'
MODEL_VIDEO_NUMBER = 'Broj videozapisa'
MODEL_RELEASE_DATE = 'Datum izlaska'
MODEL_VERSION = 'Verzija'
MODEL_EXPLICIT = 'Eksplicitno'
MODEL_ALBUM = 'Album'
MODEL_ID = 'ID'
MODEL_NAME = 'Naziv'
MODEL_TYPE = 'Vrsta'
| 44.275229 | 106 | 0.688148 |
cbd2286666be06e9d4bf6c6f6472846a7b2546b0 | 691 | py | Python | CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/analysis/__init__.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | ["MIT"] | 7 | 2016-05-20T21:56:39.000Z | 2022-02-07T21:09:48.000Z | CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/analysis/__init__.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | ["MIT"] | 1 | 2019-03-21T16:10:04.000Z | 2019-03-22T17:21:56.000Z | CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/analysis/__init__.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | ["MIT"] | 1 | 2020-05-19T16:17:17.000Z | 2020-05-19T16:17:17.000Z |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# -----------------------------------------------------------------
# Package initialization file
# -----------------------------------------------------------------
## \package pts.modeling.analysis TO DO
#
# This package ...
#
# -----------------------------------------------------------------
# Import classes to make them available at the level of this subpackage
| 34.55 | 71 | 0.321274 |
2815cbe6b04174d464e79a9419108d469f5fa244 | 852 | py | Python | businesslayer/ai/deep-learning/tensorflow/scripts/Tensorflow2Mnist.py | dragomirdev/DataTachyonPlatform | 733ce014b33942d1962b1d4c80aae47699e9ff15 | ["CC-BY-4.0", "MIT"] | null | null | null | businesslayer/ai/deep-learning/tensorflow/scripts/Tensorflow2Mnist.py | dragomirdev/DataTachyonPlatform | 733ce014b33942d1962b1d4c80aae47699e9ff15 | ["CC-BY-4.0", "MIT"] | null | null | null | businesslayer/ai/deep-learning/tensorflow/scripts/Tensorflow2Mnist.py | dragomirdev/DataTachyonPlatform | 733ce014b33942d1962b1d4c80aae47699e9ff15 | ["CC-BY-4.0", "MIT"] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
try:
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
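# evaluate() below returns the test loss plus the test accuracy, since the model was compiled with metrics=['accuracy']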
model.evaluate(x_test, y_test)
except Exception:
pass
| 29.37931 | 82 | 0.679577 |
f3a5f9f3223abaee4e73bf73d24077d9b70e7099 | 1,490 | py | Python | src/regression/processing.py | satishukadam/regression | a135bf99411fc22ab71297727e4542cb505b4bcf | ["MIT"] | null | null | null | src/regression/processing.py | satishukadam/regression | a135bf99411fc22ab71297727e4542cb505b4bcf | ["MIT"] | 7 | 2019-11-07T15:11:32.000Z | 2019-11-07T15:11:41.000Z | src/regression/processing.py | satishukadam/regression | a135bf99411fc22ab71297727e4542cb505b4bcf | ["MIT"] | null | null | null |
from regression.preparation import get_features, load_dataset
from configs import config
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LinearRegression
from configs.logging_config import get_handler
import logging
# Create logger
logger = logging.getLogger(__name__)
handler = get_handler(logger)
logger.info('processing done!!!')
def train_pipeline(file=config.DATA_FILE):
"""This function will read the data and process features using a pipeline"""
# Loading data
data = load_dataset(file)
# Get numerical and categorical features
numerical_features, categorical_features = get_features(data)
# Create a pipeline for data pre-processing
numerical_pipe = Pipeline([('num_imputer', SimpleImputer(strategy='median')), ('scaler', StandardScaler())])
categorical_pipe = Pipeline([('cat_imputer', SimpleImputer(strategy='most_frequent')),
('ohe', OneHotEncoder(sparse=False))])
# Merge two pipelines into a single pipeline
preprocessor = ColumnTransformer([('num_pipe', numerical_pipe, numerical_features),
('cat_pipe', categorical_pipe, categorical_features)])
pipeline = Pipeline([('preprocessor', preprocessor), ('linear_model', LinearRegression())])
return pipeline
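# Example usage (illustrative sketch; X and y stand for a feature frame and a
# label series prepared elsewhere -- they are assumptions, not part of this module):
#
#   pipeline = train_pipeline()
#   pipeline.fit(X, y)
#   predictions = pipeline.predict(X)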
| 39.210526 | 112 | 0.743624 |
80e4314b3c231d28c87f5b781a27a1205d4bc563 | 13,527 | py | Python | django/core/management/commands/makemigrations.py | tqrg-bot/django | 67b46ba7016da2d259c1ecc7d666d11f5e1cfaab | ["PSF-2.0", "BSD-3-Clause"] | null | null | null | django/core/management/commands/makemigrations.py | tqrg-bot/django | 67b46ba7016da2d259c1ecc7d666d11f5e1cfaab | ["PSF-2.0", "BSD-3-Clause"] | null | null | null | django/core/management/commands/makemigrations.py | tqrg-bot/django | 67b46ba7016da2d259c1ecc7d666d11f5e1cfaab | ["PSF-2.0", "BSD-3-Clause"] | 1 | 2019-10-22T12:16:53.000Z | 2019-10-22T12:16:53.000Z |
import os
import sys
import warnings
from itertools import takewhile
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db.migrations import Migration
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import (
InteractiveMigrationQuestioner, MigrationQuestioner,
NonInteractiveMigrationQuestioner,
)
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.six import iteritems
from django.utils.six.moves import zip
class Command(BaseCommand):
help = "Creates new migration(s) for apps."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*',
help='Specify the app label(s) to create migrations for.')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', default=False,
help="Just show what migrations would be made; don't actually write them.")
parser.add_argument('--merge', action='store_true', dest='merge', default=False,
help="Enable fixing of migration conflicts.")
parser.add_argument('--empty', action='store_true', dest='empty', default=False,
help="Create an empty migration.")
parser.add_argument('--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('-n', '--name', action='store', dest='name', default=None,
help="Use this name for migration file(s).")
parser.add_argument('-e', '--exit', action='store_true', dest='exit_code', default=False,
help='Exit with error code 1 if no changes needing migrations are found. '
'Deprecated, use the --check option instead.')
parser.add_argument('--check', action='store_true', dest='check_changes',
help='Exit with a non-zero status if model changes are missing migrations.')
def handle(self, *app_labels, **options):
self.verbosity = options.get('verbosity')
self.interactive = options.get('interactive')
self.dry_run = options.get('dry_run', False)
self.merge = options.get('merge', False)
self.empty = options.get('empty', False)
self.migration_name = options.get('name')
self.exit_code = options.get('exit_code', False)
check_changes = options['check_changes']
if self.exit_code:
warnings.warn(
"The --exit option is deprecated in favor of the --check option.",
RemovedInDjango20Warning
)
# Make sure the app they asked for exists
app_labels = set(app_labels)
bad_app_labels = set()
for app_label in app_labels:
try:
apps.get_app_config(app_label)
except LookupError:
bad_app_labels.add(app_label)
if bad_app_labels:
for app_label in bad_app_labels:
self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
sys.exit(2)
# Load the current graph state. Pass in None for the connection so
# the loader doesn't try to resolve replaced migrations from DB.
loader = MigrationLoader(None, ignore_no_migrations=True)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any and they don't want to merge
conflicts = loader.detect_conflicts()
# If app_labels is specified, filter out conflicting migrations for unspecified apps
if app_labels:
conflicts = {
app_label: conflict for app_label, conflict in iteritems(conflicts)
if app_label in app_labels
}
if conflicts and not self.merge:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they want to merge and there's nothing to merge, then politely exit
if self.merge and not conflicts:
self.stdout.write("No conflicts detected to merge.")
return
# If they want to merge and there is something to merge, then
# divert into the merge code
if self.merge and conflicts:
return self.handle_merge(loader, conflicts)
if self.interactive:
questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
else:
questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
# Set up autodetector
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
questioner,
)
# If they want to make an empty migration, make one for each app
if self.empty:
if not app_labels:
raise CommandError("You must supply at least one app label when using --empty.")
# Make a fake changes() result we can pass to arrange_for_graph
changes = {
app: [Migration("custom", app)]
for app in app_labels
}
changes = autodetector.arrange_for_graph(
changes=changes,
graph=loader.graph,
migration_name=self.migration_name,
)
self.write_migration_files(changes)
return
# Detect changes
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=app_labels or None,
convert_apps=app_labels or None,
migration_name=self.migration_name,
)
if not changes:
# No changes? Tell them.
if self.verbosity >= 1:
if len(app_labels) == 1:
self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
elif len(app_labels) > 1:
self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
else:
self.stdout.write("No changes detected")
if self.exit_code:
sys.exit(1)
else:
self.write_migration_files(changes)
if check_changes:
sys.exit(1)
def write_migration_files(self, changes):
"""
Takes a changes dict and writes them out as migration files.
"""
directory_created = {}
for app_label, app_migrations in changes.items():
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
for migration in app_migrations:
# Describe the migration
writer = MigrationWriter(migration)
if self.verbosity >= 1:
# Display a relative path if it's below the current working
# directory, or an absolute path otherwise.
migration_string = os.path.relpath(writer.path)
if migration_string.startswith('..'):
migration_string = writer.path
self.stdout.write(" %s:\n" % (self.style.MIGRATE_LABEL(migration_string),))
for operation in migration.operations:
self.stdout.write(" - %s\n" % operation.describe())
if not self.dry_run:
# Write the migrations file to the disk.
migrations_directory = os.path.dirname(writer.path)
if not directory_created.get(app_label):
if not os.path.isdir(migrations_directory):
os.mkdir(migrations_directory)
init_path = os.path.join(migrations_directory, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
# We just do this once per app
directory_created[app_label] = True
migration_string = writer.as_string()
with open(writer.path, "wb") as fh:
fh.write(migration_string)
elif self.verbosity == 3:
# Alternatively, makemigrations --dry-run --verbosity 3
# will output the migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
def handle_merge(self, loader, conflicts):
"""
Handles merging together conflicted migrations interactively,
if it's safe; otherwise, advises on how to fix it.
"""
if self.interactive:
questioner = InteractiveMigrationQuestioner()
else:
questioner = MigrationQuestioner(defaults={'ask_merge': True})
for app_label, migration_names in conflicts.items():
# Grab out the migrations in question, and work out their
# common ancestor.
merge_migrations = []
for migration_name in migration_names:
migration = loader.get_migration(app_label, migration_name)
migration.ancestry = [
mig for mig in loader.graph.forwards_plan((app_label, migration_name))
if mig[0] == migration.app_label
]
merge_migrations.append(migration)
def all_items_equal(seq):
return all(item == seq[0] for item in seq[1:])
merge_migrations_generations = zip(*[m.ancestry for m in merge_migrations])
common_ancestor_count = sum(1 for common_ancestor_generation
in takewhile(all_items_equal, merge_migrations_generations))
if not common_ancestor_count:
raise ValueError("Could not find common ancestor of %s" % migration_names)
# Now work out the operations along each divergent branch
for migration in merge_migrations:
migration.branch = migration.ancestry[common_ancestor_count:]
migrations_ops = (loader.get_migration(node_app, node_name).operations
for node_app, node_name in migration.branch)
migration.merged_operations = sum(migrations_ops, [])
# In future, this could use some of the Optimizer code
# (can_optimize_through) to automatically see if they're
# mergeable. For now, we always just prompt the user.
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
for migration in merge_migrations:
self.stdout.write(self.style.MIGRATE_LABEL(" Branch %s" % migration.name))
for operation in migration.merged_operations:
self.stdout.write(" - %s\n" % operation.describe())
if questioner.ask_merge(app_label):
# If they still want to merge it, then write out an empty
# file depending on the migrations needing merging.
numbers = [
MigrationAutodetector.parse_number(migration.name)
for migration in merge_migrations
]
try:
biggest_number = max(x for x in numbers if x is not None)
except ValueError:
biggest_number = 1
subclass = type("Migration", (Migration, ), {
"dependencies": [(app_label, migration.name) for migration in merge_migrations],
})
new_migration = subclass("%04i_merge" % (biggest_number + 1), app_label)
writer = MigrationWriter(new_migration)
if not self.dry_run:
# Write the merge migrations file to the disk
with open(writer.path, "wb") as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write("\nCreated new merge migration %s" % writer.path)
elif self.verbosity == 3:
# Alternatively, makemigrations --merge --dry-run --verbosity 3
# will output the merge migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full merge migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
| 47.968085 | 107 | 0.584017 |
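A minimal usage sketch for the command above: the same makemigrations options can also be driven programmatically through Django's call_command API from a configured project; the app label "polls" is a hypothetical placeholder, not taken from the source.
from django.core.management import call_command
# Preview the migrations that would be generated, printing the full files (like --dry-run with -v 3).
call_command("makemigrations", "polls", dry_run=True, verbosity=3)
# Actually write the migration files, giving them an explicit name (like --name).
call_command("makemigrations", "polls", name="add_author_field")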
c17405620d34f862632532bc76b5f404e23a1fdd
| 4,796 |
py
|
Python
|
FB2/FB2Builder.py
|
Ae-Mc/FB2
|
2c29f774ab08bdad5bd6144b1be71b93146ce8fe
|
[
"MIT"
] | 3 |
2020-11-15T10:55:22.000Z
|
2022-02-09T19:45:52.000Z
|
FB2/FB2Builder.py
|
Ae-Mc/FB2
|
2c29f774ab08bdad5bd6144b1be71b93146ce8fe
|
[
"MIT"
] | 1 |
2020-11-15T11:04:59.000Z
|
2020-11-19T22:12:52.000Z
|
FB2/FB2Builder.py
|
Ae-Mc/FB2
|
2c29f774ab08bdad5bd6144b1be71b93146ce8fe
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as ET
from base64 import b64encode
from typing import List, Tuple, Union
from xml.dom import minidom
from .builders import TitleInfoBuilder, DocumentInfoBuilder
from .TitleInfo import TitleInfo
from .FictionBook2dataclass import FictionBook2dataclass
class FB2Builder:
    """Transforms FictionBook2 to xml (fb2) format"""
    book: FictionBook2dataclass
def __init__(self, book: FictionBook2dataclass):
self.book = book
def GetFB2(self) -> ET.Element:
fb2Tree = ET.Element("FictionBook", attrib={
"xmlns": "http://www.gribuser.ru/xml/fictionbook/2.0",
"xmlns:xlink": "http://www.w3.org/1999/xlink"
})
self._AddStylesheets(fb2Tree)
self._AddCustomInfos(fb2Tree)
self._AddDescription(fb2Tree)
self._AddBody(fb2Tree)
self._AddBinaries(fb2Tree)
return fb2Tree
def _AddStylesheets(self, root: ET.Element) -> None:
if self.book.stylesheets:
for stylesheet in self.book.stylesheets:
ET.SubElement(root, "stylesheet").text = stylesheet
def _AddCustomInfos(self, root: ET.Element) -> None:
if self.book.customInfos:
for customInfo in self.book.customInfos:
ET.SubElement(root, "custom-info").text = customInfo
def _AddDescription(self, root: ET.Element) -> None:
description = ET.SubElement(root, "description")
self._AddTitleInfo("title-info", self.book.titleInfo, description)
if self.book.sourceTitleInfo is not None:
self._AddTitleInfo(
"src-title-info", self.book.sourceTitleInfo, description)
self._AddDocumentInfo(description)
def _AddTitleInfo(self,
rootElement: str,
titleInfo: TitleInfo,
description: ET.Element) -> None:
builder = TitleInfoBuilder(rootTag=rootElement, titleInfo=titleInfo)
if titleInfo.coverPageImages:
builder.AddCoverImages([f"#{rootElement}-cover_{i}" for i in range(
len(titleInfo.coverPageImages))])
description.append(builder.GetResult())
def _AddDocumentInfo(self, description: ET.Element) -> None:
description.append(DocumentInfoBuilder(
documentInfo=self.book.documentInfo).GetResult())
def _AddBody(self, root: ET.Element) -> None:
if len(self.book.chapters):
bodyElement = ET.SubElement(root, "body")
ET.SubElement(ET.SubElement(bodyElement, "title"),
"p").text = self.book.titleInfo.title
for chapter in self.book.chapters:
bodyElement.append(self.BuildSectionFromChapter(chapter))
@staticmethod
def BuildSectionFromChapter(
chapter: Tuple[str, Union[
ET.Element, List[str], List[ET.Element]]]) -> ET.Element:
sectionElement = ET.Element("section")
ET.SubElement(ET.SubElement(sectionElement, "title"),
"p").text = chapter[0]
if(isinstance(chapter[1], list)
and all([isinstance(p, str) for p in chapter[1]])):
paragraph: str
for paragraph in chapter[1]: # type: ignore
ET.SubElement(sectionElement, "p").text = paragraph
else:
paragraphElement: ET.Element
paragraphs: List[ET.Element] = list(chapter[1]) # type: ignore
for paragraphElement in paragraphs:
sectionElement.append(paragraphElement)
return sectionElement
def _AddBinaries(self, root: ET.Element) -> None:
if self.book.titleInfo.coverPageImages is not None:
for i, coverImage in enumerate(
self.book.titleInfo.coverPageImages):
self._AddBinary(
root, f"title-info-cover_{i}", "image/jpeg", coverImage)
if (self.book.sourceTitleInfo
and self.book.sourceTitleInfo.coverPageImages):
for i, coverImage in enumerate(
self.book.sourceTitleInfo.coverPageImages):
self._AddBinary(
root,
f"src-title-info-cover#{i}",
"image/jpeg",
coverImage)
def _AddBinary(self,
root: ET.Element,
id: str,
contentType: str,
data: bytes) -> None:
ET.SubElement(
root, "binary", {"id": id, "content-type": contentType}
).text = b64encode(data).decode("utf-8")
@staticmethod
def _PrettifyXml(element: ET.Element) -> str:
dom = minidom.parseString(ET.tostring(element, "utf-8"))
return dom.toprettyxml(encoding="utf-8").decode("utf-8")
| 40.644068 | 79 | 0.605713 |
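A minimal sketch of using the builder above, assuming a FictionBook2dataclass instance named book has been assembled elsewhere (its construction is not shown in this file):
# `book` is a hypothetical, pre-built FictionBook2dataclass with titleInfo, documentInfo and chapters set.
builder = FB2Builder(book)
root = builder.GetFB2()                   # ElementTree root of the <FictionBook> document
pretty = FB2Builder._PrettifyXml(root)    # indented XML string
with open("book.fb2", "w", encoding="utf-8") as fh:
    fh.write(pretty)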
0986ddf4d14f111911e184d838621d65c209eebd
| 1,547 |
py
|
Python
|
async_metrics/asyncio.py
|
amenezes/async_metrics
|
28a2ead455830a629c89807eda93b4ab5ed3c661
|
[
"Apache-2.0"
] | 2 |
2021-09-02T11:45:29.000Z
|
2021-09-02T11:48:38.000Z
|
async_metrics/asyncio.py
|
amenezes/async_metrics
|
28a2ead455830a629c89807eda93b4ab5ed3c661
|
[
"Apache-2.0"
] | null | null | null |
async_metrics/asyncio.py
|
amenezes/async_metrics
|
28a2ead455830a629c89807eda93b4ab5ed3c661
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import inspect
from typing import Dict, List
from async_metrics.utils import measure_time_elapsed
def loop_info() -> List[Dict]:
"""Show information about running loop."""
loop = asyncio.get_event_loop()
return [
{
"running": loop.is_running(),
"policy": str(asyncio.get_event_loop_policy().__class__).split("'")[1],
"exception_handler": loop.get_exception_handler(),
}
]
def summary() -> Dict:
try:
return {
"tasks": len(asyncio.all_tasks()),
"watcher": asyncio.get_child_watcher().__doc__.split(".")[0], # type: ignore
}
except RuntimeError:
return {}
def current_task_info() -> Dict:
try:
ctask = asyncio.current_task()
return _task_info(ctask)
except (AttributeError, RuntimeError):
return {}
def tasks_info() -> List[Dict]:
try:
return [_task_info(task) for task in asyncio.all_tasks()]
except RuntimeError:
return []
def _task_info(task) -> Dict:
return {
"id": id(task),
"name": task.get_coro().__qualname__,
"task_name": task.get_name(),
"done": task.done(),
"cancelled": task.cancelled(),
"state": task._state,
"details": {"locals": inspect.getcoroutinelocals(task)},
}
@measure_time_elapsed
def all() -> Dict:
return {
"summary": summary(),
"loop": loop_info(),
"current_task": current_task_info(),
"tasks": tasks_info(),
}
| 23.8 | 89 | 0.585003 |
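A small sketch of sampling the helpers above from inside a running event loop (current_task and all_tasks are only defined there); the import path assumes the package is installed as async_metrics:
import asyncio
from async_metrics.asyncio import current_task_info, loop_info, summary, tasks_info
async def main():
    print(summary())            # task count and child-watcher class
    print(loop_info())          # running flag, loop policy, exception handler
    print(current_task_info())  # details of the task wrapping this coroutine
    print(tasks_info())         # one entry per live task
asyncio.run(main())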
f848347efbeb846cc8001159378d4f39edff1beb
| 15,858 |
py
|
Python
|
distributed_resource_allocator/src/tests/test_simple_dtn_sim.py
|
nasa/MOSAIC
|
af396ec450bd9f6f95fc5c603e13964035e05cd6
|
[
"Apache-2.0"
] | 18 |
2019-04-01T02:58:38.000Z
|
2022-01-02T07:31:03.000Z
|
distributed_resource_allocator/src/tests/test_simple_dtn_sim.py
|
nasa/MOSAIC
|
af396ec450bd9f6f95fc5c603e13964035e05cd6
|
[
"Apache-2.0"
] | null | null | null |
distributed_resource_allocator/src/tests/test_simple_dtn_sim.py
|
nasa/MOSAIC
|
af396ec450bd9f6f95fc5c603e13964035e05cd6
|
[
"Apache-2.0"
] | 3 |
2019-11-04T01:52:50.000Z
|
2021-09-13T01:52:50.000Z
|
#!/usr/bin/python
"""
Copyright 2020 by California Institute of Technology. ALL RIGHTS RESERVED.
United States Government sponsorship acknowledged. Any commercial use
must be negotiated with the Office of Technology Transfer at the
California Institute of Technology.
This software may be subject to U.S. export control laws and regulations.
By accepting this document, the user agrees to comply with all applicable
U.S. export laws and regulations. User has the responsibility to obtain
export licenses, or other export authority as may be required before
exporting such information to foreign countries or providing access to
foreign persons.
This software is a copy and may not be current. The latest version is
maintained by and may be obtained from the Mobility and Robotics Sytstem
Section (347) at the Jet Propulsion Laboratory. Suggestions and patches
are welcome and should be sent to the software's maintainer.
"""
import rospy
from pdra.msg import CommData
from std_msgs.msg import String
from threading import Lock
import defaults_scaffolding as def_vals
import ast
from time import sleep
import mosaic_routers.srv as svs
import json
class message_wrapper(object):
""" A class to contain message wrappers for DTN """
def __init__(self, receive_time, destination, msg):
self.receive_time = receive_time
self.destination = destination
self.msg = msg
class simple_dtn_simulator(object):
""" A class to simulate multi-hop CGR-backed delay-tolerant networking in a ROS-based environment """
# def __init__(self, contact_plan: list, start_time_offset: float = 0, contact_plan_topic: str = '/network_topology', sleep_interval: float = 0.1):
def __init__(self, contact_plan, start_time_offset=0, contact_plan_topic='/network_topology', routing_service="/router", sleep_interval=0.1):
""" Inputs:
- contact_plan, a list of contacts. Each entry is a dictionary containing:
- "origin", the name of the transmitter
- "destination", the name of the receiver
- "time_start", the start time of the contact
- "time_end", the end time of the contact
- "bandwidth", the available bandwidth.
A contact is feasible if bandwidth>0 and the current time is between start_time and end_time
- start_time_offset. The simulator grabs the current time via `rospy.get_time()`.
On the other hand, time_start and time_end are often specified with respect to the simulation start time.
An offset equal to start_time_offset is applied to the clock to compensate for this.
- contact_plan_topic, the name of a ROS topic where contact_plan updates will be published.
For compatibility reasons, the updates are expected to be a JSON string containing a dictionary
with the key "contact_plan" and value corresponding to the contact_plan described above.
- routing_service, a string. The node will connect to a ROS service located at routing_service to
compute CGR routes between nodes. The routing service should expose the interface declared in
mosaic_routers.srv
- sleep_interval, a float. The internal loop will sleep for this amount of time to avoid busylooping.
"""
self.agents = {}
# Lock on agent publishers. Makes sure we do not delete a publisher in the callback while using it in loop()
self.pub_lock = Lock()
self.start_time_offset = start_time_offset
self.sleep_interval = sleep_interval
self.enqueued_messages = {}
self.agent_subscriptions = {}
self.agent_publishers = {}
self.messages_in_transmission = []
self.contact_plan = contact_plan
# Publish messages sent and received
self.activity_publisher = rospy.Publisher(
'message_activity', String, queue_size=10)
self.loginfo("Waiting for routing service")
rospy.wait_for_service(routing_service)
self.loginfo("Found routing service")
self.router = rospy.ServiceProxy(routing_service, svs.RouterService)
self.loginfo('Acquired proxy to routing service.')
self.update_contact_plan(contact_plan)
# Subscribe to contact plan updates
self.contact_plan_updater = rospy.Subscriber(
contact_plan_topic, String, self.contact_plan_msg_update)
def contact_plan_msg_update(self, msg):
self.logdebug("Received new contact plan")
new_network_topology = json.loads(msg.data)
new_contact_plan = new_network_topology["contact_plan"]
self.update_contact_plan(new_contact_plan)
def update_contact_plan(self, new_contact_plan):
self.contact_plan = new_contact_plan
agents_list = set()
for link in self.contact_plan:
agents_list.add(link["origin"])
agents_list.add(link["destination"])
self.agents = {agent: {
"rx": "/{}/comm/rx".format(agent),
"tx": "/{}/comm/tx".format(agent),
} for agent in agents_list
}
self._update_enqueued_messages()
self._update_subscriptions()
def _update_enqueued_messages(self):
# Add new agents
for tx_agent in self.agents.keys():
if tx_agent not in self.enqueued_messages.keys():
self.enqueued_messages[tx_agent] = {rx_agent: []
for rx_agent in self.agents.keys()}
else:
for rx_agent in self.agents.keys():
if rx_agent not in self.enqueued_messages[tx_agent].keys():
self.enqueued_messages[tx_agent][rx_agent] = []
# Remove stale agents
tx_agents_to_pop = []
for tx_agent in self.enqueued_messages.keys():
if tx_agent not in self.agents.keys():
self.loginfo(
"Agent {} is no longer active, removing".format(tx_agent))
for rx_agent in self.enqueued_messages[tx_agent].keys():
if len(self.enqueued_messages[tx_agent][rx_agent]):
self.logwarn("Dropping messages from removed agent {} to agent {}".format(
tx_agent, rx_agent))
tx_agents_to_pop.append(tx_agent)
else:
rx_agents_to_pop = []
for rx_agent in self.enqueued_messages[tx_agent].keys():
if rx_agent not in self.agents.keys():
self.loginfo(
"Agent {} is no longer active, removing".format(rx_agent))
rx_agents_to_pop.append(rx_agent)
for topop_rx in rx_agents_to_pop:
self.enqueued_messages[tx_agent].pop(topop_rx)
for topop_tx in tx_agents_to_pop:
self.enqueued_messages.pop(topop_tx)
def _update_subscriptions(self):
for agent_name, agent_topics in self.agents.iteritems():
if agent_name not in self.agent_subscriptions.keys():
self.agent_subscriptions[agent_name] = rospy.Subscriber(
agent_topics['tx'], CommData, self.receive, agent_name)
if agent_name not in self.agent_publishers.keys():
self.pub_lock.acquire()
self.agent_publishers[agent_name] = rospy.Publisher(
agent_topics['rx'], CommData, queue_size=def_vals.PUB_QUEUE_SIZE)
self.pub_lock.release()
for agent_name in self.agent_subscriptions.keys():
if agent_name not in self.agents.keys():
self.agent_subscriptions[agent_name].unregister()
self.agent_subscriptions.pop(agent_name)
for agent_name in self.agent_publishers.keys():
if agent_name not in self.agents.keys():
self.agent_publishers.pop(agent_name)
# def receive(self, transmitter: str, msg: CommData):
def receive(self, msg, transmitter):
# Append message to msg queue. The loop function will dequeue if needed.
# Who do we send this to?
receiver = msg.receiver
current_time = rospy.get_time() - self.start_time_offset
data_vol = len(msg.data)*1e3
try:
mock_node_states = {'mock_agent': {'mock_property': None}}
output = self.router(
time=current_time,
source=transmitter,
destination=receiver,
data_vol=data_vol,
contact_plan=json.dumps(self.contact_plan),
nodes_state=json.dumps(mock_node_states),
)
except rospy.ServiceException as e:
self.logerr('Service call failed: {}'.format(e))
self.logerr("Dropping message!")
else:
# Deserialize outputs
next_hop = json.loads(output.next_hop)
# route = json.loads(output.route)
self.logdebug("Received message from node {} to node {}".format(
transmitter, receiver))
if next_hop is None:
self.logerr("ERROR: next_hop is None")
return
self.enqueued_messages[transmitter][next_hop].append(msg)
self.logdebug("There are {} messages in the queue from node {} to node {}".format(len(self.enqueued_messages[transmitter][receiver]),
transmitter, receiver))
def loop(self):
# Get ROS time
current_time = rospy.get_time() - self.start_time_offset
# Loop through links
for link in self.contact_plan:
# If link is active now
if link["time_start"] < current_time and link["time_end"] > current_time and link["bandwidth"] > 0:
# Transmit all messages
while self.enqueued_messages[link["origin"]][link["destination"]]:
assert link["destination"] in self.agent_publishers.keys(
), "ERROR: receiver {} is not in agent publishers".format(link["destination"])
self.pub_lock.acquire()
msg = self.enqueued_messages[link["origin"]
][link["destination"]].pop()
self.logdebug("Forwarding message from {} to {} with destination {}".format(
link["origin"], link["destination"], msg.receiver))
# self.agent_publishers[link["destination"]].publish(msg)
msg_wrapper = message_wrapper(
receive_time=current_time +
len(msg.data)/(link["bandwidth"]*1e3),
destination=link["destination"],
msg=msg
)
self.messages_in_transmission.append(msg_wrapper)
self.activity_publisher.publish(json.dumps({
"duration": msg_wrapper.receive_time-current_time,
"start_time": current_time+self.start_time_offset,
"id": "transfer_data",
"name": "transfer_data",
"params": {
"agent": link["origin"],
"transmitter": link["origin"],
"receiver": link["destination"],
"bandwidth": link["bandwidth"],
"data_type": "{}:{}".format(json.loads(msg_wrapper.msg.data)["req_agent"], json.loads(msg_wrapper.msg.data)["resource_id"]),
"energy_cost": link["energy_cost"],
"reward": 0.,
}
}))
self.logwarn("Message transmission time is {} s (size {} bits, bandwidth {} Mbps)\n Will be received at {} sim time (now is {} sim time), {} UNIX time (now is {} UNIX time)".format(
len(msg.data)*1e3/(link["bandwidth"]*1e6), int(len(msg.data)*1e3), link["bandwidth"], msg_wrapper.receive_time, current_time, msg_wrapper.receive_time+self.start_time_offset, current_time+self.start_time_offset))
self.pub_lock.release()
# These are the messages ready to be received.
ready_msgs = [
msg_wrapper for msg_wrapper in self.messages_in_transmission if msg_wrapper.receive_time <= current_time]
for msg_wrapper in ready_msgs:
# If the message has arrived at its destination:
if msg_wrapper.destination == msg_wrapper.msg.receiver:
self.agent_publishers[msg_wrapper.destination].publish(
msg_wrapper.msg)
# If not, find what is the next hop
else:
data_vol = len(msg_wrapper.msg.data)*1e3
try:
mock_node_states = {'mock_agent': {'mock_property': None}}
output = self.router(
time=current_time,
source=msg_wrapper.destination,
destination=msg_wrapper.msg.receiver,
data_vol=data_vol,
contact_plan=json.dumps(self.contact_plan),
nodes_state=json.dumps(mock_node_states)
)
except rospy.ServiceException as e:
self.logerr('Service call failed: {}'.format(e))
self.logerr("Dropping message!")
else:
# Deserialize outputs
next_hop = json.loads(output.next_hop)
# route = json.loads(output.route)
self.logdebug("Message for {} forwarded to {}".format(
msg_wrapper.destination, next_hop))
self.enqueued_messages[msg_wrapper.destination][next_hop].append(
msg_wrapper.msg)
# Assumption: msg_wrappers are unique
self.messages_in_transmission.remove(msg_wrapper)
# If there are messages waiting or in flight, log it
for origin, destinations in self.enqueued_messages.iteritems():
for destination, msgs in destinations.iteritems():
if len(msgs):
self.logdebug("{} messages enqueued from {} to {}".format(
len(msgs), origin, destination))
if len(self.messages_in_transmission):
self.logdebug("{} messages in transmission".format(
len(self.messages_in_transmission)))
def run(self):
while not rospy.is_shutdown():
self.loop()
sleep(self.sleep_interval)
def logerr(self, msg):
rospy.logerr("[Simple DTN simulator]: {}".format(msg))
def logwarn(self, msg):
rospy.logwarn("[Simple DTN simulator]: {}".format(msg))
def loginfo(self, msg):
rospy.loginfo("[Simple DTN simulator]: {}".format(msg))
def logdebug(self, msg):
rospy.logdebug("[Simple DTN simulator]: {}".format(msg))
if __name__ == "__main__":
rospy.init_node('simple_dtn_sim', anonymous=True)
contact_plan = ast.literal_eval(rospy.get_param('~contact_plan', '[]'))
contact_plan_topic = rospy.get_param(
'~network_topology_topic', '/network_topology')
agents = ast.literal_eval(rospy.get_param(
'~agents', '[]'))
routing_service = rospy.get_param('~routing_service', '/router')
simple_dtn_sim = simple_dtn_simulator(
contact_plan=contact_plan,
start_time_offset=rospy.get_time(),
contact_plan_topic=contact_plan_topic,
routing_service=routing_service,
)
simple_dtn_sim.run()
| 48.944444 | 236 | 0.604553 |
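A hypothetical instantiation sketch for the simulator above, mirroring its __main__ block and showing the contact-plan shape described in the constructor docstring; agent names, times, bandwidths and energy costs are invented, and a ROS master plus the /router service must be running:
import rospy
example_contact_plan = [
    {"origin": "lander", "destination": "orbiter",
     "time_start": 0.0, "time_end": 300.0, "bandwidth": 2.0, "energy_cost": 0.1},
    {"origin": "orbiter", "destination": "base_station",
     "time_start": 60.0, "time_end": 600.0, "bandwidth": 0.5, "energy_cost": 0.4},
]
rospy.init_node("simple_dtn_sim_example", anonymous=True)
sim = simple_dtn_simulator(
    contact_plan=example_contact_plan,
    start_time_offset=rospy.get_time(),
    contact_plan_topic="/network_topology",
    routing_service="/router",
)
sim.run()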
f942e8286374720646e827d9bcca50bac020403e
| 448 |
py
|
Python
|
lello/users/services.py
|
FR98/lello-API
|
2b2deddd04b00d893fdd1194674d354e5002b40e
|
[
"MIT"
] | null | null | null |
lello/users/services.py
|
FR98/lello-API
|
2b2deddd04b00d893fdd1194674d354e5002b40e
|
[
"MIT"
] | null | null | null |
lello/users/services.py
|
FR98/lello-API
|
2b2deddd04b00d893fdd1194674d354e5002b40e
|
[
"MIT"
] | null | null | null |
from django.core.mail import send_mail
from django.template.loader import render_to_string
def enviar_email(to, message):
send_mail(
'Hello from Lello',
message,
'[email protected]',
to,
fail_silently = False,
html_message = render_to_string(
'send/index.html',
{
'message': message
}
)
)
print("Email enviado con exito!")
| 23.578947 | 51 | 0.5625 |
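A one-line usage sketch for the helper above; the recipient address is a placeholder, and the send/index.html template referenced in the function must exist in the project:
enviar_email(["[email protected]"], "Welcome to Lello!")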