Schema (column name and dtype):

| column | dtype |
| --- | --- |
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
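A minimal sketch of loading rows with this schema and filtering on a few of the quality signals; the Parquet file name is a placeholder, not part of the source:

```python
# Hypothetical: inspect a local export of this table and keep rows that
# pass some simple quality thresholds defined by the columns above.
import pandas as pd

df = pd.read_parquet("code_sample.parquet")  # placeholder file name

mask = (
    (df["alphanum_fraction"] > 0.5)
    & (df["max_line_length"] < 200)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]].head())
```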
- hexsha: d99a1e98eccb58cbc0c0cef6e9e6702f33461b0e; size: 5,886; ext: py; lang: Python
- max_stars: public_data/serializers.py in MTES-MCT/sparte @ 3b8ae6d21da81ca761d64ae9dfe2c8f54487211c; licenses: ["MIT"]; max_stars_count: null; stars events: null to null
- max_issues: public_data/serializers.py in MTES-MCT/sparte @ 3b8ae6d21da81ca761d64ae9dfe2c8f54487211c; licenses: ["MIT"]; max_issues_count: 3; issues events: 2022-02-10T11:47:58.000Z to 2022-02-23T18:50:24.000Z
- max_forks: public_data/serializers.py in MTES-MCT/sparte @ 3b8ae6d21da81ca761d64ae9dfe2c8f54487211c; licenses: ["MIT"]; max_forks_count: null; forks events: null to null

content:

```python
from rest_framework_gis import serializers
from rest_framework import serializers as s
from .models import (
Artificialisee2015to2018,
Artificielle2018,
CommunesSybarval,
CouvertureSol,
EnveloppeUrbaine2018,
Ocsge,
Renaturee2018to2015,
Sybarval,
Voirie2018,
ZonesBaties2018,
UsageSol,
)
def get_label(code="", label=""):
if code is None:
code = "-"
if label is None:
label = "inconnu"
return f"{code} {label[:30]}"
class Artificialisee2015to2018Serializer(serializers.GeoFeatureModelSerializer):
usage_2015 = s.SerializerMethodField()
usage_2018 = s.SerializerMethodField()
couverture_2015 = s.SerializerMethodField()
couverture_2018 = s.SerializerMethodField()
def get_usage_2015(self, obj):
return get_label(code=obj.us_2015, label=obj.us_2015_label)
def get_usage_2018(self, obj):
return get_label(code=obj.us_2018, label=obj.us_2018_label)
def get_couverture_2015(self, obj):
return get_label(code=obj.cs_2015, label=obj.cs_2015_label)
def get_couverture_2018(self, obj):
return get_label(code=obj.cs_2018, label=obj.cs_2018_label)
class Meta:
fields = (
"id",
"surface",
"usage_2015",
"usage_2018",
"couverture_2015",
"couverture_2018",
)
geo_field = "mpoly"
model = Artificialisee2015to2018
class Artificielle2018Serializer(serializers.GeoFeatureModelSerializer):
couverture = s.SerializerMethodField()
def get_couverture(self, obj):
return get_label(code=obj.couverture, label=obj.couverture_label)
class Meta:
fields = (
"id",
"surface",
"couverture",
)
geo_field = "mpoly"
model = Artificielle2018
class CommunesSybarvalSerializer(serializers.GeoFeatureModelSerializer):
"""Marker GeoJSON serializer."""
class Meta:
"""Marker serializer meta class."""
fields = (
"nom",
"code_insee",
"surface",
)
geo_field = "mpoly"
model = CommunesSybarval
class EnveloppeUrbaine2018Serializer(serializers.GeoFeatureModelSerializer):
couverture = s.SerializerMethodField()
def get_couverture(self, obj):
return get_label(code=obj.couverture, label=obj.couverture_label)
class Meta:
fields = (
"id",
"couverture",
"surface",
)
geo_field = "mpoly"
model = EnveloppeUrbaine2018
class OcsgeSerializer(serializers.GeoFeatureModelSerializer):
couverture = s.SerializerMethodField()
usage = s.SerializerMethodField()
def get_couverture(self, obj):
return get_label(code=obj.couverture, label=obj.couverture_label)
def get_usage(self, obj):
return get_label(code=obj.usage, label=obj.usage_label)
class Meta:
fields = (
"id",
"couverture",
"usage",
"millesime",
"map_color",
"year",
)
geo_field = "mpoly"
model = Ocsge
class Renaturee2018to2015Serializer(serializers.GeoFeatureModelSerializer):
usage_2015 = s.SerializerMethodField()
usage_2018 = s.SerializerMethodField()
couverture_2015 = s.SerializerMethodField()
couverture_2018 = s.SerializerMethodField()
def get_usage_2015(self, obj):
return get_label(code=obj.us_2015, label=obj.us_2015_label)
def get_usage_2018(self, obj):
return get_label(code=obj.us_2018, label=obj.us_2018_label)
def get_couverture_2015(self, obj):
return get_label(code=obj.cs_2015, label=obj.cs_2015_label)
def get_couverture_2018(self, obj):
return get_label(code=obj.cs_2018, label=obj.cs_2018_label)
class Meta:
fields = (
"id",
"surface",
"usage_2015",
"usage_2018",
"couverture_2015",
"couverture_2018",
)
geo_field = "mpoly"
model = Renaturee2018to2015
class SybarvalSerializer(serializers.GeoFeatureModelSerializer):
class Meta:
fields = (
"id",
"surface",
)
geo_field = "mpoly"
model = Sybarval
class Voirie2018Serializer(serializers.GeoFeatureModelSerializer):
couverture = s.SerializerMethodField()
usage = s.SerializerMethodField()
def get_couverture(self, obj):
return get_label(code=obj.couverture, label=obj.couverture_label)
def get_usage(self, obj):
return get_label(code=obj.usage, label=obj.usage_label)
class Meta:
fields = (
"id",
"surface",
"couverture",
"usage",
)
geo_field = "mpoly"
model = Voirie2018
class ZonesBaties2018Serializer(serializers.GeoFeatureModelSerializer):
couverture = s.SerializerMethodField()
usage = s.SerializerMethodField()
def get_couverture(self, obj):
return get_label(code=obj.couverture, label=obj.couverture_label)
def get_usage(self, obj):
return get_label(code=obj.usage, label=obj.usage_label)
class Meta:
fields = (
"id",
"couverture",
"usage",
"surface",
)
geo_field = "mpoly"
model = ZonesBaties2018
class CouvertureSolSerializer(serializers.ModelSerializer):
class Meta:
fields = (
"id",
"parent",
"code",
"label",
"is_artificial",
)
model = CouvertureSol
class UsageSolSerializer(serializers.ModelSerializer):
class Meta:
fields = (
"id",
"parent",
"code",
"label",
)
model = UsageSol
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 25.37069; max_line_length 80; alphanum_fraction 0.613829
- num_words 558; num_chars 5,886; mean_word_length 6.28853; frac_words_unique 0.130824
- frac_chars_top_2grams 0.029068; top_3grams 0.058136; top_4grams 0.072955
- frac_chars_dupe_5grams 0.702194; 6grams 0.668852; 7grams 0.666002; 8grams 0.656312; 9grams 0.656312; 10grams 0.623254
- frac_chars_replacement_symbols 0; frac_chars_digital 0.061826; frac_chars_whitespace 0.29103
- size_file_byte 5,886; num_lines 231; num_chars_line_max 81; num_chars_line_mean 25.480519
- frac_chars_alphabet 0.779056; frac_chars_comments 0.009514; cate_xml_start 0; frac_lines_dupe_lines 0.664804; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.067194; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0.094972; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.01676; frac_lines_simplefunc 0.089385; score_lines_no_logic 0.418994; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_code_frac_chars_dupe_10grams = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
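Several of the statistics above can be approximated directly from `content`. A minimal sketch, assuming lines split on newlines and words on whitespace; the production pipeline's exact tokenization may differ:

```python
def headline_signals(content: str) -> dict:
    """Rough recomputation of a few of the stored quality signals."""
    lines = content.splitlines()
    words = content.split()
    n = len(content)
    return {
        "num_lines": len(lines),
        "num_chars_line_max": max(len(line) for line in lines),
        "num_chars_line_mean": sum(len(line) for line in lines) / len(lines),
        "alphanum_fraction": sum(c.isalnum() for c in content) / n,
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n,
        "num_words": len(words),
        "mean_word_length": sum(len(w) for w in words) / len(words),
        "frac_words_unique": len(set(words)) / len(words),
    }
```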
- hexsha: d9a0daeef5f3a3455af5c2983af478cd08c74a7b; size: 11,247; ext: py; lang: Python
- max_stars: map_download/cmd/TerrainDownloader.py in cugxy/map_download @ 02142b33edb2bc163f7ae971f443efe84c13e029; licenses: ["MIT"]; max_stars_count: 27; stars events: 2019-04-02T08:34:16.000Z to 2022-01-11T01:48:50.000Z
- max_issues: map_download/cmd/TerrainDownloader.py in cugxy/map_download @ 02142b33edb2bc163f7ae971f443efe84c13e029; licenses: ["MIT"]; max_issues_count: 8; issues events: 2019-10-10T03:03:51.000Z to 2021-11-14T11:01:47.000Z
- max_forks: map_download/cmd/TerrainDownloader.py in cugxy/map_download @ 02142b33edb2bc163f7ae971f443efe84c13e029; licenses: ["MIT"]; max_forks_count: 7; forks events: 2019-04-02T08:43:04.000Z to 2020-08-11T02:14:24.000Z

content:

```python
# -*- coding: utf-8 -*-
# coding=utf-8
import json
import os
import math
import logging
import requests
import time
from map_download.cmd.BaseDownloader import DownloadEngine, BaseDownloaderThread, latlng2tile_terrain, BoundBox
def get_access_token(token):
resp = None
request_count = 0
url = "https://api.cesium.com/v1/assets/1/endpoint"
while True:
if request_count > 4:
break
try:
request_count += 1
param = {'access_token': token}
resp = requests.get(url, params=param, timeout=2)
if resp.status_code != 200:
continue
break
except Exception as e:
resp = None
time.sleep(3)
if resp is None:
return None
resp_json = resp.json()
return resp_json.get('accessToken')
class TerrainDownloaderThread(BaseDownloaderThread):
URL = "https://assets.cesium.com/1/{z}/{x}/{y}.terrain?extensions=octvertexnormals-watermask&v=1.1.0"
def __init__(self, root_dir, bbox, token, task_q, logger=None, write_db=False):
super(TerrainDownloaderThread, self).__init__(
root_dir, bbox, task_q, logger, write_db=write_db, db_file_name='Terrain.db')
self.token = token
self._init_metadata(
format='terrain',
bounds='%f,%f,%f,%f' % (self.bbox.min_lng, self.bbox.min_lat, self.bbox.max_lng, self.bbox.max_lat))
def get_url(self, x, y, z):
return self.URL.format(x=x, y=y, z=z)
def _download(self, x, y, z):
file_path = '%s/%s/%i/%i/%i.%s' % (self.root_dir, 'Terrain', z, x, y, 'terrain')
if os.path.exists(file_path):
self._data2DB(x, y, z, file_path)
return 0
os.makedirs(os.path.dirname(file_path), exist_ok=True)
resp = None
        require_count = 0
_url = ''
access_token = get_access_token(self.token)
if access_token is None:
return -1
param = {'extensions': 'octvertexnormals-watermask', 'v': '1.1.0', 'access_token': access_token}
while True:
            if require_count > 4:
                break
try:
_url = self.get_url(x, y, z)
resp = requests.get(_url, params=param, stream=True, timeout=2)
break
except Exception as e:
resp = None
time.sleep(3)
                require_count += 1
if resp is None:
return -1
if resp.status_code != 200:
return -1
try:
with open(file_path, 'wb') as f:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
except Exception as e:
return -1
self._data2DB(x, y, z, file_path)
return 1
class TerrainDownloadEngine(DownloadEngine):
root_dir = ''
def __init__(self, root_dir, bbox, token, thread_num, logger=None, write_db=False):
super(TerrainDownloadEngine, self).__init__(bbox, thread_num, logger, write_db=write_db)
self.root_dir = root_dir
self.token = token
def bbox2xyz(self, bbox, z):
min_x, min_y = latlng2tile_terrain(bbox.min_lat, bbox.min_lng, z)
max_x, max_y = latlng2tile_terrain(bbox.max_lat, bbox.max_lng, z)
return math.floor(min_x), math.floor(min_y), math.ceil(max_x) + 1, math.ceil(max_y) + 1
def generate_metadata(self):
try:
metadatas = {
"attribution": "© Analytical Graphics Inc., © CGIAR-CSI, Produced using Copernicus data and "
"information funded by the European Union - EU-DEM layers",
"available": [
[
{
"endX": 1,
"endY": 0,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 3,
"endY": 1,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 7,
"endY": 3,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 15,
"endY": 7,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 31,
"endY": 15,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 63,
"endY": 31,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 127,
"endY": 63,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 255,
"endY": 127,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 511,
"endY": 255,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 1023,
"endY": 511,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 2047,
"endY": 1023,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 4095,
"endY": 2047,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 8191,
"endY": 4095,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 16383,
"endY": 8191,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 32767,
"endY": 16383,
"startX": 0,
"startY": 0
}
]
],
"bounds": [-180, -90, 180, 90, ],
"description": "STK World Terrain Premium Tileset, v1.3. 10m - 30m resolution CONUS, 30m resolution "
"SRTM between 60N and 60S, 30m Europe. Minimum global coverage of 1000m.",
"extensions": ["watermask", "vertexnormals", "octvertexnormals", ],
"format": "quantized-mesh-1.0",
"maxzoom": 13,
"minzoom": 0,
"name": "world",
"projection": "EPSG:4326",
"scheme": "tms",
"tilejson": "2.1.0",
"tiles": ["{z}/{x}/{y}.terrain?v={version}", ],
"version": "1.31376.0"
}
_dir = os.path.join(self.root_dir, 'Terrain')
os.makedirs(_dir, exist_ok=True)
metadatas_path = os.path.join(_dir, 'layer.json')
with open(metadatas_path, 'w') as f:
json.dump(metadatas, f)
except Exception as e:
if self.logger is not None:
self.logger.exception(e)
def run(self):
try:
self.generate_metadata()
count = 0
bboxs = self.cut_bbox()
for bbox in bboxs:
_count = self.get_task_count(bbox)
count += _count
self.division_done_signal.emit(count)
for bbox in bboxs:
while True:
if not self.running:
time.sleep(0.01)
else:
break
task_q = self.get_task_queue(bbox)
self.threads = []
for i in range(self.thread_num):
thread = TerrainDownloaderThread(self.root_dir, self.bbox, self.token, task_q, self.logger,
write_db=self.write_db)
thread.sub_progressBar_updated_signal.connect(self.sub_update_progressBar)
self.threads.append(thread)
for thread in self.threads:
thread.start()
for thread in self.threads:
thread.wait()
for t in self.threads:
t.stop()
t.quit()
self.threads = []
self.download_done_signal.emit()
except Exception as e:
if self.logger is not None:
self.logger.error(e)
if __name__ == '__main__':
if 1:
logger = logging.getLogger('down')
try:
root = r'/Users/cugxy/Documents/data/downloader'
formatter = logging.Formatter('%(levelname)s-%(message)s')
hdlr = logging.StreamHandler()
log_file = os.path.join(root, 'down.log')
file_hdlr = logging.FileHandler(log_file)
file_hdlr.setFormatter(formatter)
logger.addHandler(file_hdlr)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
min_lng = -180.0
max_lng = 180.0
min_lat = -90.0
max_lat = 90.0
start_zoom = 0
end_zoom = 5
bbox = BoundBox(max_lat, max_lng, min_lat, min_lng, start_zoom, end_zoom)
            token = ''  # a Cesium ion access token is required here; the original call omitted it
            d = TerrainDownloadEngine(root, bbox, token, 8, logger)
d.start()
time.sleep(10000)
logger.error('main thread out')
except Exception as e:
logger.error(e)
if 0:
        accessToken = get_access_token(token)  # get_access_token() requires the token argument
pass
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 35.479495; max_line_length 117; alphanum_fraction 0.384992
- num_words 980; num_chars 11,247; mean_word_length 4.266327; frac_words_unique 0.257143
- frac_chars_top_2grams 0.025114; top_3grams 0.04664; top_4grams 0.050227
- frac_chars_dupe_5grams 0.218369; 6grams 0.128199; 7grams 0.088017; 8grams 0.056446; 9grams 0.043052; 10grams 0.043052
- frac_chars_replacement_symbols 0; frac_chars_digital 0.043237; frac_chars_whitespace 0.518805
- size_file_byte 11,247; num_lines 316; num_chars_line_max 118; num_chars_line_mean 35.591772
- frac_chars_alphabet 0.728936; frac_chars_comments 0.003112; cate_xml_start 0; frac_lines_dupe_lines 0.30303; cate_autogen 0
- frac_lines_long_string 0.003367; frac_chars_string_length 0.107423; frac_chars_long_word_length 0.010707; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0.026936; cate_var_zero false; frac_lines_pass 0.003367; frac_lines_import 0.023569; frac_lines_simplefunc 0.003367; score_lines_no_logic 0.097643; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_code_frac_chars_whitespace = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
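The long "available" array in generate_metadata follows the EPSG:4326 TMS quadtree, where zoom level z spans 2^(z+1) columns and 2^z rows. The literal above could equivalently be produced programmatically:

```python
# Reproduces the fifteen zoom-level entries (z = 0..14) of the "available"
# list in generate_metadata: endX = 2**(z + 1) - 1, endY = 2**z - 1.
available = [
    [{"startX": 0, "startY": 0, "endX": 2 ** (z + 1) - 1, "endY": 2 ** z - 1}]
    for z in range(15)
]
assert available[0][0]["endX"] == 1 and available[0][0]["endY"] == 0
assert available[14][0]["endX"] == 32767 and available[14][0]["endY"] == 16383
```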
- hexsha: d9aeee22298fa03239ef3d63fdcaa4984d37ba63; size: 3,030; ext: py; lang: Python
- max_stars / max_issues / max_forks (identical): content/test/gpu/gpu_tests/pixel_expectations.py in metux/chromium-deb @ 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42; licenses: ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"]; counts: null; event datetimes: null

content:

```python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_tests.gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel_Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
# Seems to be flaky on the new AMD R7 240 drivers.
self.Flaky('Pixel_GpuRasterization_BlueBox',
['win', ('amd', 0x6613)], bug=653538)
    # Software compositing is not supported on Android, so we skip these
    # tests, which disable GPU compositing, on Android platforms.
self.Skip('Pixel_OffscreenCanvasUnaccelerated2D', ['android'])
self.Skip('Pixel_OffscreenCanvasUnaccelerated2DWorker', ['android'])
self.Skip('Pixel_OffscreenCanvasWebGLSoftwareCompositing', ['android'])
self.Skip('Pixel_OffscreenCanvasWebGLSoftwareCompositingWorker',
['android'])
self.Skip('Pixel_CanvasDisplayLinearRGBUnaccelerated2D', ['android'])
self.Fail('Pixel_ScissorTestWithPreserveDrawingBuffer',
['android'], bug=521588)
# TODO(ccameron) fix these on Mac Retina
self.Fail('Pixel_CSS3DBlueBox', ['mac'], bug=533690)
# TODO(vmiura) check / generate reference images for Android devices
self.Fail('Pixel_SolidColorBackground', ['mac', 'android'], bug=624256)
self.Fail('Pixel_OffscreenCanvasUnaccelerated2DGPUCompositingWorker',
['mac', ('nvidia', 0xfe9)], bug=706016)
self.Fail('Pixel_CSSFilterEffects',
['mac', ('nvidia', 0xfe9)], bug=690277)
# TODO(kbr): flakily timing out on this configuration.
self.Flaky('*', ['linux', 'intel', 'debug'], bug=648369)
self.Flaky('Pixel_Video_MP4', ['android', 'nvidia'], bug=716564)
# Flaky for unknown reasons only on macOS. Not planning to investigate
# further.
self.Flaky('Pixel_ScissorTestWithPreserveDrawingBuffer', ['mac'],
bug=660461)
self.Flaky('Pixel_OffscreenCanvas2DResizeOnWorker',
['win10', ('intel', 0x1912)], bug=690663)
# TODO(zakerinasab): check / generate reference images.
self.Fail('Pixel_Canvas2DUntagged', bug=713632)
self.Flaky('Pixel_OffscreenCanvasTransferBeforeStyleResize',
['mac', 'linux', 'win', 'android'], bug=735228)
self.Flaky('Pixel_OffscreenCanvasTransferAfterStyleResize',
['mac', 'linux', 'win', 'android'], bug=735171)
# TODO(junov): update reference images
self.Fail('Pixel_CSSFilterEffects', ['mac'], bug=721727)
self.Fail('Pixel_CSSFilterEffects_NoOverlays', ['mac'], bug=721727)
# TODO(dshwang): remove these after new reference images are generated.
self.Fail('Pixel_DirectComposition_Video_MP4', bug=615325)
self.Fail('Pixel_DirectComposition_Video_VP9', bug=615325)
self.Fail('Pixel_Video_MP4', bug=615325)
self.Fail('Pixel_Video_VP9', bug=615325)
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 42.083333; max_line_length 78; alphanum_fraction 0.706931
- num_words 325; num_chars 3,030; mean_word_length 6.48; frac_words_unique 0.446154
- frac_chars_top_2grams 0.049383; top_3grams 0.080247; top_4grams 0.037987
- frac_chars_dupe_5grams 0.136752; 6grams 0.041311; 7grams 0.02849; 8grams 0; 9grams 0; 10grams 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0.062896; frac_chars_whitespace 0.165677
- size_file_byte 3,030; num_lines 71; num_chars_line_max 79; num_chars_line_mean 42.676056
- frac_chars_alphabet 0.770174; frac_chars_comments 0.289439; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.436739; frac_chars_long_word_length 0.330834; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0.010309; frac_lines_prompt_comments 0.014085; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0.027778; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.027778; frac_lines_simplefunc 0; score_lines_no_logic 0.083333; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_code_frac_lines_prompt_comments = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
- hexsha: d9afca45a6adc9c41c0b981032c729d59e9db234; size: 2,801; ext: py; lang: Python
- max_stars: examples/p02_budgets/budget_data_ingest/migrations/0001_initial.py in 18F/data-federation-ingest @ a896ef2da1faf3966f018366b26a338bb66cc717; licenses: ["CC0-1.0"]; max_stars_count: 18; stars events: 2019-07-26T13:43:01.000Z to 2022-01-15T14:57:52.000Z
- max_issues: examples/p02_budgets/budget_data_ingest/migrations/0001_initial.py in 18F/data-federation-ingest @ a896ef2da1faf3966f018366b26a338bb66cc717; licenses: ["CC0-1.0"]; max_issues_count: 96; issues events: 2019-06-14T18:30:54.000Z to 2021-08-03T09:25:02.000Z
- max_forks: examples/p02_budgets/budget_data_ingest/migrations/0001_initial.py in 18F/data-federation-ingest @ a896ef2da1faf3966f018366b26a338bb66cc717; licenses: ["CC0-1.0"]; max_forks_count: 3; forks events: 2020-01-23T04:48:18.000Z to 2021-01-12T09:31:20.000Z

content:

```python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-08 22:54
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BudgetItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.IntegerField()),
('agency', models.TextField()),
('data_source', models.TextField()),
('category', models.TextField()),
('dollars_budgeted', models.DecimalField(decimal_places=2, max_digits=14)),
('dollars_spent', models.DecimalField(decimal_places=2, max_digits=14)),
('row_number', models.IntegerField()),
],
),
migrations.CreateModel(
name='Upload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('file_metadata', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
('file', models.FileField(upload_to='')),
('raw', models.BinaryField(null=True)),
('validation_results', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
('status', models.CharField(choices=[('LOADING', 'Loading'), ('PENDING', 'Pending'), ('STAGED', 'Staged'), ('INSERTED', 'Inserted'), ('DELETED', 'Deleted')], default='LOADING', max_length=10)),
('status_changed_at', models.DateTimeField(null=True)),
('replaces', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='replaced_by', to='budget_data_ingest.Upload')),
('status_changed_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
('submitter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='budgetitem',
name='upload',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='budget_data_ingest.Upload'),
),
]
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 47.474576; max_line_length 209; alphanum_fraction 0.611567
- num_words 285; num_chars 2,801; mean_word_length 5.831579; frac_words_unique 0.396491
- frac_chars_top_2grams 0.028881; top_3grams 0.042118; top_4grams 0.066185
- frac_chars_dupe_5grams 0.465704; 6grams 0.358604; 7grams 0.358604; 8grams 0.358604; 9grams 0.247894; 10grams 0.247894
- frac_chars_replacement_symbols 0; frac_chars_digital 0.012258; frac_chars_whitespace 0.24277
- size_file_byte 2,801; num_lines 58; num_chars_line_max 210; num_chars_line_mean 48.293103
- frac_chars_alphabet 0.771334; frac_chars_comments 0.024634; cate_xml_start 0; frac_lines_dupe_lines 0.22; cate_autogen 1
- frac_lines_long_string 0; frac_chars_string_length 0.135581; frac_chars_long_word_length 0.018322; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.1; frac_lines_simplefunc 0; score_lines_no_logic 0.18; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_code_cate_autogen = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
- hexsha: d9b0c3d32e07c56a0732f0fca454740538a940fe; size: 451; ext: py; lang: Python
- max_stars: setup.py in Kaslanarian/PythonSVM @ 715eeef2a245736167addf45a6aee8b40b54d0c7; licenses: ["MIT"]; max_stars_count: 2; stars events: 2021-09-25T01:00:37.000Z to 2021-09-27T12:13:24.000Z
- max_issues: setup.py in Kaslanarian/PythonSVM @ 715eeef2a245736167addf45a6aee8b40b54d0c7; licenses: ["MIT"]; max_issues_count: 1; issues events: 2021-09-17T12:08:14.000Z to 2021-09-17T12:08:14.000Z
- max_forks: setup.py in Kaslanarian/PythonSVM @ 715eeef2a245736167addf45a6aee8b40b54d0c7; licenses: ["MIT"]; max_forks_count: null; forks events: null to null

content:

```python
import setuptools  # enables develop
setuptools.setup(
name='pysvm',
version='0.1',
description='PySVM : A NumPy implementation of SVM based on SMO algorithm',
author_email="[email protected]",
packages=['pysvm'],
license='MIT License',
long_description=open('README.md', encoding='utf-8').read(),
    install_requires=[  # dependencies installed automatically
'numpy', 'sklearn'
],
url='https://github.com/Kaslanarian/PySVM',
)
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 28.1875; max_line_length 79; alphanum_fraction 0.660754
- num_words 54; num_chars 451; mean_word_length 5.462963; frac_words_unique 0.87037
- frac_chars_top_2grams 0; top_3grams 0; top_4grams 0
- frac_chars_dupe_5grams through 10grams: all 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0.03252; frac_chars_whitespace 0.181818
- size_file_byte 451; num_lines 15; num_chars_line_max 80; num_chars_line_mean 30.066667
- frac_chars_alphabet 0.766938; frac_chars_comments 0.046563; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.401869; frac_chars_long_word_length 0.060748; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0; cate_var_zero true; frac_lines_pass 0; frac_lines_import 0.071429; frac_lines_simplefunc 0; score_lines_no_logic 0.071429; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_codepython_cate_var_zero = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
- hexsha: d9b2e0c418fbf0ff7ba59e80c34fb2974714b1c9; size: 398; ext: py; lang: Python
- max_stars / max_issues / max_forks (identical): polling_stations/apps/data_collection/management/commands/import_torbay.py in chris48s/UK-Polling-Stations @ 4742b527dae94f0276d35c80460837be743b7d17; licenses: ["BSD-3-Clause"]; counts: null; event datetimes: null

content:

```python
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E06000027'
addresses_name = 'parl.2017-06-08/Version 1/Torbay Democracy_Club__08June2017.tsv'
stations_name = 'parl.2017-06-08/Version 1/Torbay Democracy_Club__08June2017.tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 44.222222; max_line_length 86; alphanum_fraction 0.788945
- num_words 47; num_chars 398; mean_word_length 6.446809; frac_words_unique 0.638298
- frac_chars_top_2grams 0.079208; top_3grams 0.09901; top_4grams 0.118812
- frac_chars_dupe_5grams through 10grams: all 0.369637
- frac_chars_replacement_symbols 0; frac_chars_digital 0.130682; frac_chars_whitespace 0.115578
- size_file_byte 398; num_lines 8; num_chars_line_max 87; num_chars_line_mean 49.75
- frac_chars_alphabet 0.730114; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.38191; frac_chars_long_word_length 0.266332; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.285714; frac_lines_simplefunc 0; score_lines_no_logic 1.142857; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_code_num_lines = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
- hexsha: d9b38469f6b00b7a441fff875e4ecd7bcc272b7e; size: 1,832; ext: py; lang: Python
- max_stars / max_issues / max_forks (identical): Backend/product/views.py in Bhavya0020/Readopolis @ a0053e4fae97dc8291b50c746f3dc3e6b454ad95; licenses: ["MIT"]; counts: null; event datetimes: null

content:

```python
from django.db.models import Q
from django.shortcuts import render
from django.http import Http404
# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Product, Category
from .serializers import ProductSerializer, CategorySerializer
class LatestProductsList(APIView):
def get(self, request, format=None):
products = Product.objects.all()[0:4]
        serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
class ProductDetail(APIView):
def get_object(self, category_slug, product_slug):
try:
return Product.objects.filter(category__slug=category_slug).get(slug=product_slug)
except Product.DoesNotExist:
raise Http404
    def get(self, request, category_slug, product_slug, format=None):
product = self.get_object(category_slug, product_slug)
serializer = ProductSerializer(product)
return Response(serializer.data)
class CategoryDetail(APIView):
def get_object(self, category_slug):
try:
return Category.objects.get(slug=category_slug)
except Category.DoesNotExist:
raise Http404
    def get(self, request, category_slug, format=None):
category = self.get_object(category_slug)
serializer = CategorySerializer(category)
return Response(serializer.data)
@api_view(['POST'])
def search(request):
query = request.data.get('query', '')
if query:
products = Product.objects.filter(Q(name__icontains=query) | Q(description__icontains=query))
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
else:
return Response({"products": []})
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 34.566038; max_line_length 101; alphanum_fraction 0.715611
- num_words 208; num_chars 1,832; mean_word_length 6.173077; frac_words_unique 0.293269
- frac_chars_top_2grams 0.084112; top_3grams 0.074766; top_4grams 0.087227
- frac_chars_dupe_5grams 0.316199; 6grams 0.247664; 7grams 0.247664; 8grams 0.193146; 9grams 0.193146; 10grams 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0.007478; frac_chars_whitespace 0.197052
- size_file_byte 1,832; num_lines 53; num_chars_line_max 102; num_chars_line_mean 34.566038
- frac_chars_alphabet 0.865398; frac_chars_comments 0.012555; cate_xml_start 0; frac_lines_dupe_lines 0.238095; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.009403; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0.142857; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.190476; frac_lines_simplefunc 0; score_lines_no_logic 0.571429; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_codepython_score_lines_no_logic = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
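A minimal sketch of exercising the `search` view above with DRF's request factory; it assumes a configured Django project with this app installed, and the URL path is illustrative:

```python
from rest_framework.test import APIRequestFactory

# Build a POST request like a frontend would send and call the view directly.
factory = APIRequestFactory()
request = factory.post("/api/v1/products/search/", {"query": "django"}, format="json")
response = search(request)  # `search` is the @api_view function above
print(response.data)  # {"products": []} when no product matches the query
```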
- hexsha: d9b4dfc1ad39620d7b5b2d1c39ad7fd8f6cec36b; size: 819; ext: py; lang: Python
- max_stars: core/src/zeit/cms/settings/interfaces.py in rickdg/vivi @ 16134ac954bf8425646d4ad47bdd1f372e089355; licenses: ["BSD-3-Clause"]; max_stars_count: 5; stars events: 2019-05-16T09:51:29.000Z to 2021-05-31T09:30:03.000Z
- max_issues: core/src/zeit/cms/settings/interfaces.py in rickdg/vivi @ 16134ac954bf8425646d4ad47bdd1f372e089355; licenses: ["BSD-3-Clause"]; max_issues_count: 107; issues events: 2019-05-24T12:19:02.000Z to 2022-03-23T15:05:56.000Z
- max_forks: core/src/zeit/cms/settings/interfaces.py in rickdg/vivi @ 16134ac954bf8425646d4ad47bdd1f372e089355; licenses: ["BSD-3-Clause"]; max_forks_count: 3; forks events: 2020-08-14T11:01:17.000Z to 2022-01-08T17:32:19.000Z

content:

```python
from zeit.cms.i18n import MessageFactory as _
import zope.interface
import zope.schema
class IGlobalSettings(zope.interface.Interface):
"""Global CMS settings."""
default_year = zope.schema.Int(
title=_("Default year"),
min=1900,
max=2100)
default_volume = zope.schema.Int(
title=_("Default volume"),
min=1,
max=54)
def get_working_directory(template):
"""Return the collection which is the main working directory.
template:
Template which will be filled with year and volume. In
``template`` the placeholders $year and $volume will be replaced.
Example: 'online/$year/$volume/foo'
If the respective collection does not exist, it will be created before
returning it.
"""
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 26.419355; max_line_length 78; alphanum_fraction 0.636142
- num_words 98; num_chars 819; mean_word_length 5.244898; frac_words_unique 0.571429
- frac_chars_top_2grams 0.058366; top_3grams 0.050584; top_4grams 0.070039
- frac_chars_dupe_5grams 0.097276; 6grams 0; 7grams 0; 8grams 0; 9grams 0; 10grams 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0.021886; frac_chars_whitespace 0.274725
- size_file_byte 819; num_lines 30; num_chars_line_max 79; num_chars_line_mean 27.3
- frac_chars_alphabet 0.843434; frac_chars_comments 0.421245; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.064198; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0.076923; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.230769; frac_lines_simplefunc 0; score_lines_no_logic 0.538462; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_codepython_score_lines_no_logic = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
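The get_working_directory docstring above describes filling $year and $volume placeholders; Python's string.Template implements exactly that substitution syntax. A small illustration:

```python
from string import Template

# The docstring's example template, filled with a year and a volume.
template = "online/$year/$volume/foo"
path = Template(template).substitute(year=2006, volume=7)
assert path == "online/2006/7/foo"
```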
- hexsha: d9b79f86fa592dbe24c72c454192af966a916a5a; size: 12,444; ext: py; lang: Python
- max_stars / max_issues / max_forks (identical): eth2/beacon/chains/base.py in mhchia/trinity @ e40e475064ca4605887706e9b0e4f8e2349b10cd; licenses: ["MIT"]; counts: null; event datetimes: null

content:

```python
from abc import (
ABC,
abstractmethod,
)
import logging
from typing import (
TYPE_CHECKING,
Tuple,
Type,
)
from eth._utils.datatypes import (
Configurable,
)
from eth.db.backends.base import (
BaseAtomicDB,
)
from eth.exceptions import (
BlockNotFound,
)
from eth.validation import (
validate_word,
)
from eth_typing import (
Hash32,
)
from eth_utils import (
ValidationError,
encode_hex,
)
from eth2._utils.ssz import (
validate_imported_block_unchanged,
)
from eth2.beacon.db.chain import (
BaseBeaconChainDB,
BeaconChainDB,
)
from eth2.beacon.exceptions import (
BlockClassError,
StateMachineNotFound,
)
from eth2.beacon.types.blocks import (
BaseBeaconBlock,
)
from eth2.beacon.types.states import (
BeaconState,
)
from eth2.beacon.typing import (
FromBlockParams,
Slot,
)
from eth2.beacon.validation import (
validate_slot,
)
if TYPE_CHECKING:
from eth2.beacon.state_machines.base import ( # noqa: F401
BaseBeaconStateMachine,
)
class BaseBeaconChain(Configurable, ABC):
"""
The base class for all BeaconChain objects
"""
chaindb = None # type: BaseBeaconChainDB
chaindb_class = None # type: Type[BaseBeaconChainDB]
sm_configuration = None # type: Tuple[Tuple[Slot, Type[BaseBeaconStateMachine]], ...]
chain_id = None # type: int
#
# Helpers
#
@classmethod
@abstractmethod
def get_chaindb_class(cls) -> Type[BaseBeaconChainDB]:
pass
#
# Chain API
#
@classmethod
@abstractmethod
def from_genesis(cls,
base_db: BaseAtomicDB,
genesis_state: BeaconState,
genesis_block: BaseBeaconBlock) -> 'BaseBeaconChain':
pass
#
# State Machine API
#
@classmethod
@abstractmethod
def get_state_machine_class(
cls,
block: BaseBeaconBlock) -> Type['BaseBeaconStateMachine']:
pass
@abstractmethod
def get_state_machine(self, at_block: BaseBeaconBlock=None) -> 'BaseBeaconStateMachine':
pass
@classmethod
@abstractmethod
def get_state_machine_class_for_block_slot(
cls,
slot: Slot) -> Type['BaseBeaconStateMachine']:
pass
#
# Block API
#
@abstractmethod
def get_block_class(self, block_root: Hash32) -> Type[BaseBeaconBlock]:
pass
@abstractmethod
def create_block_from_parent(self,
parent_block: BaseBeaconBlock,
block_params: FromBlockParams) -> BaseBeaconBlock:
pass
@abstractmethod
def get_block_by_root(self, block_root: Hash32) -> BaseBeaconBlock:
pass
@abstractmethod
def get_canonical_head(self) -> BaseBeaconBlock:
pass
@abstractmethod
def get_score(self, block_root: Hash32) -> int:
pass
@abstractmethod
def ensure_block(self, block: BaseBeaconBlock=None) -> BaseBeaconBlock:
pass
@abstractmethod
def get_block(self) -> BaseBeaconBlock:
pass
@abstractmethod
def get_canonical_block_by_slot(self, slot: Slot) -> BaseBeaconBlock:
pass
@abstractmethod
def get_canonical_block_root(self, slot: Slot) -> Hash32:
pass
@abstractmethod
def import_block(
self,
block: BaseBeaconBlock,
perform_validation: bool=True
) -> Tuple[BaseBeaconBlock, Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
pass
class BeaconChain(BaseBeaconChain):
"""
A Chain is a combination of one or more ``StateMachine`` classes. Each ``StateMachine``
is associated with a range of slots. The Chain class acts as a wrapper around these other
StateMachine classes, delegating operations to the appropriate StateMachine depending on the
current block slot number.
"""
logger = logging.getLogger("eth2.beacon.chains.BeaconChain")
chaindb_class = BeaconChainDB # type: Type[BaseBeaconChainDB]
def __init__(self, base_db: BaseAtomicDB) -> None:
if not self.sm_configuration:
raise ValueError(
"The Chain class cannot be instantiated with an empty `sm_configuration`"
)
else:
# TODO implment validate_sm_configuration(self.sm_configuration)
# validate_sm_configuration(self.sm_configuration)
pass
self.chaindb = self.get_chaindb_class()(base_db)
#
# Helpers
#
@classmethod
def get_chaindb_class(cls) -> Type['BaseBeaconChainDB']:
if cls.chaindb_class is None:
raise AttributeError("`chaindb_class` not set")
return cls.chaindb_class
#
# Chain API
#
@classmethod
def from_genesis(cls,
base_db: BaseAtomicDB,
genesis_state: BeaconState,
genesis_block: BaseBeaconBlock) -> 'BaseBeaconChain':
"""
Initialize the ``BeaconChain`` from a genesis state.
"""
sm_class = cls.get_state_machine_class_for_block_slot(genesis_block.slot)
if type(genesis_block) != sm_class.block_class:
raise BlockClassError(
"Given genesis block class: {}, StateMachine.block_class: {}".format(
type(genesis_block),
sm_class.block_class
)
)
chaindb = cls.get_chaindb_class()(db=base_db)
chaindb.persist_state(genesis_state)
return cls._from_genesis_block(base_db, genesis_block)
@classmethod
def _from_genesis_block(cls,
base_db: BaseAtomicDB,
genesis_block: BaseBeaconBlock) -> 'BaseBeaconChain':
"""
Initialize the ``BeaconChain`` from the genesis block.
"""
chaindb = cls.get_chaindb_class()(db=base_db)
chaindb.persist_block(genesis_block, genesis_block.__class__)
return cls(base_db)
#
# StateMachine API
#
@classmethod
def get_state_machine_class(cls, block: BaseBeaconBlock) -> Type['BaseBeaconStateMachine']:
"""
Returns the ``StateMachine`` instance for the given block slot number.
"""
return cls.get_state_machine_class_for_block_slot(block.slot)
@classmethod
def get_state_machine_class_for_block_slot(
cls,
slot: Slot) -> Type['BaseBeaconStateMachine']:
"""
Return the ``StateMachine`` class for the given block slot number.
"""
if cls.sm_configuration is None:
raise AttributeError("Chain classes must define the StateMachines in sm_configuration")
validate_slot(slot)
for start_slot, sm_class in reversed(cls.sm_configuration):
if slot >= start_slot:
return sm_class
raise StateMachineNotFound("No StateMachine available for block slot: #{0}".format(slot))
def get_state_machine(self, at_block: BaseBeaconBlock=None) -> 'BaseBeaconStateMachine':
"""
Return the ``StateMachine`` instance for the given block number.
"""
block = self.ensure_block(at_block)
sm_class = self.get_state_machine_class_for_block_slot(block.slot)
return sm_class(
chaindb=self.chaindb,
block=block,
)
#
# Block API
#
def get_block_class(self, block_root: Hash32) -> Type[BaseBeaconBlock]:
slot = self.chaindb.get_slot_by_root(block_root)
sm_class = self.get_state_machine_class_for_block_slot(slot)
block_class = sm_class.block_class
return block_class
def create_block_from_parent(self,
parent_block: BaseBeaconBlock,
block_params: FromBlockParams) -> BaseBeaconBlock:
"""
Passthrough helper to the ``StateMachine`` class of the block descending from the
given block.
"""
return self.get_state_machine_class_for_block_slot(
slot=parent_block.slot + 1 if block_params.slot is None else block_params.slot,
).create_block_from_parent(parent_block, block_params)
def get_block_by_root(self, block_root: Hash32) -> BaseBeaconBlock:
"""
Return the requested block as specified by block hash.
Raise ``BlockNotFound`` if there's no block with the given hash in the db.
"""
validate_word(block_root, title="Block Hash")
block_class = self.get_block_class(block_root)
return self.chaindb.get_block_by_root(block_root, block_class)
def get_canonical_head(self) -> BaseBeaconBlock:
"""
Return the block at the canonical chain head.
Raise ``CanonicalHeadNotFound`` if there's no head defined for the canonical chain.
"""
block_root = self.chaindb.get_canonical_head_root()
block_class = self.get_block_class(block_root)
return self.chaindb.get_block_by_root(block_root, block_class)
def get_score(self, block_root: Hash32) -> int:
"""
Return the score of the block with the given hash.
        Raise ``BlockNotFound`` if there is no matching block hash.
"""
return self.chaindb.get_score(block_root)
def ensure_block(self, block: BaseBeaconBlock=None) -> BaseBeaconBlock:
"""
Return ``block`` if it is not ``None``, otherwise return the block
of the canonical head.
"""
if block is None:
head = self.get_canonical_head()
return self.create_block_from_parent(head, FromBlockParams())
else:
return block
def get_block(self) -> BaseBeaconBlock:
"""
Return the current TIP block.
"""
return self.get_state_machine().block
def get_canonical_block_by_slot(self, slot: Slot) -> BaseBeaconBlock:
"""
Return the block with the given number in the canonical chain.
Raise ``BlockNotFound`` if there's no block with the given number in the
canonical chain.
"""
validate_slot(slot)
return self.get_block_by_root(self.chaindb.get_canonical_block_root(slot))
def get_canonical_block_root(self, slot: Slot) -> Hash32:
"""
Return the block hash with the given number in the canonical chain.
Raise ``BlockNotFound`` if there's no block with the given number in the
canonical chain.
"""
return self.chaindb.get_canonical_block_root(slot)
def import_block(
self,
block: BaseBeaconBlock,
perform_validation: bool=True
) -> Tuple[BaseBeaconBlock, Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
"""
Import a complete block and returns a 3-tuple
- the imported block
- a tuple of blocks which are now part of the canonical chain.
- a tuple of blocks which were canonical and now are no longer canonical.
"""
try:
parent_block = self.get_block_by_root(block.previous_block_root)
except BlockNotFound:
raise ValidationError(
"Attempt to import block #{}. Cannot import block {} before importing "
"its parent block at {}".format(
block.slot,
block.signed_root,
block.previous_block_root,
)
)
base_block_for_import = self.create_block_from_parent(
parent_block,
FromBlockParams(),
)
state, imported_block = self.get_state_machine(base_block_for_import).import_block(block)
# Validate the imported block.
if perform_validation:
validate_imported_block_unchanged(imported_block, block)
# TODO: Now it just persists all state. Should design how to clean up the old state.
self.chaindb.persist_state(state)
(
new_canonical_blocks,
old_canonical_blocks,
) = self.chaindb.persist_block(imported_block, imported_block.__class__)
self.logger.debug(
'IMPORTED_BLOCK: slot %s | signed root %s',
imported_block.slot,
encode_hex(imported_block.signed_root),
)
return imported_block, new_canonical_blocks, old_canonical_blocks
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 30.955224; max_line_length 99; alphanum_fraction 0.634201
- num_words 1,348; num_chars 12,444; mean_word_length 5.622404; frac_words_unique 0.141691
- frac_chars_top_2grams 0.017417; top_3grams 0.025729; top_4grams 0.02375
- frac_chars_dupe_5grams 0.485024; 6grams 0.445046; 7grams 0.38224; 8grams 0.338831; 9grams 0.288429; 10grams 0.264283
- frac_chars_replacement_symbols 0; frac_chars_digital 0.003717; frac_chars_whitespace 0.286564
- size_file_byte 12,444; num_lines 401; num_chars_line_max 100; num_chars_line_mean 31.032419
- frac_chars_alphabet 0.849966; frac_chars_comments 0.180006; cate_xml_start 0; frac_lines_dupe_lines 0.400794; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.064676; frac_chars_long_word_length 0.019259; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0.004988; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0.126984; cate_var_zero false; frac_lines_pass 0.063492; frac_lines_import 0.115079; frac_lines_simplefunc 0; score_lines_no_logic 0.34127; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_codepython_frac_lines_pass = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
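The slot-based dispatch in get_state_machine_class_for_block_slot scans sm_configuration in reverse and returns the first state machine whose start slot is at or below the requested slot. A toy illustration with placeholder names:

```python
# Toy sm_configuration: (start_slot, state machine name) pairs, ascending.
SM_CONFIGURATION = [(0, "GenesisSM"), (100, "UpgradeSM")]

def sm_for_slot(slot: int) -> str:
    # Reversed scan picks the entry with the highest start_slot <= slot.
    for start_slot, sm_name in reversed(SM_CONFIGURATION):
        if slot >= start_slot:
            return sm_name
    raise LookupError(f"No StateMachine available for block slot: #{slot}")

assert sm_for_slot(5) == "GenesisSM"
assert sm_for_slot(100) == "UpgradeSM"
```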
- hexsha: d9ba3c5b12232bbc811a9ad606f2570ac2481108; size: 10,492; ext: py; lang: Python
- max_stars / max_issues / max_forks (identical): nova/conf/hyperv.py in raubvogel/nova @ b78be4e83cdc191e20a4a61b6aae72cb2b37f62b; licenses: ["Apache-2.0"]; counts: null; event datetimes: null

content:

```python
# Copyright (c) 2016 TUBITAK BILGEM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
hyperv_opt_group = cfg.OptGroup("hyperv",
title='The Hyper-V feature',
help="""
The hyperv feature allows you to configure the Hyper-V hypervisor
driver to be used within an OpenStack deployment.
""")
hyperv_opts = [
cfg.FloatOpt('dynamic_memory_ratio',
default=1.0,
help="""
Dynamic memory ratio
Enables dynamic memory allocation (ballooning) when set to a value
greater than 1. The value expresses the ratio between the total RAM
assigned to an instance and its startup RAM amount. For example a
ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of
RAM allocated at startup.
Possible values:
* 1.0: Disables dynamic memory allocation (Default).
* Float values greater than 1.0: Enables allocation of total implied
RAM divided by this value for startup.
"""),
cfg.BoolOpt('enable_instance_metrics_collection',
default=False,
help="""
Enable instance metrics collection
Enables metrics collections for an instance by using Hyper-V's
metric APIs. Collected data can be retrieved by other apps and
services, e.g.: Ceilometer.
"""),
cfg.StrOpt('instances_path_share',
default="",
help="""
Instances path share
The name of a Windows share mapped to the "instances_path" dir
and used by the resize feature to copy files to the target host.
If left blank, an administrative share (hidden network share) will
be used, looking for the same "instances_path" used locally.
Possible values:
* "": An administrative share will be used (Default).
* Name of a Windows share.
Related options:
* "instances_path": The directory which will be used if this option
here is left blank.
"""),
cfg.BoolOpt('limit_cpu_features',
default=False,
help="""
Limit CPU features
This flag is needed to support live migration to hosts with
different CPU features and checked during instance creation
in order to limit the CPU features used by the instance.
"""),
cfg.IntOpt('mounted_disk_query_retry_count',
default=10,
min=0,
help="""
Mounted disk query retry count
The number of times to retry checking for a mounted disk.
The query runs until the device can be found or the retry
count is reached.
Possible values:
* Positive integer values. Values greater than 1 is recommended
(Default: 10).
Related options:
* Time interval between disk mount retries is declared with
"mounted_disk_query_retry_interval" option.
"""),
cfg.IntOpt('mounted_disk_query_retry_interval',
default=5,
min=0,
help="""
Mounted disk query retry interval
Interval between checks for a mounted disk, in seconds.
Possible values:
* Time in seconds (Default: 5).
Related options:
* This option is meaningful when the mounted_disk_query_retry_count
is greater than 1.
* The retry loop runs with mounted_disk_query_retry_count and
mounted_disk_query_retry_interval configuration options.
"""),
cfg.IntOpt('power_state_check_timeframe',
default=60,
min=0,
help="""
Power state check timeframe
The timeframe to be checked for instance power state changes.
This option is used to fetch the state of the instance from Hyper-V
through the WMI interface, within the specified timeframe.
Possible values:
* Timeframe in seconds (Default: 60).
"""),
cfg.IntOpt('power_state_event_polling_interval',
default=2,
min=0,
help="""
Power state event polling interval
Instance power state change event polling frequency. Sets the
listener interval for power state events to the given value.
This option enhances the internal lifecycle notifications of
instances that reboot themselves. It is unlikely that an operator
has to change this value.
Possible values:
* Time in seconds (Default: 2).
"""),
cfg.StrOpt('qemu_img_cmd',
default="qemu-img.exe",
help="""
qemu-img command
qemu-img is required for some of the image related operations
like converting between different image types. You can get it
from here: (http://qemu.weilnetz.de/) or you can install the
Cloudbase OpenStack Hyper-V Compute Driver
(https://cloudbase.it/openstack-hyperv-driver/) which automatically
sets the proper path for this config option. You can either give the
full path of qemu-img.exe or set its path in the PATH environment
variable and leave this option to the default value.
Possible values:
* Name of the qemu-img executable, in case it is in the same
directory as the nova-compute service or its path is in the
PATH environment variable (Default).
* Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).
Related options:
* If the config_drive_cdrom option is False, qemu-img will be used to
convert the ISO to a VHD, otherwise the config drive will
remain an ISO. To use config drive with Hyper-V, you must
set the ``mkisofs_cmd`` value to the full path to an ``mkisofs.exe``
installation.
"""),
cfg.StrOpt('vswitch_name',
help="""
External virtual switch name
The Hyper-V Virtual Switch is a software-based layer-2 Ethernet
network switch that is available with the installation of the
Hyper-V server role. The switch includes programmatically managed
and extensible capabilities to connect virtual machines to both
virtual networks and the physical network. In addition, Hyper-V
Virtual Switch provides policy enforcement for security, isolation,
and service levels. The vSwitch represented by this config option
must be an external one (not internal or private).
Possible values:
* If not provided, the first of a list of available vswitches
is used. This list is queried using WQL.
* Virtual switch name.
"""),
cfg.IntOpt('wait_soft_reboot_seconds',
default=60,
min=0,
help="""
Wait soft reboot seconds
Number of seconds to wait for instance to shut down after soft
reboot request is made. We fall back to hard reboot if instance
does not shutdown within this window.
Possible values:
* Time in seconds (Default: 60).
"""),
cfg.BoolOpt('config_drive_cdrom',
default=False,
help="""
Mount config drive as a CD drive.
OpenStack can be configured to write instance metadata to a config drive, which
is then attached to the instance before it boots. The config drive can be
attached as a disk drive (default) or as a CD drive.
Related options:
* This option is meaningful with ``force_config_drive`` option set to ``True``
or when the REST API call to create an instance will have
``--config-drive=True`` flag.
* ``config_drive_format`` option must be set to ``iso9660`` in order to use
CD drive as the config drive image.
* To use config drive with Hyper-V, you must set the
``mkisofs_cmd`` value to the full path to an ``mkisofs.exe`` installation.
Additionally, you must set the ``qemu_img_cmd`` value to the full path
to an ``qemu-img`` command installation.
* You can configure the Compute service to always create a configuration
drive by setting the ``force_config_drive`` option to ``True``.
"""),
cfg.BoolOpt('config_drive_inject_password',
default=False,
help="""
Inject password to config drive.
When enabled, the admin password will be available from the config drive image.
Related options:
* This option is meaningful when used with other options that enable
config drive usage with Hyper-V, such as ``force_config_drive``.
"""),
cfg.IntOpt('volume_attach_retry_count',
default=10,
min=0,
help="""
Volume attach retry count
The number of times to retry attaching a volume. Volume attachment
is retried until success or the given retry count is reached.
Possible values:
* Positive integer values (Default: 10).
Related options:
* Time interval between attachment attempts is declared with
volume_attach_retry_interval option.
"""),
cfg.IntOpt('volume_attach_retry_interval',
default=5,
min=0,
help="""
Volume attach retry interval
Interval between volume attachment attempts, in seconds.
Possible values:
* Time in seconds (Default: 5).
Related options:
* This options is meaningful when volume_attach_retry_count
is greater than 1.
* The retry loop runs with volume_attach_retry_count and
volume_attach_retry_interval configuration options.
"""),
cfg.BoolOpt('enable_remotefx',
default=False,
help="""
Enable RemoteFX feature
This requires at least one DirectX 11 capable graphics adapter for
Windows / Hyper-V Server 2012 R2 or newer and RDS-Virtualization
feature has to be enabled.
Instances with RemoteFX can be requested with the following flavor
extra specs:
**os:resolution**. Guest VM screen resolution size. Acceptable values::
1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160
``3840x2160`` is only available on Windows / Hyper-V Server 2016.
**os:monitors**. Guest VM number of monitors. Acceptable values::
[1, 4] - Windows / Hyper-V Server 2012 R2
[1, 8] - Windows / Hyper-V Server 2016
**os:vram**. Guest VM VRAM amount. Only available on
Windows / Hyper-V Server 2016. Acceptable values::
64, 128, 256, 512, 1024
"""),
cfg.BoolOpt('use_multipath_io',
default=False,
help="""
Use multipath connections when attaching iSCSI or FC disks.
This requires the Multipath IO Windows feature to be enabled. MPIO must be
configured to claim such devices.
"""),
cfg.ListOpt('iscsi_initiator_list',
default=[],
help="""
List of iSCSI initiators that will be used for establishing iSCSI sessions.
If none are specified, the Microsoft iSCSI initiator service will choose the
initiator.
""")
]
def register_opts(conf):
conf.register_group(hyperv_opt_group)
conf.register_opts(hyperv_opts, group=hyperv_opt_group)
def list_opts():
return {hyperv_opt_group: hyperv_opts}
```

Quality and size statistics (`qsc_*_quality_signal` names shortened):

- avg_line_length 31.04142; max_line_length 79; alphanum_fraction 0.735989
- num_words 1,552; num_chars 10,492; mean_word_length 4.906572; frac_words_unique 0.278351
- frac_chars_top_2grams 0.026001; top_3grams 0.016809; top_4grams 0.022062
- frac_chars_dupe_5grams 0.222981; 6grams 0.161261; 7grams 0.127118; 8grams 0.088378; 9grams 0.066448; 10grams 0.052265
- frac_chars_replacement_symbols 0; frac_chars_digital 0.018754; frac_chars_whitespace 0.191956
- size_file_byte 10,492; num_lines 337; num_chars_line_max 80; num_chars_line_mean 31.133531
- frac_chars_alphabet 0.879453; frac_chars_comments 0.057568; cate_xml_start 0; frac_lines_dupe_lines 0.31746; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.841993; frac_chars_long_word_length 0.065836; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- Python signals: cate_ast 1; frac_lines_func_ratio 0.007937; cate_var_zero false; frac_lines_pass 0.011905; frac_lines_import 0.003968; frac_lines_simplefunc 0.003968; score_lines_no_logic 0.015873; frac_lines_print 0
- Raw `qsc_*` flag columns: all 0 except qsc_code_frac_chars_string_length = 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null
- effective 0; hits 1
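A minimal sketch of consuming these options through oslo.config, assuming the module above is importable as nova.conf.hyperv:

```python
from oslo_config import cfg

from nova.conf import hyperv  # assumed import path for the module above

conf = cfg.ConfigOpts()
hyperv.register_opts(conf)
conf([])  # parse an empty argv so the defaults defined above apply

print(conf.hyperv.dynamic_memory_ratio)       # 1.0 (dynamic memory disabled)
print(conf.hyperv.volume_attach_retry_count)  # 10
```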
- hexsha: d9bd741cd9ad9e20eeb1069fce4709781f43edd4; size: 6,476; ext: py; lang: Python
- max_stars: Qt_interface/add_subject.py in kithsirij/NLP-based-Syllabus-Coverage-Exam-paper-checker-Tool @ b7b38a7b7c6d0a2ad5264df32acd75cdef552bd0; licenses: ["MIT"]; max_stars_count: 1; stars events: 2019-07-17T09:08:41.000Z to 2019-07-17T09:08:41.000Z
- max_issues: Qt_interface/add_subject.py in kithsirij/NLP-based-Syllabus-Coverage-Exam-paper-checker-Tool @ b7b38a7b7c6d0a2ad5264df32acd75cdef552bd0; licenses: ["MIT"]; max_issues_count: null; issues events: null to null
- max_forks: Qt_interface/add_subject.py in kithsirij/NLP-based-Syllabus-Coverage-Exam-paper-checker-Tool @ b7b38a7b7c6d0a2ad5264df32acd75cdef552bd0; licenses: ["MIT"]; max_forks_count: null; forks events: null to null

content:

```python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'add_subject.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog_add_subject(object):
def setupUi(self, Dialog_add_subject):
Dialog_add_subject.setObjectName(_fromUtf8("Dialog_add_subject"))
Dialog_add_subject.resize(568, 374)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
font.setPointSize(10)
Dialog_add_subject.setFont(font)
Dialog_add_subject.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("Qt_interface/SE_syllabus/4zIr6y.jpg")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog_add_subject.setWindowIcon(icon)
self.lbl_subject_name = QtGui.QLabel(Dialog_add_subject)
self.lbl_subject_name.setGeometry(QtCore.QRect(50, 235, 131, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
font.setPointSize(12)
self.lbl_subject_name.setFont(font)
self.lbl_subject_name.setObjectName(_fromUtf8("lbl_subject_name"))
self.label_add_subject = QtGui.QLabel(Dialog_add_subject)
self.label_add_subject.setGeometry(QtCore.QRect(220, 30, 151, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_add_subject.setFont(font)
self.label_add_subject.setObjectName(_fromUtf8("label_add_subject"))
self.lineEdit_subject_name = QtGui.QLineEdit(Dialog_add_subject)
self.lineEdit_subject_name.setGeometry(QtCore.QRect(190, 230, 321, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
font.setPointSize(12)
self.lineEdit_subject_name.setFont(font)
self.lineEdit_subject_name.setObjectName(_fromUtf8("lineEdit_subject_name"))
self.label_year = QtGui.QLabel(Dialog_add_subject)
self.label_year.setGeometry(QtCore.QRect(50, 95, 81, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
font.setPointSize(12)
self.label_year.setFont(font)
self.label_year.setObjectName(_fromUtf8("label_year"))
self.label_semester = QtGui.QLabel(Dialog_add_subject)
self.label_semester.setGeometry(QtCore.QRect(50, 165, 91, 21))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
font.setPointSize(12)
self.label_semester.setFont(font)
self.label_semester.setObjectName(_fromUtf8("label_semester"))
self.pushButton_save = QtGui.QPushButton(Dialog_add_subject)
self.pushButton_save.setGeometry(QtCore.QRect(190, 290, 111, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
font.setPointSize(10)
self.pushButton_save.setFont(font)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8("Qt_interface/SE_syllabus/Save-as.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_save.setIcon(icon1)
self.pushButton_save.setIconSize(QtCore.QSize(20, 20))
self.pushButton_save.setObjectName(_fromUtf8("pushButton_save"))
self.pushButton_cancel = QtGui.QPushButton(Dialog_add_subject)
self.pushButton_cancel.setGeometry(QtCore.QRect(340, 290, 111, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
self.pushButton_cancel.setFont(font)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8("Qt_interface/SE_syllabus/if_draw-08_725558.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_cancel.setIcon(icon2)
self.pushButton_cancel.setIconSize(QtCore.QSize(20, 20))
self.pushButton_cancel.setObjectName(_fromUtf8("pushButton_cancel"))
self.comboBox_year = QtGui.QComboBox(Dialog_add_subject)
self.comboBox_year.setGeometry(QtCore.QRect(190, 91, 111, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
font.setPointSize(12)
self.comboBox_year.setFont(font)
self.comboBox_year.setObjectName(_fromUtf8("comboBox_year"))
self.comboBox_semester = QtGui.QComboBox(Dialog_add_subject)
self.comboBox_semester.setGeometry(QtCore.QRect(190, 160, 111, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Times New Roman"))
font.setPointSize(12)
self.comboBox_semester.setFont(font)
self.comboBox_semester.setObjectName(_fromUtf8("comboBox_semester"))
self.retranslateUi(Dialog_add_subject)
QtCore.QObject.connect(self.pushButton_cancel, QtCore.SIGNAL(_fromUtf8("clicked()")), self.lineEdit_subject_name.clear)
QtCore.QMetaObject.connectSlotsByName(Dialog_add_subject)
def retranslateUi(self, Dialog_add_subject):
Dialog_add_subject.setWindowTitle(_translate("Dialog_add_subject", "Dialog", None))
self.lbl_subject_name.setText(_translate("Dialog_add_subject", "SUBJECT NAME", None))
self.label_add_subject.setText(_translate("Dialog_add_subject", "ADD SUBJECT", None))
self.label_year.setText(_translate("Dialog_add_subject", "YEAR", None))
self.label_semester.setText(_translate("Dialog_add_subject", "SEMESTER", None))
self.pushButton_save.setText(_translate("Dialog_add_subject", "SAVE", None))
self.pushButton_cancel.setText(_translate("Dialog_add_subject", "CANCEL", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog_add_subject = QtGui.QDialog()
ui = Ui_Dialog_add_subject()
ui.setupUi(Dialog_add_subject)
Dialog_add_subject.show()
sys.exit(app.exec_())
| 48.691729
| 137
| 0.694719
| 761
| 6,476
| 5.654402
| 0.198423
| 0.092958
| 0.118987
| 0.041831
| 0.478039
| 0.408552
| 0.364397
| 0.263072
| 0.228213
| 0.206832
| 0
| 0.033788
| 0.195645
| 6,476
| 132
| 138
| 49.060606
| 0.792283
| 0.028567
| 0
| 0.293103
| 1
| 0
| 0.100634
| 0.022435
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043103
| false
| 0
| 0.017241
| 0.025862
| 0.094828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d9c3024853794c19d2ce2400c8d47311441430b2
| 8,513
|
py
|
Python
|
src/main/python/rlbot/version.py
|
IamEld3st/RLBot
|
36195ffd3a836ed910ce63aed8ba103b98b7b361
|
[
"MIT"
] | null | null | null |
src/main/python/rlbot/version.py
|
IamEld3st/RLBot
|
36195ffd3a836ed910ce63aed8ba103b98b7b361
|
[
"MIT"
] | null | null | null |
src/main/python/rlbot/version.py
|
IamEld3st/RLBot
|
36195ffd3a836ed910ce63aed8ba103b98b7b361
|
[
"MIT"
] | null | null | null |
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your own module
# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
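# For example (a hedged sketch -- the project's actual setup.py is not shown
# here), setup.py can read this file without importing the package:
#     version_ns = {}
#     exec(open('src/main/python/rlbot/version.py').read(), version_ns)
#     setup(name='rlbot', version=version_ns['__version__'])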
__version__ = '1.6.1'
release_notes = {
'1.6.1': """
Fixed GUI crash when loading certain RLBot config files with relative paths for agents.
Fixed agent preset loading to allow multiple agents to be saved/loaded correctly if they have the same name. - ima9rd
""",
    '1.6.0': """
Add support for auto starting .NET executables.
""",
'1.5.1': """
Fixed crash with GUI when no default RLBot.cfg file was found.
Updated GUI to launch Rocket League when clicking run if no Rocket League process is found. - ima9rd
""",
'1.5.0': """
Adding a have_internet helper function to help streamline upgrade checks. - ima9rd
""",
'1.4.2': """
Adding support for auto-running java bots during tournaments. To take advantage of this
in your bot, see https://github.com/RLBot/RLBotJavaExample/wiki/Auto-Launching-Java
Plus bug fixes:
- Fixed a bug where auto-run executables would crash when trying to write to stderr.
- Dragging bots to another team in the GUI no longer breaks the config.
""",
'1.3.0': """
Accurate ball prediction for Hoops and Dropshot modes!
- Kipje13, Marvin, NeverCast, et al.
""",
'1.2.6': """
Fixed a bug where field info was not extracted properly during dropshot mode.
It was reporting 2 goals rather than the expected 140.
""",
'1.2.5': """
***************************************************
* Fix for dodge cancels / half flips! - ccman32 *
***************************************************
Plus:
- Changing the rendering strategy for 3D lines that go past the camera. Formerly it was
"draw it, even though it's crazy sometimes", now it will be "don't draw it".
- Showing the rate that inputs are received for each player index when you press the
[home] key. Toggle back off with the [end] key.
- Fixed a bug where party_member_bot could get influenced by real controller input.
- Creating new presets in the GUI works better now.
- Got rid of the libpng warning seen when using the GUI.
- Giving specific error messages when cfg files are messed up.
""",
'1.2.2': """
- Rearranged the GUI a bit, and made it load and track appearance configs more effectively.
- Fixed bug where RUN button behavior in the GUI would not work after killing bots.
""",
'1.2.0': """
- We now offer a 'RigidBodyTick' thanks to whatisaphone! It's a lower-level representation of
physics data which updates at 120Hz and is not subject to interpolation. You can still make a
great bot without it, but this feature is quite nice for the scientists among us.
See https://github.com/RLBot/RLBotPythonExample/wiki/Rigid-Body-Tick for more details!
- Faster way to access ball prediction data in python. - Skyborg
""",
'1.1.3': """
- Faster way to access ball prediction data in python. - Skyborg
- Java bots will now shut down when the python framework quits. This has been necessary recently
to avoid buggy situations.
- Shutting down the python framework will no longer attempt to kill bots twice in a row.
- Clicking on the "Run" button twice in a row in the GUI will no longer spawn duplicate processes.
""",
'1.1.2': """
Faster way to access ball prediction data in python. - Skyborg
""",
'1.1.1': """
You can now get information about the ball's status in Dropshot mode thanks to hallo_doei!
Read all about it at https://github.com/RLBot/RLBot/wiki/Dropshot
Other changes:
- The loadout config for orange team is now respected again. - ccman32
- Fixed a bug where the GUI would crash with a "KeyError". - hallo_doei
- Avoiding and suppressing some game crashes, and also restoring the
ability to get game tick data during replays and the postgame. - tarehart
- Fixed a bug where bots would dodge when they intended to double jump. -tarehart
""",
'1.0.6': """
The latest Rocket League patch broke dodges for our bots; this update fixes it.
""",
'1.0.5': """
Maximum size for a render message has been decreased again because many people experienced
errors related to memory access. The limit is now only double the original.
""",
'1.0.4': """
- Maximum size for a render message has been increased by a factor of 100. This means you can
draw a lot of lines at once without getting errors.
- Boost amount for cars will now round up to the nearest integer, so 0.3% boost will now appear
as 1 instead of 0.
- Fixed a crash that would commonly happen after a match ends. As a side effect, you can no longer
see up-to-date player data during instant replays.
""",
'1.0.3': """
Time for the big 1.0 release! We actually left "beta" a long time ago so this isn't as big
a milestone as the number implies, but we DO have two great new features!
1. Setting game state. You can manipulate the position, velocity, etc of the ball and the cars!
This can be a great help during bot development, and you can also get creative with it. Visit
the wiki for details and documentation - https://github.com/RLBot/RLBot/wiki/Manipulating-Game-State
Code written by hallo_doei, ccman32, and tarehart
2. Ball prediction. We now provide a list of future ball positions based on chip's excellent
physics modeling. Take advantage of this to do next-level wall reads, catches, and dribbles! You can
read about the math involved here: https://samuelpmish.github.io/notes/RocketLeague/ball_bouncing/
Note: currently the wall bounces are only accurate on the standard arena, not hoops or dropshot.
Documentation and examples can be found here: https://github.com/RLBot/RLBot/wiki/Ball-Path-Prediction
Code written by chip and tarehart
Bonus:
- You can now play on Salty Shores thanks to hallo_doei
- Bug fix for people with spaces in their file path by Zaptive
- Subprocess agent for future Rust support by whatisaphone
""",
'0.0.32': """
More comprehensive fix for Rocket League patch 1.50. Compared to previous version:
- Dropshot tile data is fixed
- Boost pad data is fixed
- Loadout configuration is fixed
Thanks to ccman32 and dtracers for delivering this fix quickly!
""",
'0.0.31': """
Rapid response to Rocket League patch 1.50 with the following known issues:
- Dropshot tile data is missing
- Boost pad data is missing
- Loadout configuration is broken
Thanks to ccman32 and dtracers for delivering this short-term fix quickly.
We will follow this up with a proper fix as soon as possible. You may also choose to stay on
Rocket League 1.49 and RLBot 0.0.30, ask for instructions on discord.
""",
'0.0.30': """
- New core dll that is less likely to break when Rocket League is patched - ccman32 and hallo-doei
- Fixed bug resulting in incorrect quickchat - dtracers
- Added more built-in colors to the python rendering manager - Eastvillage
- Fix for items with a ':' not showing up in the GUI - hallo-doei
- Fix for GUI not saving correct path - hallo-doei
- Fix for GUI crash when saving preset then canceling - hallo-doei
- Adding file checking before injection (Resolves #167) - Redox
- Fixed typo in rlbot.cfg - Redox
- Fancy release notes - tarehart and Skyborg
"""
}
release_banner = """
______ _ ______ _
10100 | ___ \ | | ___ \ | | 00101
110011 | |_/ / | | |_/ / ___ | |_ 110011
00110110 | /| | | ___ \/ _ \| __| 01101100
010010 | |\ \| |____| |_/ / (_) | |_ 010010
10010 \_| \_\_____/\____/ \___/ \__| 01001
"""
def get_current_release_notes():
if __version__ in release_notes:
return release_notes[__version__]
return ''
def get_help_text():
return "Trouble? Ask on Discord at https://discord.gg/5cNbXgG " \
"or report an issue at https://github.com/RLBot/RLBot/issues"
def print_current_release_notes():
print(release_banner)
print("Version {}".format(__version__))
print(get_current_release_notes())
print(get_help_text())
print("")
| 45.768817
| 118
| 0.677787
| 1,269
| 8,513
| 4.467297
| 0.394011
| 0.008467
| 0.014817
| 0.020109
| 0.095608
| 0.07444
| 0.054683
| 0.054683
| 0.027165
| 0.027165
| 0
| 0.028786
| 0.23282
| 8,513
| 185
| 119
| 46.016216
| 0.839228
| 0.032186
| 0
| 0.158228
| 0
| 0.101266
| 0.909632
| 0.012389
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018987
| false
| 0
| 0
| 0.006329
| 0.037975
| 0.037975
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d9c387f6c561372e064bfe33f0566d9f2a1cdd50
| 399
|
py
|
Python
|
Task2C.py
|
StanleyHou117/group66_LentTermProject
|
0255310cb202f21cada8cf7c0f45a045a9b72c1f
|
[
"MIT"
] | null | null | null |
Task2C.py
|
StanleyHou117/group66_LentTermProject
|
0255310cb202f21cada8cf7c0f45a045a9b72c1f
|
[
"MIT"
] | null | null | null |
Task2C.py
|
StanleyHou117/group66_LentTermProject
|
0255310cb202f21cada8cf7c0f45a045a9b72c1f
|
[
"MIT"
] | null | null | null |
from floodsystem.stationdata import build_station_list
from floodsystem.flood import stations_highest_rel_level
def run():
stations = build_station_list()
warning_stations = stations_highest_rel_level(stations,10)
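    # stations_highest_rel_level appears to return (station, relative_level)
    # pairs for the 10 stations with the highest relative water level, which
    # the loop below prints one per line.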
for entry in warning_stations:
print(entry[0].name,entry[1])
if __name__ == "__main__":
print("*** Task 2C: CUED Part IA Flood Warning System ***")
run()
| 28.5
| 63
| 0.734336
| 53
| 399
| 5.150943
| 0.584906
| 0.10989
| 0.117216
| 0.168498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01506
| 0.16792
| 399
| 14
| 64
| 28.5
| 0.807229
| 0
| 0
| 0
| 0
| 0
| 0.145
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.3
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d9c3b05c7fcf1f87eb65a4b552deef9342032f24
| 6,520
|
py
|
Python
|
src/Components/missions/GEMS/mcd43c.py
|
GEOS-ESM/AeroApps
|
874dad6f34420c014d98eccbe81a061bdc0110cf
|
[
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-12-02T14:23:30.000Z
|
2021-12-31T15:39:30.000Z
|
src/Components/missions/GEMS/mcd43c.py
|
GEOS-ESM/AeroApps
|
874dad6f34420c014d98eccbe81a061bdc0110cf
|
[
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 9
|
2020-04-15T16:22:14.000Z
|
2022-03-24T13:59:25.000Z
|
src/Components/missions/SENTINEL-4/mcd43c.py
|
GEOS-ESM/AeroApps
|
874dad6f34420c014d98eccbe81a061bdc0110cf
|
[
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
Reads climate modeling grid 0.05 degree MCD43 BRDF files.
"""
import os
import sys
from numpy import loadtxt, array, tile, where, concatenate, flipud
from numpy import ones
import numpy as np
from datetime import date, datetime, timedelta
from glob import glob
from pyhdf.SD import SD, HDF4Error
MISSING = 32.767
SDS = dict (
LAND = ('BRDF_Albedo_Parameter1_Band1','BRDF_Albedo_Parameter1_Band2',
'BRDF_Albedo_Parameter1_Band3','BRDF_Albedo_Parameter1_Band4',
'BRDF_Albedo_Parameter1_Band5','BRDF_Albedo_Parameter1_Band6',
'BRDF_Albedo_Parameter1_Band7',
'BRDF_Albedo_Parameter2_Band1','BRDF_Albedo_Parameter2_Band2',
'BRDF_Albedo_Parameter2_Band3','BRDF_Albedo_Parameter2_Band4',
'BRDF_Albedo_Parameter2_Band5','BRDF_Albedo_Parameter2_Band6',
'BRDF_Albedo_Parameter2_Band7',
'BRDF_Albedo_Parameter3_Band1','BRDF_Albedo_Parameter3_Band2',
'BRDF_Albedo_Parameter3_Band3','BRDF_Albedo_Parameter3_Band4',
'BRDF_Albedo_Parameter3_Band5','BRDF_Albedo_Parameter3_Band6',
'BRDF_Albedo_Parameter3_Band7'),
QUAL = ('BRDF_Albedo_Quality',
'Snow_BRDF_Albedo',
'BRDF_Albedo_Ancillary', )
)
ALIAS = dict ( BRDF_Albedo_Parameter1_Band1 = 'KISO_b1_645',
BRDF_Albedo_Parameter1_Band2 = 'KISO_b2_856',
BRDF_Albedo_Parameter1_Band3 = 'KISO_b3_465',
BRDF_Albedo_Parameter1_Band4 = 'KISO_b4_553',
BRDF_Albedo_Parameter1_Band5 = 'KISO_b5_1241',
BRDF_Albedo_Parameter1_Band6 = 'KISO_b6_1629',
BRDF_Albedo_Parameter1_Band7 = 'KISO_b7_2114',
BRDF_Albedo_Parameter2_Band1 = 'KVOL_b1_645',
BRDF_Albedo_Parameter2_Band2 = 'KVOL_b2_856',
BRDF_Albedo_Parameter2_Band3 = 'KVOL_b3_465',
BRDF_Albedo_Parameter2_Band4 = 'KVOL_b4_553',
BRDF_Albedo_Parameter2_Band5 = 'KVOL_b5_1241',
BRDF_Albedo_Parameter2_Band6 = 'KVOL_b6_1629',
BRDF_Albedo_Parameter2_Band7 = 'KVOL_b7_2114',
BRDF_Albedo_Parameter3_Band1 = 'KGEO_b1_645',
BRDF_Albedo_Parameter3_Band2 = 'KGEO_b2_856',
BRDF_Albedo_Parameter3_Band3 = 'KGEO_b3_465',
BRDF_Albedo_Parameter3_Band4 = 'KGEO_b4_553',
BRDF_Albedo_Parameter3_Band5 = 'KGEO_b5_1241',
BRDF_Albedo_Parameter3_Band6 = 'KGEO_b6_1629',
BRDF_Albedo_Parameter3_Band7 = 'KGEO_b7_2114',
)
#...........................................................................
class McD43C(object):
"""
    This class implements the MODIS LAND BRDF 16-day Level 3 product
    MCD43C1 (0.05 degree horizontal resolution).
"""
def __init__ (self,Path,lon,lat,Verb=1):
"""
Reads files for one day of Level 3 MCD43C1
present on a given *Path* and returns an object with
        all 3 kernel coefficients.
        Required parameters:
Path -- for now a single file. Eventually implement a single directory, or a list
of files and directories.
"""
if type(lon) is list:
lon = array(lon)
lat = array(lat)
# List of HDF files for a given date
#-----------------------------------
self.verb = Verb
self.SDS = SDS['LAND']
#self.Tfiles = glob(Path + '*.hdf')
if type(Path) is str:
self.Files = [Path]
else:
self.Files = Path
# From a list of lat and lon, return the
# dx, dy on the grid
# -------------------------------------
self.nobs = len(lon)
self._findNearest(Path,lon,lat)
# Read BRDF kernel in a MODIS tile
# ---------------------------------
self.read_BRDF()
# Result
#---
def _findNearest(self,path,lon,lat):
"""Given a list of lat, lon, return numbers to find the
position of the nearest neighbor on the grid (dx,dy)
"""
dLon = 0.05
dLat = 0.05
Lon0 = -180 - dLon
Lat0 = -90 + dLat
self.dx = (0.5+(lon-Lon0)/dLon).astype(int)
self.dy = (0.5+(lat-Lat0)/dLat).astype(int)
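        # Worked instance of the rounding above (0.05-degree CMG cells):
        # lon = -2.0 -> dx = int(0.5 + (-2.0 - (-180.05)) / 0.05) = 3561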
if self.verb:
            print('dx', 'dy', self.dx, self.dy)
#---
def read_BRDF(self):
"""Reads MCD43C1 file with Level 3 BRDF kernels for each MODIS band."""
# Create empty lists for SDS to be read from file
# -----------------------------------------------
for name in self.SDS:
self.__dict__[name] = []
BRDF = MISSING * ones((len(self.SDS),self.nobs))
for fn in self.Files:
try:
if self.verb:
print "[] Working on "+fn
hfile = SD(fn)
except HDF4Error:
if self.verb > 2:
print "- %s: not recognized as an HDF file"%filename
return
# Read select variables (reshape to allow concatenation later)
# ------------------------------------------------------------
for sds in self.SDS:
if self.verb:
                    print('sds', self.SDS.index(sds))
v = hfile.select(sds).get()
a = hfile.select(sds).attributes()
if a['scale_factor']!=1.0 or a['add_offset']!=0.0:
v = a['scale_factor'] * v + a['add_offset']
if self.verb:
                    print(array(self.dx), BRDF.shape, BRDF[self.SDS.index(sds), :], v.shape)
v = flipud(v)
BRDF[self.SDS.index(sds),:] = v[array(self.dy), array(self.dx)]
for sds in self.SDS:
self.__dict__[sds] = BRDF[self.SDS.index(sds),:]
if sds in ALIAS.keys():
self.__dict__[ALIAS[sds]] = self.__dict__[sds]
#---
#............................................................................
if __name__ == "__main__":
path = '/nobackup/3/pcastell/MODIS/MCD43C1/MCD43C1.A2005361.005.2008094071946.hdf'
lon = [-2.,-120.,15.2,17.2,170.1]
lat = [88.,40.,-20.,-20.,-55.5]
lon = np.arange(-180,180,1)
lat = np.arange(-90,90,1)
lon,lat = np.meshgrid(lon,lat)
    ex = McD43C(path, lon.flatten(), lat.flatten())
| 36.222222
| 103
| 0.533282
| 747
| 6,520
| 4.373494
| 0.273092
| 0.137741
| 0.085706
| 0.018365
| 0.039792
| 0.012244
| 0
| 0
| 0
| 0
| 0
| 0.067998
| 0.316564
| 6,520
| 179
| 104
| 36.424581
| 0.665171
| 0.100767
| 0
| 0.056604
| 0
| 0
| 0.209649
| 0.133752
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.066038
| null | null | 0.04717
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d9c7946fa7c34a185ec10fc47b862efa2f519a9d
| 19,770
|
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class LearnFrames(Base):
"""The learning frames that IxNetwork sends during the test.
The LearnFrames class encapsulates a required learnFrames resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'learnFrames'
_SDM_ATT_MAP = {
'FastPathEnable': 'fastPathEnable',
'FastPathLearnFrameSize': 'fastPathLearnFrameSize',
'FastPathNumFrames': 'fastPathNumFrames',
'FastPathRate': 'fastPathRate',
'LearnFrameSize': 'learnFrameSize',
'LearnFrequency': 'learnFrequency',
'LearnNumFrames': 'learnNumFrames',
'LearnRate': 'learnRate',
'LearnSendMacOnly': 'learnSendMacOnly',
'LearnSendRouterSolicitation': 'learnSendRouterSolicitation',
'LearnWaitTime': 'learnWaitTime',
'LearnWaitTimeBeforeTransmit': 'learnWaitTimeBeforeTransmit',
}
_SDM_ENUM_MAP = {
'learnFrequency': ['never', 'onBinaryIteration', 'oncePerFramesize', 'oncePerTest', 'onTrial'],
}
def __init__(self, parent, list_op=False):
super(LearnFrames, self).__init__(parent, list_op)
@property
def FastPathEnable(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, enables fast path transmit.
"""
return self._get_attribute(self._SDM_ATT_MAP['FastPathEnable'])
@FastPathEnable.setter
def FastPathEnable(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['FastPathEnable'], value)
@property
def FastPathLearnFrameSize(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the size of the learning frames in the fast path.
"""
return self._get_attribute(self._SDM_ATT_MAP['FastPathLearnFrameSize'])
@FastPathLearnFrameSize.setter
def FastPathLearnFrameSize(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FastPathLearnFrameSize'], value)
@property
def FastPathNumFrames(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the number of learn frames that IxNetwork sends through fast path.
"""
return self._get_attribute(self._SDM_ATT_MAP['FastPathNumFrames'])
@FastPathNumFrames.setter
def FastPathNumFrames(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FastPathNumFrames'], value)
@property
def FastPathRate(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the rate at which IxNetwork sends learn frames through fast path.
"""
return self._get_attribute(self._SDM_ATT_MAP['FastPathRate'])
@FastPathRate.setter
def FastPathRate(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FastPathRate'], value)
@property
def LearnFrameSize(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the size of the learning frames.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnFrameSize'])
@LearnFrameSize.setter
def LearnFrameSize(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnFrameSize'], value)
@property
def LearnFrequency(self):
# type: () -> str
"""
Returns
-------
        - str(never | onBinaryIteration | oncePerFramesize | oncePerTest | onTrial): Allows you to choose how frequently IxNetwork sends learning frames during the test.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnFrequency'])
@LearnFrequency.setter
def LearnFrequency(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnFrequency'], value)
@property
def LearnNumFrames(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the number of learning frames that IxNetwork sends for each address.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnNumFrames'])
@LearnNumFrames.setter
def LearnNumFrames(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnNumFrames'], value)
@property
def LearnRate(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the rate at which IxNetwork sends learn frames to the DUT.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnRate'])
@LearnRate.setter
def LearnRate(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnRate'], value)
@property
def LearnSendMacOnly(self):
# type: () -> bool
"""
Returns
-------
- bool: Sends learning frames to MAC address only.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnSendMacOnly'])
@LearnSendMacOnly.setter
def LearnSendMacOnly(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnSendMacOnly'], value)
@property
def LearnSendRouterSolicitation(self):
# type: () -> bool
"""
Returns
-------
- bool: Sends router solicitation messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnSendRouterSolicitation'])
@LearnSendRouterSolicitation.setter
def LearnSendRouterSolicitation(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnSendRouterSolicitation'], value)
@property
def LearnWaitTime(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames from all the ports.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnWaitTime'])
@LearnWaitTime.setter
def LearnWaitTime(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnWaitTime'], value)
@property
def LearnWaitTimeBeforeTransmit(self):
# type: () -> int
"""
Returns
-------
        - number: Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnWaitTimeBeforeTransmit'])
@LearnWaitTimeBeforeTransmit.setter
def LearnWaitTimeBeforeTransmit(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnWaitTimeBeforeTransmit'], value)
def update(self, FastPathEnable=None, FastPathLearnFrameSize=None, FastPathNumFrames=None, FastPathRate=None, LearnFrameSize=None, LearnFrequency=None, LearnNumFrames=None, LearnRate=None, LearnSendMacOnly=None, LearnSendRouterSolicitation=None, LearnWaitTime=None, LearnWaitTimeBeforeTransmit=None):
# type: (bool, int, int, int, int, str, int, int, bool, bool, int, int) -> LearnFrames
"""Updates learnFrames resource on the server.
Args
----
- FastPathEnable (bool): If true, enables fast path transmit.
- FastPathLearnFrameSize (number): Specifies the size of the learning frames in the fast path.
- FastPathNumFrames (number): Specifies the number of learn frames that IxNetwork sends through fast path.
- FastPathRate (number): Specifies the rate at which IxNetwork sends learn frames through fast path.
- LearnFrameSize (number): Specifies the size of the learning frames.
        - LearnFrequency (str(never | onBinaryIteration | oncePerFramesize | oncePerTest | onTrial)): Allows you to choose how frequently IxNetwork sends learning frames during the test.
- LearnNumFrames (number): Specifies the number of learning frames that IxNetwork sends for each address.
- LearnRate (number): Specifies the rate at which IxNetwork sends learn frames to the DUT.
- LearnSendMacOnly (bool): Sends learning frames to MAC address only.
- LearnSendRouterSolicitation (bool): Sends router solicitation messages.
- LearnWaitTime (number): Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames from all the ports.
        - LearnWaitTimeBeforeTransmit (number): Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Applies the specified Quick Test.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyAsync operation on the server.
applyAsync(async_operation=bool)
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the applyAsyncResult operation on the server.
applyAsyncResult(async_operation=bool)bool
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool:
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
applyITWizardConfiguration(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the generateReport operation on the server.
        Generate a PDF report for the last successful test run.
generateReport(async_operation=bool)string
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: This method is asynchronous and has no return value.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
        The IxNetwork model allows for multiple method signatures with the same name while Python does not.
run(async_operation=bool)list
-----------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
run(InputParameters=string, async_operation=bool)list
-----------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Starts the specified Quick Test.
        The IxNetwork model allows for multiple method signatures with the same name while Python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(InputParameters=string, async_operation=bool)
---------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
waitForTest(async_operation=bool)list
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('waitForTest', payload=payload, response_object=None)
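A hedged usage sketch of the resource pattern above (it assumes learn_frames is an instance of this class reached through a connected IxNetwork session; navigating the quicktest tree to obtain it is not shown here):
def configure_and_run(learn_frames):
    # A property assignment becomes a single _set_attribute call on the server.
    learn_frames.LearnFrequency = 'oncePerTest'
    # update() pushes several attributes in one request.
    learn_frames.update(LearnRate=100, LearnNumFrames=10)
    # Run() starts the Quick Test and blocks until execution finishes.
    return learn_frames.Run()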
| 44.728507
| 304
| 0.644917
| 2,223
| 19,770
| 5.650022
| 0.127305
| 0.021497
| 0.018631
| 0.025876
| 0.68949
| 0.663296
| 0.605573
| 0.595701
| 0.558519
| 0.519984
| 0
| 0.002926
| 0.239454
| 19,770
| 441
| 305
| 44.829932
| 0.832402
| 0.527112
| 0
| 0.270833
| 0
| 0
| 0.13391
| 0.042006
| 0
| 0
| 0
| 0
| 0
| 1
| 0.243056
| false
| 0
| 0.020833
| 0
| 0.451389
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d9caf13b41f36d2f1d5f56fee8cc8d3745513f23
| 18,402
|
py
|
Python
|
Yellow_Pages_Lithuania/unit_tests.py
|
Jay4C/Web-Scraping
|
187679bee035dad661d983b5a8382240f820c337
|
[
"MIT"
] | 1
|
2022-02-28T05:05:06.000Z
|
2022-02-28T05:05:06.000Z
|
Yellow_Pages_Lithuania/unit_tests.py
|
Jay4C/Web-Scraping
|
187679bee035dad661d983b5a8382240f820c337
|
[
"MIT"
] | 23
|
2020-03-04T22:17:32.000Z
|
2021-01-21T09:35:33.000Z
|
Yellow_Pages_Lithuania/unit_tests.py
|
Jay4C/Web-Scraping
|
187679bee035dad661d983b5a8382240f820c337
|
[
"MIT"
] | null | null | null |
import time
from bs4 import BeautifulSoup
import requests
import pymysql.cursors
import unittest
class UnitTestsDataMinerYellowPagesLithuania(unittest.TestCase):
def test_extract_one_email(self):
url = "https://www.visalietuva.lt/en/company/astorija-hotel-uab"
# Request the content of a page from the url
html = requests.get(url)
# Parse the content of html_doc
soup = BeautifulSoup(html.content, 'html.parser')
if soup.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + soup.find('a', {'itemprop': 'email'}).text.split("@")[1]
print('email : ' + email)
else:
print('no email business')
def test_extract_emails_from_all_page_of_results_for_one_activity_and_capital(self):
activity = "hotel"
city = "vilniuje"
url_search = "https://www.visalietuva.lt/en/search/" + activity + "/" + city
html_search = requests.get(url_search)
soup_search = BeautifulSoup(html_search.content, 'html.parser')
number_of_pages = 0
if soup_search.find('div', {'class': 'search_count f_left'}) is not None:
number_of_pages_with_coma = int(soup_search
.find('div', {'class': 'search_count f_left'})
.find('span').text
)/20
if int(str(number_of_pages_with_coma).split(".")[1][:1]) < 5:
number_of_pages += round(number_of_pages_with_coma) + 1
print('number_of_pages : ' + str(number_of_pages))
elif int(str(number_of_pages_with_coma).split(".")[1][:1]) >= 5:
number_of_pages += round(number_of_pages_with_coma)
print('number_of_pages : ' + str(number_of_pages))
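        # The branch above emulates math.ceil(count / 20) -- the site lists 20
        # results per page (note an exact multiple of 20 gets one extra page).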
i_1 = 0
if soup_search.find('div', {'class': 'company_list'}) is not None:
print(url_search)
for result_item in soup_search \
.find('div', {'class': 'company_list'}) \
.find_all('div', {'class': 'item'}):
i_1 += 1
url_result = result_item.find('a', {'class': 'company-item-title'}).get('href')
time.sleep(2)
# Request the content of a page from the url
html_result = requests.get(url_result)
# Parse the content of html_doc
soup_result = BeautifulSoup(html_result.content, 'html.parser')
if soup_result.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + soup_result.find('a', {'itemprop': 'email'}).text.split("@")[1]
print(str(i_1) + ' email : ' + email)
else:
print(str(i_1) + ' no email business')
else:
print('sorry there is nothing')
if number_of_pages > 1:
for i in range(2, number_of_pages+1):
url_of_one_page_of_results = url_search + "/" + str(i)
print(url_of_one_page_of_results)
time.sleep(2)
html_of_one_page_of_results = requests.get(url_of_one_page_of_results)
soup_of_one_page_of_results = BeautifulSoup(html_of_one_page_of_results.content, 'html.parser')
if soup_of_one_page_of_results.find('div', {'class': 'company_list'}) is not None:
for result_item in soup_of_one_page_of_results\
.find('div', {'class': 'company_list'})\
.find_all('div', {'class': 'item'}):
i_1 += 1
url_result = result_item.find('a', {'class': 'company-item-title'}).get('href')
# Request the content of a page from the url
html_result = requests.get(url_result)
# Parse the content of html_doc
soup_result = BeautifulSoup(html_result.content, 'html.parser')
if soup_result.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + soup_result.find('a', {'itemprop': 'email'}).text.split("@")[1]
print(str(i_1) + ' email : ' + email)
else:
print(str(i_1) + ' no email business')
else:
print('sorry there is nothing')
def test_extract_emails_from_all_page_of_results_for_all_activities_and_capitals(self):
activites = [
# {'id': '1', 'url': 'labour'}
#{'id': '2', 'url': 'real+estate'},
#{'id': '3', 'url': 'recruitment'},
#{'id': '4', 'url': 'software'},
#{'id': '5', 'url': 'hotel'},
#{'id': '6', 'url': 'landlord'},
#{'id': '7', 'url': 'cleaning'},
#{'id': '8', 'url': 'association'},
#{'id': '9', 'url': 'financial'},
#{'id': '10', 'url': 'restaurant'},
#{'id': '11', 'url': 'building'},
#{'id': '12', 'url': 'hairdresser'},
#{'id': '13', 'url': 'florist'},
#{'id': '14', 'url': 'locksmith'},
#{'id': '15', 'url': 'bakery'},
#{'id': '16', 'url': 'insurance'},
#{'id': '17', 'url': 'pharmacy'},
#{'id': '18', 'url': 'moving'},
#{'id': '19', 'url': 'electricity'},
#{'id': '20', 'url': 'plumbing'},
#{'id': '21', 'url': 'security'},
#{'id': '22', 'url': 'lawyer'},
#{'id': '23', 'url': 'bank'},
#{'id': '24', 'url': 'garage'},
#{'id': '25', 'url': 'dentist'},
#{'id': '26', 'url': 'doctor'},
#{'id': '27', 'url': 'accounting'},
#{'id': '28', 'url': 'store'},
#{'id': '29', 'url': 'notary'},
#{'id': '30', 'url': 'jeweller'},
#{'id': '31', 'url': 'tailor'},
#{'id': '32', 'url': 'meat'},
#{'id': '33', 'url': 'library'},
#{'id': '34', 'url': 'architect'}
]
capitales_du_monde = [
{'id': '183', 'nom': 'akmeneje'},#Akmenė
{'id': '184', 'nom': 'alytuje'},#Alytus
{'id': '185', 'nom': 'anyksciuose'},#Anykščiai
{'id': '186', 'nom': 'birstone'},#Birštonas
{'id': '187', 'nom': 'birzuose'},#Biržai
{'id': '188', 'nom': 'druskininkuose'},#Druskininkai
{'id': '189', 'nom': 'elektrenuose'},#Elektrėnai
{'id': '190', 'nom': 'ignalinoje'},#Ignalina
{'id': '191', 'nom': 'jonavoje'},#Jonava
{'id': '192', 'nom': 'joniskyje'},#Joniškis
{'id': '193', 'nom': 'jurbarke'},#Jurbarkas
{'id': '194', 'nom': 'kaisiadoryse'},#Kaišiadorys
{'id': '195', 'nom': 'kalvarijoje'},#Kalvarija
{'id': '196', 'nom': 'kaune'},#Kaunas
{'id': '197', 'nom': 'kazlu-rudoje'},#Kazlų Rūda
{'id': '198', 'nom': 'kedainiuose'},#Kėdainiai
{'id': '199', 'nom': 'kelmeje'},#Kelmė
{'id': '200', 'nom': 'klaipedoje'},#Klaipėda
{'id': '201', 'nom': 'kretingoje'},#Kretinga
{'id': '202', 'nom': 'kupiskyje'},#Kupiškis
{'id': '203', 'nom': 'lazdijuose'},#Lazdijai
{'id': '204', 'nom': 'marijampoleje'},#Marijampolė
{'id': '205', 'nom': 'mazeikiuose'},#Mažeikiai
{'id': '206', 'nom': 'moletuose'},#Molėtai
{'id': '207', 'nom': 'neringoje'},#Neringa
{'id': '208', 'nom': 'pagegiuose'},#Pagėgiai
{'id': '209', 'nom': 'pakruojyje'},#Pakruojis
{'id': '210', 'nom': 'palangoje'},#Palanga
{'id': '211', 'nom': 'panevezyje'},#Panevėžys
{'id': '212', 'nom': 'pasvalyje'},#Pasvalys
{'id': '213', 'nom': 'plungeje'},#Plungė
{'id': '214', 'nom': 'prienuose'},#Prienai
{'id': '215', 'nom': 'radviliskyje'},#Radviliškis
{'id': '216', 'nom': 'raseiniuose'},#Raseiniai
{'id': '217', 'nom': 'rietave'},#Rietavas
{'id': '218', 'nom': 'rokiskyje'},#Rokiškis
{'id': '219', 'nom': 'sakiuose'},#Šakiai
{'id': '220', 'nom': 'salcininkuose'},#Šalčininkai
{'id': '221', 'nom': 'siauliuose'},#Šiauliai
{'id': '222', 'nom': 'silaleje'},#Šilalė
{'id': '223', 'nom': 'siluteje'},#Šilutė
{'id': '224', 'nom': 'sirvintose'},#Širvintos
{'id': '225', 'nom': 'skuode'},#Skuodas
{'id': '226', 'nom': 'svencionyse'},#Švenčionys
{'id': '227', 'nom': 'taurageje'},#Tauragė
{'id': '228', 'nom': 'telsiuose'},#Telšiai
{'id': '229', 'nom': 'trakuose'},#Trakai
{'id': '230', 'nom': 'ukmergeje'},#Ukmergė
{'id': '231', 'nom': 'utenoje'},#Utena
{'id': '232', 'nom': 'varenoje'},#Varėna
{'id': '233', 'nom': 'vilkaviskyje'},#Vilkaviškis
{'id': '234', 'nom': 'vilniuje'},#Vilnius
{'id': '235', 'nom': 'visagine'},#Visaginas
{'id': '236', 'nom': 'zarasuose'}#Zarasai
]
try:
for capitale in capitales_du_monde:
for activite in activites:
try:
activity = activite.get("url")
city = capitale.get("nom")
url_search = "https://www.visalietuva.lt/en/search/" + activity + "/" + city
html_search = requests.get(url_search)
soup_search = BeautifulSoup(html_search.content, 'html.parser')
number_of_pages = 0
if soup_search.find('div', {'class': 'search_count f_left'}) is not None:
number_of_pages_with_coma = int(soup_search
.find('div', {'class': 'search_count f_left'})
.find('span').text
) / 20
if int(str(number_of_pages_with_coma).split(".")[1][:1]) < 5:
number_of_pages += round(number_of_pages_with_coma) + 1
print('number_of_pages : ' + str(number_of_pages))
elif int(str(number_of_pages_with_coma).split(".")[1][:1]) >= 5:
number_of_pages += round(number_of_pages_with_coma)
print('number_of_pages : ' + str(number_of_pages))
i_1 = 0
if soup_search.find('div', {'class': 'company_list'}) is not None:
print(url_search)
for result_item in soup_search \
.find('div', {'class': 'company_list'}) \
.find_all('div', {'class': 'item'}):
i_1 += 1
url_result = result_item.find('a', {'class': 'company-item-title'}).get('href')
time.sleep(2)
# Request the content of a page from the url
html_result = requests.get(url_result)
# Parse the content of html_doc
soup_result = BeautifulSoup(html_result.content, 'html.parser')
if soup_result.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + soup_result.find('a', {'itemprop': 'email'}).text.split("@")[1]
try:
connection = pymysql.connect(
host='localhost',
port=3306,
user='',
password='',
db='contacts_professionnels',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
with connection.cursor() as cursor:
try:
sql = "INSERT INTO `emails` (" \
"`id_activite`, " \
"`id_capitale_du_monde`, " \
"`email`) VALUE (%s, %s, %s)"
cursor.execute(sql, (
activite.get('id'),
capitale.get('id'),
email))
connection.commit()
print(str(i_1) + " The record is stored : " + email)
connection.close()
except:
print(str(i_1) + " The record already exists : " + email)
connection.close()
except Exception as e:
print(str(i_1) + " An error with the email : " + email + " " + str(e))
else:
print(str(i_1) + ' no email business')
else:
print('sorry there is nothing')
if number_of_pages > 1:
for i in range(2, number_of_pages + 1):
url_of_one_page_of_results = url_search + "/" + str(i)
print(url_of_one_page_of_results)
time.sleep(2)
html_of_one_page_of_results = requests.get(url_of_one_page_of_results)
soup_of_one_page_of_results = BeautifulSoup(html_of_one_page_of_results.content,
'html.parser')
if soup_of_one_page_of_results.find('div', {'class': 'company_list'}) is not None:
for result_item in soup_of_one_page_of_results \
.find('div', {'class': 'company_list'}) \
.find_all('div', {'class': 'item'}):
i_1 += 1
url_result = result_item.find('a', {'class': 'company-item-title'}).get('href')
# Request the content of a page from the url
html_result = requests.get(url_result)
# Parse the content of html_doc
soup_result = BeautifulSoup(html_result.content, 'html.parser')
if soup_result.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + \
soup_result.find('a', {'itemprop': 'email'}).text.split("@")[1]
try:
connection = pymysql.connect(
host='localhost',
port=3306,
user='',
password='',
db='contacts_professionnels',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
with connection.cursor() as cursor:
try:
sql = "INSERT INTO `emails` (" \
"`id_activite`, " \
"`id_capitale_du_monde`, " \
"`email`) VALUE (%s, %s, %s)"
cursor.execute(sql, (
activite.get('id'),
capitale.get('id'),
email))
connection.commit()
print(str(i_1) + " The record is stored : " + email)
connection.close()
except:
print(str(i_1) + " The record already exists : " + email)
connection.close()
except Exception as e:
print(str(i_1) + " An error with the email : " + email + " " + str(e))
else:
print(str(i_1) + ' no email business')
else:
print('sorry there is nothing')
except Exception as e:
print("There is an error connection at url : " + str(e))
finally:
print('done')
if __name__ == '__main__':
unittest.main()
| 53.0317
| 119
| 0.395392
| 1,592
| 18,402
| 4.378141
| 0.232412
| 0.032138
| 0.052224
| 0.025251
| 0.657389
| 0.647489
| 0.647489
| 0.643472
| 0.638737
| 0.638737
| 0
| 0.029659
| 0.463156
| 18,402
| 346
| 120
| 53.184971
| 0.675878
| 0.101185
| 0
| 0.639216
| 0
| 0
| 0.1551
| 0.005596
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011765
| false
| 0.007843
| 0.019608
| 0
| 0.035294
| 0.109804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d9cb561a08fd3aac17d5adf4c0665d1418e60e6a
| 3,262
|
py
|
Python
|
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 3
|
2020-04-28T16:27:33.000Z
|
2020-07-22T07:43:30.000Z
|
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 1
|
2021-02-21T12:16:47.000Z
|
2021-02-21T12:16:47.000Z
|
# pylint: disable=protected-access
import os
import re
import pytest
from dagster import file_relative_path
from dagster.core.errors import DagsterInstanceMigrationRequired
from dagster.core.instance import DagsterInstance, InstanceRef
from dagster.utils.test import restore_directory
# test that we can load runs and events from an old instance
def test_0_6_4():
test_dir = file_relative_path(__file__, 'snapshot_0_6_4')
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
runs = instance.get_runs()
with pytest.raises(
DagsterInstanceMigrationRequired,
match=re.escape(
'Instance is out of date and must be migrated (SqliteEventLogStorage for run '
'c7a6c4d7-6c88-46d0-8baa-d4937c3cefe5). Database is at revision None, head is '
'567bc23fd1ac. Please run `dagster instance migrate`.'
),
):
for run in runs:
instance.all_logs(run.run_id)
def test_0_6_6_sqlite_exc():
test_dir = file_relative_path(__file__, 'snapshot_0_6_6/sqlite')
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
runs = instance.get_runs()
# Note that this is a deliberate choice -- old runs are simply invisible, and their
# presence won't raise DagsterInstanceMigrationRequired. This is a reasonable choice since
# the runs.db has moved and otherwise we would have to do a check for the existence of an
# old runs.db every time we accessed the runs. Instead, we'll do this only in the upgrade
# method.
assert len(runs) == 0
run_ids = instance._event_storage.get_all_run_ids()
assert run_ids == ['89296095-892d-4a15-aa0d-9018d1580945']
with pytest.raises(
DagsterInstanceMigrationRequired,
match=re.escape(
'Instance is out of date and must be migrated (SqliteEventLogStorage for run '
'89296095-892d-4a15-aa0d-9018d1580945). Database is at revision None, head is '
'567bc23fd1ac. Please run `dagster instance migrate`.'
),
):
instance._event_storage.get_logs_for_run('89296095-892d-4a15-aa0d-9018d1580945')
def test_0_6_6_sqlite_migrate():
test_dir = file_relative_path(__file__, 'snapshot_0_6_6/sqlite')
assert os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/runs.db'))
assert not os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/history/runs.db'))
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
instance.upgrade()
runs = instance.get_runs()
assert len(runs) == 1
run_ids = instance._event_storage.get_all_run_ids()
assert run_ids == ['89296095-892d-4a15-aa0d-9018d1580945']
instance._event_storage.get_logs_for_run('89296095-892d-4a15-aa0d-9018d1580945')
assert not os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/runs.db'))
assert os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/history/runs.db'))
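# As the error messages above spell out, the migration these tests exercise is
# the one performed by `dagster instance migrate`; instance.upgrade() in
# test_0_6_6_sqlite_migrate appears to be its programmatic counterpart.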
| 42.363636
| 100
| 0.698038
| 432
| 3,262
| 4.979167
| 0.280093
| 0.009298
| 0.059507
| 0.033473
| 0.662483
| 0.662483
| 0.647606
| 0.632729
| 0.632729
| 0.615528
| 0
| 0.074961
| 0.218884
| 3,262
| 76
| 101
| 42.921053
| 0.769231
| 0.136726
| 0
| 0.584906
| 0
| 0
| 0.264245
| 0.155983
| 0
| 0
| 0
| 0
| 0.150943
| 1
| 0.056604
| false
| 0
| 0.132075
| 0
| 0.188679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d9d59aa9c4853e8590f823a63f53768b8aecbce1
| 6,899
|
py
|
Python
|
python/ray/experimental/workflow/execution.py
|
wgifford/ray
|
8acb469b047cd9b327c9477a13b030eb7357860e
|
[
"Apache-2.0"
] | null | null | null |
python/ray/experimental/workflow/execution.py
|
wgifford/ray
|
8acb469b047cd9b327c9477a13b030eb7357860e
|
[
"Apache-2.0"
] | 32
|
2021-09-04T07:08:45.000Z
|
2022-02-19T08:08:11.000Z
|
python/ray/experimental/workflow/execution.py
|
wgifford/ray
|
8acb469b047cd9b327c9477a13b030eb7357860e
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
import time
from typing import Set, List, Tuple, Optional, TYPE_CHECKING
import uuid
import ray
from ray.experimental.workflow import workflow_context
from ray.experimental.workflow import workflow_storage
from ray.experimental.workflow.common import (Workflow, WorkflowStatus,
WorkflowMetaData, StepType)
from ray.experimental.workflow.step_executor import commit_step
from ray.experimental.workflow.storage import get_global_storage
from ray.experimental.workflow.workflow_access import (
flatten_workflow_output, get_or_create_management_actor,
get_management_actor)
if TYPE_CHECKING:
from ray.experimental.workflow.step_executor import WorkflowExecutionResult
logger = logging.getLogger(__name__)
def run(entry_workflow: Workflow,
workflow_id: Optional[str] = None,
overwrite: bool = True) -> ray.ObjectRef:
"""Run a workflow asynchronously.
# TODO(suquark): The current "run" always overwrite existing workflow.
# We need to fix this later.
"""
store = get_global_storage()
assert ray.is_initialized()
if workflow_id is None:
# Workflow ID format: {Entry workflow UUID}.{Unix time to nanoseconds}
workflow_id = f"{str(uuid.uuid4())}.{time.time():.9f}"
logger.info(f"Workflow job created. [id=\"{workflow_id}\", storage_url="
f"\"{store.storage_url}\"].")
with workflow_context.workflow_step_context(workflow_id,
store.storage_url):
# checkpoint the workflow
ws = workflow_storage.get_workflow_storage(workflow_id)
commit_step(ws, "", entry_workflow)
workflow_manager = get_or_create_management_actor()
ignore_existing = (entry_workflow.data.step_type != StepType.FUNCTION)
# NOTE: It is important to 'ray.get' the returned output. This
# ensures caller of 'run()' holds the reference to the workflow
# result. Otherwise if the actor removes the reference of the
# workflow output, the caller may fail to resolve the result.
result: "WorkflowExecutionResult" = ray.get(
workflow_manager.run_or_resume.remote(workflow_id,
ignore_existing))
if entry_workflow.data.step_type == StepType.FUNCTION:
return flatten_workflow_output(workflow_id,
result.persisted_output)
else:
return flatten_workflow_output(workflow_id, result.volatile_output)
# TODO(suquark): support recovery with ObjectRef inputs.
def resume(workflow_id: str) -> ray.ObjectRef:
"""Resume a workflow asynchronously. See "api.resume()" for details.
"""
storage = get_global_storage()
logger.info(f"Resuming workflow [id=\"{workflow_id}\", storage_url="
f"\"{storage.storage_url}\"].")
workflow_manager = get_or_create_management_actor()
# NOTE: It is important to 'ray.get' the returned output. This
# ensures caller of 'run()' holds the reference to the workflow
# result. Otherwise if the actor removes the reference of the
# workflow output, the caller may fail to resolve the result.
result: "WorkflowExecutionResult" = ray.get(
workflow_manager.run_or_resume.remote(
workflow_id, ignore_existing=False))
logger.info(f"Workflow job {workflow_id} resumed.")
return flatten_workflow_output(workflow_id, result.persisted_output)
def get_output(workflow_id: str, name: Optional[str]) -> ray.ObjectRef:
"""Get the output of a running workflow.
See "api.get_output()" for details.
"""
assert ray.is_initialized()
try:
workflow_manager = get_management_actor()
except ValueError as e:
raise ValueError(
"Failed to connect to the workflow management "
"actor. The workflow could have already failed. You can use "
"workflow.resume() to resume the workflow.") from e
output = ray.get(workflow_manager.get_output.remote(workflow_id, name))
return flatten_workflow_output(workflow_id, output)
def cancel(workflow_id: str) -> None:
try:
workflow_manager = get_management_actor()
ray.get(workflow_manager.cancel_workflow.remote(workflow_id))
except ValueError:
wf_store = workflow_storage.get_workflow_storage(workflow_id)
wf_store.save_workflow_meta(WorkflowMetaData(WorkflowStatus.CANCELED))
def get_status(workflow_id: str) -> Optional[WorkflowStatus]:
try:
workflow_manager = get_management_actor()
running = ray.get(
workflow_manager.is_workflow_running.remote(workflow_id))
except Exception:
running = False
if running:
return WorkflowStatus.RUNNING
store = workflow_storage.get_workflow_storage(workflow_id)
meta = store.load_workflow_meta()
if meta is None:
raise ValueError(f"No such workflow_id {workflow_id}")
return meta.status
def list_all(status_filter: Set[WorkflowStatus]
) -> List[Tuple[str, WorkflowStatus]]:
try:
workflow_manager = get_management_actor()
except ValueError:
workflow_manager = None
if workflow_manager is None:
runnings = []
else:
runnings = ray.get(workflow_manager.list_running_workflow.remote())
if WorkflowStatus.RUNNING in status_filter and len(status_filter) == 1:
return [(r, WorkflowStatus.RUNNING) for r in runnings]
runnings = set(runnings)
# Here we don't have a workflow id, so use an empty one instead
store = workflow_storage.get_workflow_storage("")
ret = []
for (k, s) in store.list_workflow():
if s == WorkflowStatus.RUNNING and k not in runnings:
s = WorkflowStatus.RESUMABLE
if s in status_filter:
ret.append((k, s))
return ret
def resume_all(with_failed: bool) -> List[Tuple[str, ray.ObjectRef]]:
filter_set = {WorkflowStatus.RESUMABLE}
if with_failed:
filter_set.add(WorkflowStatus.FAILED)
all_failed = list_all(filter_set)
try:
workflow_manager = get_management_actor()
except Exception as e:
raise RuntimeError("Failed to get management actor") from e
async def _resume_one(wid: str) -> Tuple[str, Optional[ray.ObjectRef]]:
try:
result: "WorkflowExecutionResult" = (
await workflow_manager.run_or_resume.remote(wid))
obj = flatten_workflow_output(wid, result.persisted_output)
return wid, obj
except Exception:
logger.error(f"Failed to resume workflow {wid}")
return (wid, None)
ret = workflow_storage.asyncio_run(
asyncio.gather(*[_resume_one(wid) for (wid, _) in all_failed]))
return [(wid, obj) for (wid, obj) in ret if obj is not None]
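# A minimal usage sketch for this module (assumes an initialized Ray runtime,
# a configured global workflow storage, and an entry `Workflow` object named
# `my_entry_workflow` -- all illustrative, not defined in this file):
#
#   import ray
#   ray.init()
#   output_ref = run(my_entry_workflow, workflow_id="demo-workflow")
#   print(ray.get(output_ref))             # block on the workflow result
#   print(get_status("demo-workflow"))     # e.g. WorkflowStatus.RUNNING
#   resumed_ref = resume("demo-workflow")  # restart from the last checkpoint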
| 40.345029
| 79
| 0.681258
| 838
| 6,899
| 5.400955
| 0.201671
| 0.061865
| 0.031816
| 0.041759
| 0.383341
| 0.35285
| 0.292974
| 0.194432
| 0.150243
| 0.124613
| 0
| 0.000566
| 0.232208
| 6,899
| 170
| 80
| 40.582353
| 0.85388
| 0.14031
| 0
| 0.184
| 0
| 0
| 0.078885
| 0.018021
| 0
| 0
| 0
| 0.011765
| 0.016
| 1
| 0.056
| false
| 0
| 0.104
| 0
| 0.248
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 1 |
d9d66c8e24ecdddf4d2ecdc3b422d09645a2f485 | 3,021 | py | Python | mro/stages/analyzer/run_differential_expression/__init__.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | ["MIT"] | 1 | 2019-03-29T04:05:58.000Z | 2019-03-29T04:05:58.000Z | mro/stages/analyzer/run_differential_expression/__init__.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | ["MIT"] | null | null | null | mro/stages/analyzer/run_differential_expression/__init__.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | ["MIT"] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
import cellranger.analysis.diffexp as cr_diffexp
import cellranger.analysis.io as analysis_io
from cellranger.analysis.singlegenome import SingleGenomeAnalysis
import cellranger.h5_constants as h5_constants
import cellranger.analysis.constants as analysis_constants
import cellranger.matrix as cr_matrix
import cellranger.io as cr_io
import cellranger.library_constants as lib_constants
NUM_THREADS_MIN = 4
#TODO Not clear why this stage takes > 1 thread. Martian thinks it does and kills it on long jobs
__MRO__ = """
stage RUN_DIFFERENTIAL_EXPRESSION(
in h5 matrix_h5,
in h5 clustering_h5,
in bool skip,
in int random_seed,
in int max_clusters,
out h5 diffexp_h5,
out path diffexp_csv,
src py "stages/analyzer/run_differential_expression",
) split using (
in string clustering_key,
)
"""
def split(args):
if args.skip:
return {'chunks': [{'__mem_gb': h5_constants.MIN_MEM_GB}]}
chunks = []
# FIXME: Add one for reasons unknown
matrix_mem_gb = 1.8 * cr_matrix.CountMatrix.get_mem_gb_from_matrix_h5(args.matrix_h5)
chunk_mem_gb = int(max(matrix_mem_gb, h5_constants.MIN_MEM_GB))
# HACK - give big jobs more threads in order to avoid overloading a node.
# NOTE: min() caps the request at NUM_THREADS_MIN, and the unconditional
# assignment below then discards it entirely (see the NUM_THREADS_MIN TODO above).
threads = min(cr_io.get_thread_request_from_mem_gb(chunk_mem_gb), NUM_THREADS_MIN)
threads = 4
for key in SingleGenomeAnalysis.load_clustering_keys_from_h5(args.clustering_h5):
chunks.append({
'clustering_key': key,
'__mem_gb': chunk_mem_gb,
'__threads': threads,
})
return {'chunks': chunks, 'join': {'__mem_gb' : 1}}
def main(args, outs):
if args.skip:
return
matrix = cr_matrix.CountMatrix.load_h5_file(args.matrix_h5)
# For now, only compute for gene expression features
matrix = matrix.select_features_by_type(lib_constants.GENE_EXPRESSION_LIBRARY_TYPE)
clustering = SingleGenomeAnalysis.load_clustering_from_h5(args.clustering_h5, args.clustering_key)
diffexp = cr_diffexp.run_differential_expression(matrix, clustering.clusters)
with analysis_io.open_h5_for_writing(outs.diffexp_h5) as f:
cr_diffexp.save_differential_expression_h5(f, args.clustering_key, diffexp)
cr_diffexp.save_differential_expression_csv(args.clustering_key, diffexp, matrix, outs.diffexp_csv)
def join(args, outs, chunk_defs, chunk_outs):
if args.skip:
return
chunk_h5s = [chunk_out.diffexp_h5 for chunk_out in chunk_outs]
chunk_csv_dirs = [chunk_out.diffexp_csv for chunk_out in chunk_outs]
analysis_io.combine_h5_files(chunk_h5s, outs.diffexp_h5, [analysis_constants.ANALYSIS_H5_DIFFERENTIAL_EXPRESSION_GROUP,
analysis_constants.ANALYSIS_H5_KMEANS_DIFFERENTIAL_EXPRESSION_GROUP])
for csv_dir in chunk_csv_dirs:
cr_io.copytree(csv_dir, outs.diffexp_csv, allow_existing=True)
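# Martian drives this stage by calling split() once, main() once per chunk
# dict returned by split(), and join() over all chunk outputs. A rough,
# framework-free sketch of that contract (merge()/new_outs() are hypothetical
# stand-ins for Martian's argument plumbing):
#
#   chunk_plan = split(args)                # {'chunks': [...], 'join': {...}}
#   chunk_outs = []
#   for chunk in chunk_plan['chunks']:
#       chunk_args = merge(args, chunk)     # adds clustering_key, __mem_gb, __threads
#       outs = new_outs()
#       main(chunk_args, outs)
#       chunk_outs.append(outs)
#   join(args, final_outs, chunk_plan['chunks'], chunk_outs)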
| 35.541176
| 125
| 0.735849
| 424
| 3,021
| 4.900943
| 0.316038
| 0.028874
| 0.034649
| 0.023099
| 0.16025
| 0.076035
| 0.023099
| 0
| 0
| 0
| 0
| 0.016353
| 0.190334
| 3,021
| 84
| 126
| 35.964286
| 0.833197
| 0.109897
| 0
| 0.087719
| 0
| 0
| 0.148825
| 0.027602
| 0
| 0
| 0
| 0.011905
| 0
| 1
| 0.052632
| false
| 0
| 0.140351
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 1 |
d9d95781d1bacab44253ba285649d7b99ee1e33d | 542 | py | Python | src/vatic_checker/config.py | jonkeane/vatic-checker | fa8aec6946dcfd3f466b62f9c00d81bc43514b22 | ["MIT"] | null | null | null | src/vatic_checker/config.py | jonkeane/vatic-checker | fa8aec6946dcfd3f466b62f9c00d81bc43514b22 | ["MIT"] | null | null | null | src/vatic_checker/config.py | jonkeane/vatic-checker | fa8aec6946dcfd3f466b62f9c00d81bc43514b22 | ["MIT"] | null | null | null |
localhost = "http://localhost/" # your local host
database = "mysql://root@localhost/vaticChecker" # server://user:pass@localhost/dbname
min_training = 2 # the minimum number of training videos to be considered
recaptcha_secret = "" # recaptcha secret for verification
duplicate_annotations = False # Should the server allow for duplicate annotations?
import os.path
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# TODO: remove on server
import os
os.environ['PYTHON_EGG_CACHE'] = '/tmp/apache'
| 38.714286
| 94
| 0.745387
| 72
| 542
| 5.486111
| 0.694444
| 0.04557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002169
| 0.149446
| 542
| 13
| 95
| 41.692308
| 0.854664
| 0.394834
| 0
| 0
| 0
| 0
| 0.246106
| 0.109034
| 0
| 0
| 0
| 0.076923
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 1 |
d9d96360237e53141cd11d1271cee29b6140650f | 8,233 | py | Python | django/utils/timezone.py | graingert/django | 784d0c261c76535dc760bc8d76793d92f35c1513 | ["BSD-3-Clause"] | 1 | 2015-11-11T12:20:45.000Z | 2015-11-11T12:20:45.000Z | django/utils/timezone.py | graingert/django | 784d0c261c76535dc760bc8d76793d92f35c1513 | ["BSD-3-Clause"] | null | null | null | django/utils/timezone.py | graingert/django | 784d0c261c76535dc760bc8d76793d92f35c1513 | ["BSD-3-Clause"] | null | null | null |
"""Timezone helper functions.
This module uses pytz when it's available and falls back to local implementations when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, basestring) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
local_now = datetime.now(timezone)
return timezone.tzname(local_now)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, basestring) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is not None:
_active.value = self.old_timezone
else:
del _active.value
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
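# A short usage sketch for the helpers above (assumes Django settings are
# configured; the named-zone override additionally requires pytz):
#
#   naive = datetime(2012, 1, 1, 12, 0)
#   aware = make_aware(naive, get_default_timezone())
#   with override("Europe/Paris"):      # activate a zone for this thread
#       local = localtime(aware)        # convert to the active time zone
#   assert is_naive(make_naive(aware, utc))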
| 28.195205
| 81
| 0.66197
| 1,098
| 8,233
| 4.839709
| 0.220401
| 0.034626
| 0.023711
| 0.009785
| 0.285284
| 0.252352
| 0.228453
| 0.176139
| 0.125329
| 0.125329
| 0
| 0.000972
| 0.24997
| 8,233
| 291
| 82
| 28.292096
| 0.859595
| 0.383457
| 0
| 0.242424
| 0
| 0
| 0.045045
| 0.004505
| 0
| 0
| 0
| 0
| 0
| 1
| 0.204545
| false
| 0
| 0.045455
| 0.045455
| 0.469697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 1 |
d9e551f94d290cc9b470d1fddfc0e91666dab7ba | 444 | py | Python | setup.py | zhanghang1989/notedown | b0fa1eac88d1cd7fa2261d6c454f82669e6f552b | ["BSD-2-Clause"] | null | null | null | setup.py | zhanghang1989/notedown | b0fa1eac88d1cd7fa2261d6c454f82669e6f552b | ["BSD-2-Clause"] | null | null | null | setup.py | zhanghang1989/notedown | b0fa1eac88d1cd7fa2261d6c454f82669e6f552b | ["BSD-2-Clause"] | null | null | null |
from setuptools import setup
# create __version__
exec(open('./_version.py').read())
setup(
name="notedown",
version=__version__,
description="Convert markdown to IPython notebook.",
author="Aaron O'Leary",
author_email='[email protected]',
url='http://github.com/aaren/notedown',
install_requires=['ipython', ],
entry_points={
'console_scripts': [
'notedown = notedown:cli',
],
}
)
| 22.2
| 56
| 0.628378
| 47
| 444
| 5.659574
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218468
| 444
| 19
| 57
| 23.368421
| 0.766571
| 0.040541
| 0
| 0
| 0
| 0
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0 | 1 |
d9e62b20786a73ca86ccde01bde160623cc32657 | 3,710 | py | Python | genyrator/entities/Template.py | jumblesale/genyrator | c4429f689e92e8447b0b944e7d9b434f99cae51d | ["MIT"] | 1 | 2020-07-01T16:54:39.000Z | 2020-07-01T16:54:39.000Z | genyrator/entities/Template.py | jumblesale/genyrator | c4429f689e92e8447b0b944e7d9b434f99cae51d | ["MIT"] | 10 | 2018-11-16T15:04:21.000Z | 2021-06-01T22:27:38.000Z | genyrator/entities/Template.py | jumblesale/genyrator | c4429f689e92e8447b0b944e7d9b434f99cae51d | ["MIT"] | 2 | 2018-08-08T10:42:35.000Z | 2019-07-25T11:56:06.000Z |
from typing import List, Optional, NewType, Tuple, NamedTuple, Type
import attr
from jinja2 import Template as JinjaTemplate, StrictUndefined
from genyrator.entities.Entity import Entity
from genyrator.path import create_relative_path
OutPath = NewType('OutPath', Tuple[List[str], str])
Import = NamedTuple('Import',
[('module_name', str),
('imports', List[str]), ])
@attr.s
class Template(object):
template_name: str = attr.ib()
template_file_name: str = attr.ib()
template_file_path: List[str] = attr.ib()
relative_path: List[str] = attr.ib()
out_path: Optional[OutPath] = attr.ib()
def create_template(self):
path = create_relative_path(
[*self.template_file_path, self.template_file_name]
)
with open(path) as f:
template = JinjaTemplate(f.read(), undefined=StrictUndefined)
return template
def render(self):
return self.create_template().render(template=self)
def create_template(
constructor,
template_path: Optional[List[str]] = None,
out_path: Optional[OutPath] = None,
**kwargs,
) -> Template:
relative_path = template_path[0:-1]
path = ['genyrator', 'templates'] + relative_path
template_name = template_path[-1]
return constructor(
template_name=template_name,
template_file_name='{}.j2'.format(template_name),
template_file_path=path,
out_path=out_path,
relative_path=relative_path,
**kwargs,
)
@attr.s
class RootInit(Template):
db_import_path: str = attr.ib()
module_name: str = attr.ib()
@attr.s
class RootSchema(Template):
module_name: str = attr.ib()
entities: List[Entity] = attr.ib()
@attr.s
class ConvertDict(Template):
module_name: str = attr.ib()
@attr.s
class SQLAlchemyModel(Template):
module_name: str = attr.ib()
db_import_path: str = attr.ib()
entity: Entity = attr.ib()
@attr.s
class ModelToDict(Template):
module_name: str = attr.ib()
@attr.s
class Config(Template):
module_name: str = attr.ib()
@attr.s
class SQLAlchemyModelInit(Template):
module_name: str = attr.ib()
db_import_path: str = attr.ib()
imports: List[Import] = attr.ib()
@attr.s
class RestplusModel(Template):
entity: Entity = attr.ib()
@attr.s
class Resource(Template):
module_name: str = attr.ib()
db_import_path: str = attr.ib()
entity: Entity = attr.ib()
restplus_template: str = attr.ib()
TypeOption: Type = attr.ib()
@attr.s
class ResourcesInit(Template):
entities: List[Entity] = attr.ib()
module_name: str = attr.ib()
api_name: str = attr.ib()
api_description: str = attr.ib()
@attr.s
class DomainModel(Template):
entity: Entity = attr.ib()
module_name: str = attr.ib()
def sqlalchemy_model_imports(self):
return list(set([
rel.target_entity_class_name
for rel in self.entity.relationships
]))
@attr.s
class ConvertProperties(Template):
module_name: str = attr.ib()
@attr.s
class ConvertModels(Template):
module_name: str = attr.ib()
@attr.s
class JoinEntities(Template):
module_name: str = attr.ib()
@attr.s
class ConvertDictToMarshmallow(Template):
module_name: str = attr.ib()
db_import_path: str = attr.ib()
@attr.s
class Fixture(Template):
db_import_path: str = attr.ib()
module_name: str = attr.ib()
entity: Entity = attr.ib()
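# A minimal usage sketch for the create_template() factory above (the template
# path segments are illustrative and must exist under genyrator/templates/ for
# render() to succeed):
#
#   config_template = create_template(
#       Config,
#       template_path=['root', 'config'],   # -> genyrator/templates/root/config.j2
#       out_path=OutPath((['my_app'], 'config.py')),
#       module_name='my_app',
#   )
#   source = config_template.render()       # Jinja2 render with template=self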
| 24.090909
| 73
| 0.616712
| 444
| 3,710
| 4.990991
| 0.175676
| 0.102888
| 0.113718
| 0.105596
| 0.421029
| 0.361913
| 0.304152
| 0.278881
| 0.24639
| 0.146209
| 0
| 0.001836
| 0.265768
| 3,710
| 153
| 74
| 24.248366
| 0.811674
| 0
| 0
| 0.419643
| 0
| 0
| 0.014555
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.133929
| 0.017857
| 0.696429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0 | 1 |
d9e8867f9d8fa5dbea3f62a0b298eac5f535d37a | 9,499 | py | Python | src/bots/test/test_inputs.py | drewbitt/lightnovel-crawler | fa9546ad9dcff49c75296b0b8772f6578689adcc | ["Apache-2.0"] | 1 | 2019-03-10T13:02:23.000Z | 2019-03-10T13:02:23.000Z | src/bots/test/test_inputs.py | drewbitt/lightnovel-crawler | fa9546ad9dcff49c75296b0b8772f6578689adcc | ["Apache-2.0"] | null | null | null | src/bots/test/test_inputs.py | drewbitt/lightnovel-crawler | fa9546ad9dcff49c75296b0b8772f6578689adcc | ["Apache-2.0"] | null | null | null |
# base64.decodestring was deprecated and removed in Python 3.9;
# decodebytes is its direct replacement.
from base64 import decodebytes as b64decode
allowed_failures = [
'https://ranobelib.me/',
'https://www.aixdzs.com/',
'https://webnovelindonesia.com/',
b64decode("aHR0cHM6Ly9jb21yYWRlbWFvLmNvbS8=".encode()).decode()
]
test_user_inputs = {
b64decode("aHR0cHM6Ly9jb21yYWRlbWFvLmNvbS8=".encode()).decode(): [
b64decode(
"aHR0cHM6Ly9jb21yYWRlbWFvLmNvbS9ub3ZlbC90c3VydWdpLW5vLWpvb3UtdG8tcmFrdWluLW5vLWtvLw==".encode()).decode()
],
'https://novelsrock.com/': [
'https://novelsrock.com/novel/the-returner/',
'kuro'
],
'http://gravitytales.com/': [
'http://gravitytales.com/posts/novel/a-dragons-curiosity'
],
'http://novelfull.com/': [
'http://novelfull.com/dungeon-defense.html',
'Sinister Ex Girlfriend',
],
'http://www.machinenoveltranslation.com/': [
'http://www.machinenoveltranslation.com/a-thought-through-eternity',
],
'http://zenithnovels.com/': [
'http://zenithnovels.com/infinity-armament/',
],
'https://anythingnovel.com/': [
'https://anythingnovel.com/novel/king-of-gods/',
],
'https://boxnovel.com/': [
'https://boxnovel.com/novel/the-rest-of-my-life-is-for-you/',
'cultivation chat',
],
'https://crescentmoon.blog/': [
'https://crescentmoon.blog/dark-blue-and-moonlight/',
],
'https://litnet.com/': [
'https://litnet.com/en/book/candy-lips-1-b106232',
'candy lips',
],
'https://lnmtl.com/': [
'https://lnmtl.com/novel/the-strongest-dan-god',
],
'https://m.chinesefantasynovels.com/': [
'https://m.chinesefantasynovels.com/3838/',
],
'https://m.novelspread.com/': [
'https://m.novelspread.com/novel/the-legend-of-the-concubine-s-daughter-minglan',
],
'https://m.romanticlovebooks.com/': [
'https://m.romanticlovebooks.com/xuanhuan/207.html',
],
'http://www.tiknovel.com/': [
'http://www.tiknovel.com/book/index?id=717',
],
'https://m.wuxiaworld.co/': [
'https://m.wuxiaworld.co/Reincarnation-Of-The-Strongest-Sword-God/',
],
'https://meionovel.id/': [
'https://meionovel.id/novel/the-legendary-mechanic/',
],
'https://mtled-novels.com/': [
'https://mtled-novels.com/novels/great-ruler/',
'great ruler'
],
'https://bestlightnovel.com/': [
'https://bestlightnovel.com/novel_888103800',
'martial'
],
'https://novelplanet.com/': [
'https://novelplanet.com/Novel/Returning-from-the-Immortal-World',
'immortal'
],
'https://www.volarenovels.com/': [
'https://www.volarenovels.com/novel/adorable-creature-attacks',
],
'https://webnovel.online/': [
'https://webnovel.online/full-marks-hidden-marriage-pick-up-a-son-get-a-free-husband',
],
'https://www.idqidian.us/': [
'https://www.idqidian.us/novel/peerless-martial-god/'
],
'https://www.novelall.com/': [
'https://www.novelall.com/novel/Virtual-World-Close-Combat-Mage.html',
'combat'
],
'https://www.novelspread.com/': [
'https://www.novelspread.com/novel/the-legend-of-the-concubine-s-daughter-minglan'
],
'https://www.readlightnovel.org/': [
'https://www.readlightnovel.org/top-furious-doctor-soldier'
],
'https://www.romanticlovebooks.com/': [
'https://www.romanticlovebooks.com/xianxia/251.html'
],
'https://www.royalroad.com/': [
'https://www.royalroad.com/fiction/21220/mother-of-learning',
'mother'
],
'https://www.scribblehub.com/': [
'https://www.scribblehub.com/series/73550/modern-life-of-the-exalted-immortal/',
'cultivation'
],
'https://www.webnovel.com/': [
'https://www.webnovel.com/book/8212987205006305/Trial-Marriage-Husband%3A-Need-to-Work-Hard',
'martial',
],
'https://www.worldnovel.online/': [
'https://www.worldnovel.online/novel/solo-leveling/',
],
'https://www.wuxiaworld.co/': [
'https://www.wuxiaworld.co/Reincarnation-Of-The-Strongest-Sword-God/',
'sword'
],
'https://rewayat.club/': [
'https://rewayat.club/novel/almighty-sword-domain/'
],
'https://www.wuxiaworld.com/': [
'https://www.wuxiaworld.com/novel/martial-god-asura',
'martial',
],
'https://creativenovels.com/': [
'https://creativenovels.com/novel/eternal-reverence/',
],
'https://www.tapread.com/': [
'https://www.tapread.com/book/detail/80',
],
'http://www.tapread.com/': [
'http://www.tapread.com/book/detail/80',
],
'https://readnovelfull.com/': [
'https://readnovelfull.com/lord-of-all-realms.html',
'cultivation'
],
'https://myoniyonitranslations.com/': [
'https://myoniyonitranslations.com/top-management/',
'https://myoniyonitranslations.com/category/god-of-tennis',
],
'https://babelnovel.com/': [
'https://babelnovel.com/books/ceo-let-me-go',
'dazzle Good'
],
'https://wuxiaworld.online/': [
'https://wuxiaworld.online/trial-marriage-husband-need-to-work-hard',
'cultivation',
],
'https://www.novelv.com/': [
'https://www.novelv.com/0/349/'
],
'http://fullnovel.live/': [
'http://fullnovel.live/novel-a-will-eternal',
'will eternal',
],
'https://www.noveluniverse.com/': [
'https://www.noveluniverse.com/index/novel/info/id/15.html'
],
'https://novelraw.blogspot.com/': [
'https://novelraw.blogspot.com/2019/03/dragon-king-son-in-law-mtl.html'
],
'https://light-novel.online/': [
'https://light-novel.online/great-tyrannical-deity',
'tyrannical'
],
'https://www.rebirth.online/': [
'https://www.rebirth.online/novel/upside-down'
],
'https://www.jieruihao.cn/': [
'https://www.jieruihao.cn/novel/against-the-gods/',
],
'https://www.wattpad.com/': [
'https://www.wattpad.com/story/87505567-loving-mr-jerkface-%E2%9C%94%EF%B8%8F'
],
'https://novelgo.id/': [
'https://novelgo.id/novel/the-mightiest-leveling-system/'
],
'https://yukinovel.me/': [
'https://yukinovel.me/novel/the-second-coming-of-avarice/',
],
'https://www.asianhobbyist.com/': [
'https://www.asianhobbyist.com/series/that-time-i-got-reincarnated-as-a-slime/'
],
'https://kisslightnovels.info/': [
'https://kisslightnovels.info/novel/solo-leveling/'
],
'https://novelonlinefull.com/': [
'https://novelonlinefull.com/novel/abo1520855001564322110'
],
'https://www.machine-translation.org/': [
'https://www.machine-translation.org/novel/bace21c9b10d34e9/world-of-cultivation.html',
'https://www.machine-translation.org/novel/a5eee127d75da0d2/long-live-summons.html'
],
'https://www.fanfiction.net/': [
'https://www.fanfiction.net/s/7268451/1/Facebook-For-wizards'
],
'https://www.mtlnovel.com/': [
'https://www.mtlnovel.com/trapped-in-a-typical-idol-drama/'
],
'https://wordexcerpt.com/': [
'https://wordexcerpt.com/series/transmigration-raising-the-child-of-the-male-lead-boss/'
],
'https://www.translateindo.com/': [
'https://www.translateindo.com/demon-wang-golden-status-favoured-fei/'
],
'https://ranobelib.me/': [
'https://ranobelib.me/sozvezdie-klinka'
],
'https://novelringan.com/': [
'https://novelringan.com/series/the-most-loving-marriage-in-history-master-mus-pampered-wife/'
],
'https://wuxiaworld.site/': [
'https://wuxiaworld.site/novel/only-i-level-up/'
],
'https://id.mtlnovel.com/': [
'https://id.mtlnovel.com/the-strongest-plane-becomes-god/'
],
'https://www.shinsori.com/': [
'https://www.shinsori.com/akuyaku-reijou-ni-nanka-narimasen/'
],
'https://www.flying-lines.com/': [
'https://www.flying-lines.com/novel/one-useless-rebirth'
],
'https://book.qidian.com/': [
'https://book.qidian.com/info/1016597088'
],
'https://kiss-novel.com/': [
'https://kiss-novel.com/the-first-order'
],
'https://www.aixdzs.com/': [
'https://www.aixdzs.com/d/66/66746/'
],
'https://webnovelonline.com/': [
'https://webnovelonline.com/novel/the_anarchic_consort'
],
'https://4scanlation.com/': [
'https://4scanlation.com/tensei-shitara-slime-datta-ken-wn/'
],
'https://listnovel.com/': [
'https://listnovel.com/novel/my-sassy-crown-princess/'
],
'https://tomotranslations.com/': [
'https://tomotranslations.com/this-hero-is-invincible-but-too-cautious/'
],
'https://www.wuxialeague.com/': [
'https://www.wuxialeague.com/novel/245/'
],
'http://liberspark.com/': [
'http://liberspark.com/novel/black-irons-glory'
],
'https://webnovelindonesia.com/': [
'https://webnovelindonesia.com/nv/almighty-student'
],
'http://tiknovel.com/': [
'http://tiknovel.com/book/index?id=717'
],
'http://boxnovel.org/': [
'http://boxnovel.org/novel/martial-god-asura'
]
}
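# A rough sketch of how a bot test harness could consume the table above
# (crawl_novel is a hypothetical stand-in; only the data shapes come from
# this file):
#
#   for start_url, inputs in test_user_inputs.items():
#       for user_input in inputs:  # either a direct novel URL or a search query
#           try:
#               crawl_novel(start_url, user_input)
#           except Exception:
#               if start_url not in allowed_failures:
#                   raise          # failures are tolerated only for flaky sources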
| 34.922794
| 117
| 0.596694
| 1,009
| 9,499
| 5.611497
| 0.331021
| 0.08195
| 0.036913
| 0.018368
| 0.11798
| 0.102437
| 0.084776
| 0.084776
| 0.068174
| 0.044507
| 0
| 0.024765
| 0.192336
| 9,499
| 271
| 118
| 35.051661
| 0.713243
| 0
| 0
| 0.33829
| 0
| 0.052045
| 0.713759
| 0.015581
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003717
| 0
| 0.003717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 1 |
d9ea7ffbac1c307ae6a48a478a94b12a44b81de1 | 3,325 | py | Python | backend/radar/engine/body_objects.py | me-anton/radar-app | cc7d1e876e0ce9b6173b6d7b484d5553e247166e | ["MIT"] | null | null | null | backend/radar/engine/body_objects.py | me-anton/radar-app | cc7d1e876e0ce9b6173b6d7b484d5553e247166e | ["MIT"] | null | null | null | backend/radar/engine/body_objects.py | me-anton/radar-app | cc7d1e876e0ce9b6173b6d7b484d5553e247166e | ["MIT"] | null | null | null |
import logging
import json
from dataclasses import dataclass
from redis import Redis
from typing import Iterable, Tuple, List, Iterator, Union, Dict
from typing_extensions import TypedDict
from backend import settings
from caching.scripts import RedisScriptsPool
from share.metaclasses import Singleton
from radar.models import AlienBody
from radar.validation import validate_body_str_profile
logger = logging.getLogger(__name__)
BodiesUpdate = TypedDict('BodiesUpdate', {'dropped_keys': List[str],
'new_records': Dict[str, str]})
@dataclass(frozen=True)
class BodyObject:
key: str
matrix: List[List[str]]
width: int
height: int
@staticmethod
def generate(key: str, body: str) -> 'BodyObject':
line_list = body.splitlines()
matrix = [list(line) for line in line_list]
return BodyObject(key=key, matrix=matrix,
width=len(matrix[0]), height=len(matrix))
class BodyObjectsPool(metaclass=Singleton):
"""
An object for getting BodyObject instances from database or cache
"""
body_key_prefix = 'body:'
body_lookup_pattern = body_key_prefix + '*'
body_expiration = 10 # in seconds
def __init__(self, num_of_default_bodies=3):
self.num_of_default_bodies = num_of_default_bodies
self.__default_bodies: Tuple[BodyObject, ...] = \
self._generate_defaults(num_of_default_bodies)
self._redis = Redis(host=settings.REDIS_HOSTNAME)
self._scripts = RedisScriptsPool()
def add_body(self, body: Union[str, bytes], body_id: str) -> None:
"""Cache the requested body string in Redis db"""
validate_body_str_profile(body)
key = self.make_body_key(body_id)
self._redis.set(key, body, self.body_expiration)
def ping_body(self, body_id: str):
"""Reset expiration time of a body"""
key = self.make_body_key(body_id)
self._redis.expire(key, self.body_expiration)
def update_bodies(self, known_bodies_keys: Iterable[str],
max_capacity: int) -> BodiesUpdate:
"""
Give update on state of body objects' records in Redis db
:param known_bodies_keys: redis keys of already known bodies
:param max_capacity: maximum relevant for requester number of bodies
including already known ones
"""
return json.loads(
self._scripts.update_records(keys=known_bodies_keys,
args=[max_capacity,
self.body_lookup_pattern])
)
def make_body_key(self, body_id: str):
return self.body_key_prefix + body_id
@property
def first(self):
return self._get_default(0)
@property
def second(self):
return self._get_default(1)
@property
def third(self):
return self._get_default(2)
def _get_default(self, index) -> BodyObject:
return self.__default_bodies[index]
@staticmethod
def _generate_defaults(num_of_defaults):
logger.info('Generating default bodies')
query = AlienBody.objects.filter(id__lte=num_of_defaults)
return tuple(BodyObject.generate(str(body.id), body.body_str)
for body in query)
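# A minimal usage sketch for BodyObjectsPool (assumes Redis is reachable at
# settings.REDIS_HOSTNAME, the body string passes validate_body_str_profile,
# and AlienBody rows with ids 1..3 exist for the defaults):
#
#   pool = BodyObjectsPool()               # Singleton: same instance everywhere
#   pool.add_body("**\n**", body_id="42")  # cache a body for body_expiration seconds
#   pool.ping_body("42")                   # reset its expiration
#   update = pool.update_bodies(known_bodies_keys=[], max_capacity=8)
#   print(update["new_records"], pool.first.width)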
| 33.25
| 76
| 0.657143
| 406
| 3,325
| 5.135468
| 0.320197
| 0.026859
| 0.023022
| 0.034532
| 0.11223
| 0.035492
| 0.035492
| 0.035492
| 0.035492
| 0.035492
| 0
| 0.002837
| 0.258045
| 3,325
| 99
| 77
| 33.585859
| 0.842319
| 0.111278
| 0
| 0.101449
| 0
| 0
| 0.026371
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15942
| false
| 0
| 0.15942
| 0.072464
| 0.565217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0 | 1 |
d9ec7fb034397cf9a445f613d02c81768a1461eb | 3,410 | py | Python | bokeh/client/util.py | areaweb/bokeh | 9d131e45d626a912e85aee5b2647139c194dc893 | ["BSD-3-Clause"] | 1 | 2021-01-31T22:13:13.000Z | 2021-01-31T22:13:13.000Z | bokeh/client/util.py | adsbxchange/bokeh | 47aa8f8420944c47e876c1c36be182d257c14b87 | ["BSD-3-Clause"] | 1 | 2017-01-12T00:37:38.000Z | 2017-01-12T00:37:38.000Z | bokeh/client/util.py | adsbxchange/bokeh | 47aa8f8420944c47e876c1c36be182d257c14b87 | ["BSD-3-Clause"] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Internal utility functions used by ``bokeh.client``
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from bokeh.util.api import public, internal ; public, internal
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
@internal((1,0,0))
def server_url_for_websocket_url(url):
''' Convert an ``ws(s)`` URL for a Bokeh server into the appropriate
``http(s)`` URL for the websocket endpoint.
Args:
url (str):
An ``ws(s)`` URL ending in ``/ws``
Returns:
str:
The corresponding ``http(s)`` URL.
Raises:
ValueError:
If the input URL is not of the proper form.
'''
if url.startswith("ws:"):
reprotocoled = "http" + url[2:]
elif url.startswith("wss:"):
reprotocoled = "https" + url[3:]
else:
raise ValueError("URL has non-websocket protocol " + url)
if not reprotocoled.endswith("/ws"):
raise ValueError("websocket URL does not end in /ws")
return reprotocoled[:-2]
@internal((1,0,0))
def websocket_url_for_server_url(url):
''' Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into
the appropriate ``ws(s)`` URL
Args:
url (str):
An ``http(s)`` URL
Returns:
str:
The corresponding ``ws(s)`` URL ending in ``/ws``
Raises:
ValueError:
If the input URL is not of the proper form.
'''
if url.startswith("http:"):
reprotocoled = "ws" + url[4:]
elif url.startswith("https:"):
reprotocoled = "wss" + url[5:]
else:
raise ValueError("URL has unknown protocol " + url)
if reprotocoled.endswith("/"):
return reprotocoled + "ws"
else:
return reprotocoled + "/ws"
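# Round-trip examples for the two converters above:
#
#   server_url_for_websocket_url("ws://localhost:5006/ws")    # -> "http://localhost:5006/"
#   server_url_for_websocket_url("wss://example.org/app/ws")  # -> "https://example.org/app/"
#   websocket_url_for_server_url("http://localhost:5006/")    # -> "ws://localhost:5006/ws"
#   websocket_url_for_server_url("https://example.org/app")   # -> "wss://example.org/app/ws"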
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 31.574074
| 82
| 0.389443
| 271
| 3,410
| 4.830258
| 0.383764
| 0.024446
| 0.018335
| 0.016807
| 0.210848
| 0.151261
| 0.097785
| 0.097785
| 0.097785
| 0.097785
| 0
| 0.006702
| 0.168622
| 3,410
| 107
| 83
| 31.869159
| 0.455026
| 0.65044
| 0
| 0.185185
| 0
| 0
| 0.123928
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.111111
| 0
| 0.296296
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 1 |
d9f04eac1f39d4c14950ae0caf3dff21f18defd4 | 84,990 | py | Python | source/browseMode.py | neal-hub/nvda-test | 4c3a67b2eafa9721c5de3f671d10e60ab2d43865 | ["bzip2-1.0.6"] | 1 | 2022-02-20T23:10:39.000Z | 2022-02-20T23:10:39.000Z | source/browseMode.py | neal-hub/nvda-test | 4c3a67b2eafa9721c5de3f671d10e60ab2d43865 | ["bzip2-1.0.6"] | null | null | null | source/browseMode.py | neal-hub/nvda-test | 4c3a67b2eafa9721c5de3f671d10e60ab2d43865 | ["bzip2-1.0.6"] | null | null | null |
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2007-2021 NV Access Limited, Babbage B.V., James Teh, Leonard de Ruijter,
# Thomas Stivers, Accessolutions, Julien Cochuyt
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
from typing import Any, Callable, Union
import os
import itertools
import collections
import winsound
import time
import weakref
import wx
import core
from logHandler import log
import documentBase
import review
import scriptHandler
import eventHandler
import nvwave
import queueHandler
import gui
import ui
import cursorManager
from scriptHandler import script, isScriptWaiting, willSayAllResume
import aria
import controlTypes
from controlTypes import OutputReason
import config
import textInfos
import braille
import vision
import speech
from speech import sayAll
import treeInterceptorHandler
import inputCore
import api
import gui.guiHelper
from gui.dpiScalingHelper import DpiScalingHelperMixinWithoutInit
from NVDAObjects import NVDAObject
import gui.contextHelp
from abc import ABCMeta, abstractmethod
import globalVars
from typing import Optional
def reportPassThrough(treeInterceptor,onlyIfChanged=True):
"""Reports the pass through mode if it has changed.
@param treeInterceptor: The current Browse Mode treeInterceptor.
@type treeInterceptor: L{BrowseModeTreeInterceptor}
@param onlyIfChanged: if true, reporting will not happen if the last reportPassThrough call reported the same thing.
@type onlyIfChanged: bool
"""
if not onlyIfChanged or treeInterceptor.passThrough != reportPassThrough.last:
if config.conf["virtualBuffers"]["passThroughAudioIndication"]:
sound = "focusMode.wav" if treeInterceptor.passThrough else "browseMode.wav"
nvwave.playWaveFile(os.path.join(globalVars.appDir, "waves", sound))
else:
if treeInterceptor.passThrough:
# Translators: The mode to interact with controls in documents
ui.message(_("Focus mode"))
else:
# Translators: The mode that presents text in a flat representation
# that can be navigated with the cursor keys like in a text document
ui.message(_("Browse mode"))
reportPassThrough.last = treeInterceptor.passThrough
reportPassThrough.last = False
def mergeQuickNavItemIterators(iterators,direction="next"):
"""
Merges multiple iterators that emit L{QuickNavItem} objects, yielding them from first to last.
They are sorted using min or max (__lt__ should be implemented on the L{QuickNavItem} objects).
@param iterators: the iterators you want to merge.
@type iterators: sequence of iterators that emit L{QuickNavItem} objects.
@param direction: the direction these iterators are searching (e.g. next, previous)
@type direction: string
"""
finder=min if direction=="next" else max
curValues=[]
# Populate a list with all iterators and their corresponding first value
for it in iterators:
try:
val=next(it)
except StopIteration:
continue
curValues.append((it,val))
# Until all iterators have been used up,
# Find the first (minimum or maximum) of all the values,
# emit that, and update the list with the next available value for the iterator whose value was emitted.
while len(curValues)>0:
first=finder(curValues,key=lambda x: x[1])
curValues.remove(first)
it,val=first
yield val
try:
newVal=next(it)
except StopIteration:
continue
curValues.append((it,newVal))
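# A small illustration of the merge behaviour above, using plain integers in
# place of QuickNavItem objects (anything orderable works, since only min/max
# and iteration are used):
#
#   list(mergeQuickNavItemIterators([iter([1, 4, 7]), iter([2, 3, 9])]))
#   # -> [1, 2, 3, 4, 7, 9]
#   list(mergeQuickNavItemIterators([iter([7, 4, 1]), iter([9, 3, 2])], "previous"))
#   # -> [9, 7, 4, 3, 2, 1]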
class QuickNavItem(object, metaclass=ABCMeta):
""" Emitted by L{BrowseModeTreeInterceptor._iterNodesByType}, this represents one of many positions in a browse mode document, based on the type of item being searched for (e.g. link, heading, table etc)."""
itemType=None #: The type of items searched for (e.g. link, heading, table etc)
label=None #: The label that should represent this item in the Elements list.
isAfterSelection=False #: Is this item positioned after the caret in the document? Used by the elements list to place its own selection.
def __init__(self,itemType,document):
"""
@param itemType: the type that was searched for (e.g. link, heading, table etc)
@type itemType: string
@param document: the browse mode document this item is a part of.
@type document: L{BrowseModeTreeInterceptor}
"""
self.itemType=itemType
self.document=document
@abstractmethod
def isChild(self,parent):
"""
Is this item a child of the given parent?
This is used when representing items in a hierarchical tree structure, such as the Elements List.
@param parent: the item of whom this item may be a child of.
@type parent: L{QuickNavItem}
@return: True if this item is a child, false otherwise.
@rtype: bool
"""
raise NotImplementedError
@abstractmethod
def report(self,readUnit=None):
"""
Reports the contents of this item.
@param readUnit: the optional unit (e.g. line, paragraph) that should be used to announce the item position when moved to. If not given, then the full size of the item is used.
@type readUnit: a L{textInfos}.UNIT_* constant.
"""
raise NotImplementedError
@abstractmethod
def moveTo(self):
"""
Moves the browse mode caret or focus to this item.
"""
raise NotImplementedError
def activate(self):
"""
Activates this item's position. E.g. follows a link, presses a button etc.
"""
raise NotImplementedError
def rename(self,newName):
"""
Renames this item with the new name.
"""
raise NotImplementedError
@property
def isRenameAllowed(self):
return False
class TextInfoQuickNavItem(QuickNavItem):
""" Represents a quick nav item in a browse mode document who's positions are represented by a L{textInfos.TextInfo}. """
def __init__(self,itemType,document,textInfo):
"""
See L{QuickNavItem.__init__} for itemType and document argument definitions.
@param textInfo: the textInfo position this item represents.
@type textInfo: L{textInfos.TextInfo}
"""
self.textInfo=textInfo
super(TextInfoQuickNavItem,self).__init__(itemType,document)
def __lt__(self,other):
return self.textInfo.compareEndPoints(other.textInfo,"startToStart")<0
@property
def obj(self):
return self.textInfo.basePosition if isinstance(self.textInfo.basePosition,NVDAObject) else None
@property
def label(self):
return self.textInfo.text.strip()
def isChild(self,parent):
if parent.textInfo.isOverlapping(self.textInfo):
return True
return False
def report(self,readUnit=None):
info=self.textInfo
# If we are dealing with a form field, ensure we don't read the whole content if it's an editable text.
if self.itemType == "formField":
if self.obj.role == controlTypes.Role.EDITABLETEXT:
readUnit = textInfos.UNIT_LINE
if readUnit:
fieldInfo = info.copy()
info.collapse()
info.move(readUnit, 1, endPoint="end")
if info.compareEndPoints(fieldInfo, "endToEnd") > 0:
# We've expanded past the end of the field, so limit to the end of the field.
info.setEndPoint(fieldInfo, "endToEnd")
speech.speakTextInfo(info, reason=OutputReason.QUICKNAV)
def activate(self):
self.textInfo.obj._activatePosition(info=self.textInfo)
def moveTo(self):
if self.document.passThrough and getattr(self, "obj", False):
if controlTypes.State.FOCUSABLE in self.obj.states:
self.obj.setFocus()
return
self.document.passThrough = False
reportPassThrough(self.document)
info = self.textInfo.copy()
info.collapse()
self.document._set_selection(info, reason=OutputReason.QUICKNAV)
@property
def isAfterSelection(self):
caret=self.document.makeTextInfo(textInfos.POSITION_CARET)
return self.textInfo.compareEndPoints(caret, "startToStart") > 0
def _getLabelForProperties(self, labelPropertyGetter: Callable[[str], Optional[Any]]):
"""
Fetches required properties for this L{TextInfoQuickNavItem} and constructs a label to be shown in an elements list.
This can be used by subclasses to implement the L{label} property.
@param labelPropertyGetter: A callable taking 1 argument, specifying the property to fetch.
For example, if L{itemType} is landmark, the callable must return the landmark type when "landmark" is passed as the property argument.
Alternative property names might be name or value.
The callable must return None if the property doesn't exist.
An expected callable might be get method on a L{Dict},
or "lambda property: getattr(self.obj, property, None)" for an L{NVDAObject}.
"""
content = self.textInfo.text.strip()
if self.itemType == "heading":
# Output: displayed text of the heading.
return content
labelParts = None
name = labelPropertyGetter("name")
if self.itemType == "landmark":
landmark = aria.landmarkRoles.get(labelPropertyGetter("landmark"))
# Example output: main menu; navigation
labelParts = (name, landmark)
else:
role: Union[controlTypes.Role, int] = labelPropertyGetter("role")
role = controlTypes.Role(role)
roleText = role.displayString
# Translators: Reported label in the elements list for an element which has no name and value
unlabeled = _("Unlabeled")
realStates = labelPropertyGetter("states")
labeledStates = " ".join(controlTypes.processAndLabelStates(role, realStates, OutputReason.FOCUS))
if self.itemType == "formField":
if role in (
controlTypes.Role.BUTTON,
controlTypes.Role.DROPDOWNBUTTON,
controlTypes.Role.TOGGLEBUTTON,
controlTypes.Role.SPLITBUTTON,
controlTypes.Role.MENUBUTTON,
controlTypes.Role.DROPDOWNBUTTONGRID,
controlTypes.Role.TREEVIEWBUTTON
):
# Example output: Mute; toggle button; pressed
labelParts = (content or name or unlabeled, roleText, labeledStates)
else:
# Example output: Find a repository...; edit; has auto complete; NVDA
labelParts = (name or unlabeled, roleText, labeledStates, content)
elif self.itemType in ("link", "button"):
# Example output: You have unread notifications; visited
labelParts = (content or name or unlabeled, labeledStates)
if labelParts:
label = "; ".join(lp for lp in labelParts if lp)
else:
label = content
return label
class BrowseModeTreeInterceptor(treeInterceptorHandler.TreeInterceptor):
scriptCategory = inputCore.SCRCAT_BROWSEMODE
_disableAutoPassThrough = False
APPLICATION_ROLES = (controlTypes.Role.APPLICATION, controlTypes.Role.DIALOG)
def _get_currentNVDAObject(self):
raise NotImplementedError
def _get_currentFocusableNVDAObject(self):
return self.makeTextInfo(textInfos.POSITION_CARET).focusableNVDAObjectAtStart
def event_treeInterceptor_gainFocus(self):
"""Triggered when this browse mode interceptor gains focus.
This event is only fired upon entering this treeInterceptor when it was not the current treeInterceptor before.
This is different to L{event_gainFocus}, which is fired when an object inside this treeInterceptor gains focus, even if that object is in the same treeInterceptor.
"""
reportPassThrough(self)
ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES = frozenset({
controlTypes.Role.COMBOBOX,
controlTypes.Role.EDITABLETEXT,
controlTypes.Role.LIST,
controlTypes.Role.LISTITEM,
controlTypes.Role.SLIDER,
controlTypes.Role.TABCONTROL,
controlTypes.Role.MENUBAR,
controlTypes.Role.POPUPMENU,
controlTypes.Role.TREEVIEW,
controlTypes.Role.TREEVIEWITEM,
controlTypes.Role.SPINBUTTON,
controlTypes.Role.TABLEROW,
controlTypes.Role.TABLECELL,
controlTypes.Role.TABLEROWHEADER,
controlTypes.Role.TABLECOLUMNHEADER,
})
SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES = frozenset({
controlTypes.Role.LISTITEM,
controlTypes.Role.RADIOBUTTON,
controlTypes.Role.TAB,
controlTypes.Role.MENUITEM,
controlTypes.Role.RADIOMENUITEM,
controlTypes.Role.CHECKMENUITEM,
})
IGNORE_DISABLE_PASS_THROUGH_WHEN_FOCUSED_ROLES = frozenset({
controlTypes.Role.MENUITEM,
controlTypes.Role.RADIOMENUITEM,
controlTypes.Role.CHECKMENUITEM,
controlTypes.Role.TABLECELL,
})
def shouldPassThrough(self, obj, reason: Optional[OutputReason] = None):
"""Determine whether pass through mode should be enabled (focus mode) or disabled (browse mode) for a given object.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@param reason: The reason for this query;
one of the output reasons, or C{None} for manual pass through mode activation by the user.
@return: C{True} if pass through mode (focus mode) should be enabled, C{False} if it should be disabled (browse mode).
"""
if reason and (
self.disableAutoPassThrough
or (reason == OutputReason.FOCUS and not config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
or (reason == OutputReason.CARET and not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
):
# This check relates to auto pass through and auto pass through is disabled, so don't change the pass through state.
return self.passThrough
if reason == OutputReason.QUICKNAV:
return False
states = obj.states
role = obj.role
if controlTypes.State.EDITABLE in states and controlTypes.State.UNAVAILABLE not in states:
return True
# Menus sometimes get focus due to menuStart events even though they don't report as focused/focusable.
if not obj.isFocusable and controlTypes.State.FOCUSED not in states and role != controlTypes.Role.POPUPMENU:
return False
# many controls that are read-only should not switch to passThrough.
# However, there are exceptions.
if controlTypes.State.READONLY in states:
# #13221: For Slack message lists, and the MS Edge downloads window, switch to passthrough
# even though the list item and list are read-only, but focusable.
if (
role == controlTypes.Role.LISTITEM and controlTypes.State.FOCUSED in states
and obj.parent.role == controlTypes.Role.LIST and controlTypes.State.FOCUSABLE in obj.parent.states
):
return True
# Certain controls such as combo boxes and readonly edits are read-only but still interactive.
# #5118: read-only ARIA grids should also be allowed (focusable table cells, rows and headers).
if role not in (
controlTypes.Role.EDITABLETEXT, controlTypes.Role.COMBOBOX, controlTypes.Role.TABLEROW,
controlTypes.Role.TABLECELL, controlTypes.Role.TABLEROWHEADER, controlTypes.Role.TABLECOLUMNHEADER
):
return False
# Any roles or states for which we always switch to passThrough
if role in self.ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES or controlTypes.State.EDITABLE in states:
return True
# focus is moving to this control. Perhaps after pressing tab or clicking a button that brings up a menu (via javascript)
if reason == OutputReason.FOCUS:
if role in self.SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES:
return True
# If this is a focus change, pass through should be enabled for certain ancestor containers.
# this is done last for performance considerations. Walking up through the parents could be costly
while obj and obj != self.rootNVDAObject:
if obj.role == controlTypes.Role.TOOLBAR:
return True
obj = obj.parent
return False
def _get_shouldTrapNonCommandGestures(self):
return config.conf['virtualBuffers']['trapNonCommandGestures']
def script_trapNonCommandGesture(self,gesture):
winsound.PlaySound("default",1)
singleLetterNavEnabled=True #: Whether single letter navigation scripts should be active (true) or if these letters should fall to the application.
def getAlternativeScript(self,gesture,script):
if self.passThrough or not gesture.isCharacter:
return script
if not self.singleLetterNavEnabled:
return None
if not script and self.shouldTrapNonCommandGestures:
script=self.script_trapNonCommandGesture
return script
def script_toggleSingleLetterNav(self,gesture):
if self.singleLetterNavEnabled:
self.singleLetterNavEnabled=False
# Translators: Reported when single letter navigation in browse mode is turned off.
ui.message(_("Single letter navigation off"))
else:
self.singleLetterNavEnabled=True
# Translators: Reported when single letter navigation in browse mode is turned on.
ui.message(_("Single letter navigation on"))
# Translators: the description for the toggleSingleLetterNavigation command in browse mode.
script_toggleSingleLetterNav.__doc__=_("Toggles single letter navigation on and off. When on, single letter keys in browse mode jump to various kinds of elements on the page. When off, these keys are passed to the application")
def _get_ElementsListDialog(self):
return ElementsListDialog
def _iterNodesByType(self,itemType,direction="next",pos=None):
"""
Yields L{QuickNavItem} objects representing the ordered positions in this document according to the type being searched for (e.g. link, heading, table etc).
@param itemType: the type being searched for (e.g. link, heading, table etc)
@type itemType: string
@param direction: the direction in which to search (next, previous, up)
@type direction: string
@param pos: the position in the document from where to start the search.
@type pos: Usually an L{textInfos.TextInfo}
@raise NotImplementedError: This type is not supported by this BrowseMode implementation
"""
raise NotImplementedError
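# An illustrative sketch of how this iterator is typically consumed (concrete
# subclasses such as virtual buffers provide the real implementation):
#   for item in self._iterNodesByType("link", "next", self.selection):
#       item.report()
#       break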
def _iterNotLinkBlock(self, direction="next", pos=None):
raise NotImplementedError
def _quickNavScript(self,gesture, itemType, direction, errorMessage, readUnit):
if itemType=="notLinkBlock":
iterFactory=self._iterNotLinkBlock
else:
iterFactory=lambda direction,info: self._iterNodesByType(itemType,direction,info)
info=self.selection
try:
item = next(iterFactory(direction, info))
except NotImplementedError:
# Translators: a message when a particular quick nav command is not supported in the current document.
ui.message(_("Not supported in this document"))
return
except StopIteration:
ui.message(errorMessage)
return
# #8831: Report before moving because moving might change the focus, which
# might mutate the document, potentially invalidating info if it is
# offset-based.
if not gesture or not willSayAllResume(gesture):
item.report(readUnit=readUnit)
item.moveTo()
@classmethod
def addQuickNav(
cls,
itemType: str,
key: Optional[str],
nextDoc: str,
nextError: str,
prevDoc: str,
prevError: str,
readUnit: Optional[str] = None
):
"""Adds a script for the given quick nav item.
@param itemType: The type of item, e.g. "heading" or "link".
@param key: The quick navigation key to bind to the script.
Shift is automatically added for the previous item gesture, e.g. h for heading.
If C{None} is provided, the script is unbound by default.
@param nextDoc: The command description to bind to the script that yields the next quick nav item.
@param nextError: The error message if there are no more quick nav items of type itemType when searching forwards.
@param prevDoc: The command description to bind to the script that yields the previous quick nav item.
@param prevError: The error message if there are no more quick nav items of type itemType when searching backwards.
@param readUnit: The unit (one of the textInfos.UNIT_* constants) to announce when moving to this type of item.
For example, only the line is read when moving to tables to avoid reading a potentially massive table.
If None, the entire item will be announced.
"""
scriptSuffix = itemType[0].upper() + itemType[1:]
scriptName = "next%s" % scriptSuffix
funcName = "script_%s" % scriptName
script = lambda self,gesture: self._quickNavScript(gesture, itemType, "next", nextError, readUnit)
script.__doc__ = nextDoc
script.__name__ = funcName
script.resumeSayAllMode = sayAll.CURSOR.CARET
setattr(cls, funcName, script)
if key is not None:
cls.__gestures["kb:%s" % key] = scriptName
scriptName = "previous%s" % scriptSuffix
funcName = "script_%s" % scriptName
script = lambda self,gesture: self._quickNavScript(gesture, itemType, "previous", prevError, readUnit)
script.__doc__ = prevDoc
script.__name__ = funcName
script.resumeSayAllMode = sayAll.CURSOR.CARET
setattr(cls, funcName, script)
if key is not None:
cls.__gestures["kb:shift+%s" % key] = scriptName
def script_elementsList(self, gesture):
# We need this to be a modal dialog, but it mustn't block this script.
def run():
gui.mainFrame.prePopup()
d = self.ElementsListDialog(self)
d.ShowModal()
d.Destroy()
gui.mainFrame.postPopup()
wx.CallAfter(run)
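# wx.CallAfter schedules run() on the GUI thread once current event handling
# completes, so this script returns immediately even though the dialog is modal.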
# Translators: the description for the Elements List command in browse mode.
script_elementsList.__doc__ = _("Lists various types of elements in this document")
script_elementsList.ignoreTreeInterceptorPassThrough = True
def _activateNVDAObject(self, obj):
"""Activate an object in response to a user request.
This should generally perform the default action or click on the object.
@param obj: The object to activate.
@type obj: L{NVDAObjects.NVDAObject}
"""
try:
obj.doAction()
except NotImplementedError:
log.debugWarning("doAction not implemented")
def _activatePosition(self, obj=None):
if not obj:
obj=self.currentNVDAObject
if not obj:
return
if obj.role == controlTypes.Role.MATH:
import mathPres
try:
return mathPres.interactWithMathMl(obj.mathMl)
except (NotImplementedError, LookupError):
pass
return
if self.shouldPassThrough(obj):
obj.setFocus()
self.passThrough = True
reportPassThrough(self)
elif obj.role == controlTypes.Role.EMBEDDEDOBJECT or obj.role in self.APPLICATION_ROLES:
obj.setFocus()
speech.speakObject(obj, reason=OutputReason.FOCUS)
else:
self._activateNVDAObject(obj)
def script_activatePosition(self,gesture):
if config.conf["virtualBuffers"]["autoFocusFocusableElements"]:
self._activatePosition()
else:
self._focusLastFocusableObject(activatePosition=True)
# Translators: the description for the activatePosition script on browseMode documents.
script_activatePosition.__doc__ = _("Activates the current object in the document")
def _focusLastFocusableObject(self, activatePosition=False):
"""Used when auto focus focusable elements is disabled to sync the focus
to the browse mode cursor.
When auto focus focusable elements is disabled, NVDA doesn't focus elements
as the user moves the browse mode cursor. However, there are some cases
where the user always wants to interact with the focus; e.g. if they press
the applications key to open the context menu. In these cases, this method
is called first to sync the focus to the browse mode cursor.
"""
obj = self.currentFocusableNVDAObject
if obj!=self.rootNVDAObject and self._shouldSetFocusToObj(obj) and obj!=api.getFocusObject():
obj.setFocus()
# We might be about to activate or pass through a key which will cause
# this object to change (e.g. checking a check box). However, we won't
# actually get the focus event until after the change has occurred.
# Therefore, we must cache properties for speech before the change occurs.
speech.speakObject(obj, OutputReason.ONLYCACHE)
self._objPendingFocusBeforeActivate = obj
if activatePosition:
# Make sure we activate the object at the caret, which is not necessarily focusable.
self._activatePosition()
def script_passThrough(self,gesture):
if not config.conf["virtualBuffers"]["autoFocusFocusableElements"]:
self._focusLastFocusableObject()
gesture.send()
# Translators: the description for the passThrough script on browseMode documents.
script_passThrough.__doc__ = _("Passes gesture through to the application")
def script_disablePassThrough(self, gesture):
if not self.passThrough or self.disableAutoPassThrough:
return gesture.send()
# #3215 ARIA menus should get the Escape key unconditionally so they can handle it without invoking browse mode first
obj = api.getFocusObject()
if obj and obj.role in self.IGNORE_DISABLE_PASS_THROUGH_WHEN_FOCUSED_ROLES:
return gesture.send()
self.passThrough = False
self.disableAutoPassThrough = False
reportPassThrough(self)
script_disablePassThrough.ignoreTreeInterceptorPassThrough = True
def _set_disableAutoPassThrough(self, state):
# If the user manually switches to focus mode with NVDA+space, that enables
# pass-through and disables auto pass-through. If auto focusing of focusable
# elements is disabled, NVDA won't have synced the focus to the browse mode
# cursor. However, since the user is switching to focus mode, they probably
# want to interact with the focus, so sync the focus here.
if (
state
and not config.conf["virtualBuffers"]["autoFocusFocusableElements"]
and self.passThrough
):
self._focusLastFocusableObject()
self._disableAutoPassThrough = state
def _get_disableAutoPassThrough(self):
return self._disableAutoPassThrough
__gestures={
"kb:NVDA+f7": "elementsList",
"kb:enter": "activatePosition",
"kb:numpadEnter": "activatePosition",
"kb:space": "activatePosition",
"kb:NVDA+shift+space":"toggleSingleLetterNav",
"kb:escape": "disablePassThrough",
"kb:control+enter": "passThrough",
"kb:control+numpadEnter": "passThrough",
"kb:shift+enter": "passThrough",
"kb:shift+numpadEnter": "passThrough",
"kb:control+shift+enter": "passThrough",
"kb:control+shift+numpadEnter": "passThrough",
"kb:alt+enter": "passThrough",
"kb:alt+numpadEnter": "passThrough",
"kb:applications": "passThrough",
"kb:shift+applications": "passThrough",
"kb:shift+f10": "passThrough",
}
# Add quick navigation scripts.
qn = BrowseModeTreeInterceptor.addQuickNav
qn("heading", key="h",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading"))
qn("heading1", key="1",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 1"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 1"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 1"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 1"))
qn("heading2", key="2",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 2"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 2"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 2"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 2"))
qn("heading3", key="3",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 3"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 3"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 3"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 3"))
qn("heading4", key="4",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 4"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 4"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 4"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 4"))
qn("heading5", key="5",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 5"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 5"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 5"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 5"))
qn("heading6", key="6",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 6"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 6"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 6"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 6"))
qn("table", key="t",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next table"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next table"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous table"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous table"),
readUnit=textInfos.UNIT_LINE)
qn("link", key="k",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous link"))
qn("visitedLink", key="v",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next visited link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next visited link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous visited link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous visited link"))
qn("unvisitedLink", key="u",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next unvisited link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next unvisited link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous unvisited link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous unvisited link"))
qn("formField", key="f",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next form field"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next form field"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous form field"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous form field"))
qn("list", key="l",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next list"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next list"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous list"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous list"),
readUnit=textInfos.UNIT_LINE)
qn("listItem", key="i",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next list item"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next list item"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous list item"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous list item"))
qn("button", key="b",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next button"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next button"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous button"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous button"))
qn("edit", key="e",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next edit field"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next edit field"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous edit field"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous edit field"),
readUnit=textInfos.UNIT_LINE)
qn("frame", key="m",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next frame"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next frame"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous frame"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous frame"),
readUnit=textInfos.UNIT_LINE)
qn("separator", key="s",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next separator"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next separator"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous separator"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous separator"))
qn("radioButton", key="r",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next radio button"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next radio button"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous radio button"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous radio button"))
qn("comboBox", key="c",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next combo box"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next combo box"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous combo box"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous combo box"))
qn("checkBox", key="x",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next check box"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next check box"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous check box"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous check box"))
qn("graphic", key="g",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next graphic"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next graphic"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous graphic"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous graphic"))
qn("blockQuote", key="q",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next block quote"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next block quote"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous block quote"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous block quote"))
qn("notLinkBlock", key="n",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("skips forward past a block of links"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no more text after a block of links"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("skips backward past a block of links"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no more text before a block of links"),
readUnit=textInfos.UNIT_LINE)
qn("landmark", key="d",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next landmark"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next landmark"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous landmark"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous landmark"),
readUnit=textInfos.UNIT_LINE)
qn("embeddedObject", key="o",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next embedded object"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next embedded object"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous embedded object"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous embedded object"))
qn("annotation", key="a",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next annotation"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next annotation"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous annotation"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous annotation"))
qn("error", key="w",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next error"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next error"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous error"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous error"))
qn(
"article", key=None,
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next article"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next article"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous article"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous article")
)
qn(
"grouping", key=None,
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next grouping"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next grouping"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous grouping"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous grouping")
)
del qn
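# A subclass or add-on could register further quick navigation scripts in the
# same way; an illustrative, hypothetical example (unbound to any key):
#   BrowseModeTreeInterceptor.addQuickNav(
#       "figure", key=None,
#       nextDoc=_("moves to the next figure"), nextError=_("no next figure"),
#       prevDoc=_("moves to the previous figure"), prevError=_("no previous figure"))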
class ElementsListDialog(
DpiScalingHelperMixinWithoutInit,
gui.contextHelp.ContextHelpMixin,
wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "ElementsList"
ELEMENT_TYPES = (
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("link", _("Lin&ks")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("heading", _("&Headings")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("formField", _("&Form fields")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("button", _("&Buttons")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("landmark", _("Lan&dmarks")),
)
Element = collections.namedtuple("Element", ("item", "parent"))
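# Each Element pairs a QuickNavItem with its parent Element (or None for root
# level items), letting the dialog rebuild the document hierarchy in the tree.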
lastSelectedElementType=0
def __init__(self, document):
super().__init__(
parent=gui.mainFrame,
# Translators: The title of the browse mode Elements List dialog.
title=_("Elements List")
)
self.document = document
mainSizer = wx.BoxSizer(wx.VERTICAL)
contentsSizer = wx.BoxSizer(wx.VERTICAL)
# Translators: The label of a group of radio buttons to select the type of element
# in the browse mode Elements List dialog.
child = wx.RadioBox(self, wx.ID_ANY, label=_("Type:"), choices=tuple(et[1] for et in self.ELEMENT_TYPES))
child.SetSelection(self.lastSelectedElementType)
child.Bind(wx.EVT_RADIOBOX, self.onElementTypeChange)
contentsSizer.Add(child, flag=wx.EXPAND)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
self.tree = wx.TreeCtrl(
self,
size=self.scaleSize((500, 300)), # height is chosen to ensure the dialog will fit on an 800x600 screen
style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT | wx.TR_SINGLE | wx.TR_EDIT_LABELS
)
self.tree.Bind(wx.EVT_SET_FOCUS, self.onTreeSetFocus)
self.tree.Bind(wx.EVT_CHAR, self.onTreeChar)
self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.onTreeLabelEditBegin)
self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.onTreeLabelEditEnd)
self.treeRoot = self.tree.AddRoot("root")
contentsSizer.Add(self.tree,flag=wx.EXPAND)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
# Translators: The label of an editable text field to filter the elements
# in the browse mode Elements List dialog.
filterText = _("Filter b&y:")
labeledCtrl = gui.guiHelper.LabeledControlHelper(self, filterText, wx.TextCtrl)
self.filterEdit = labeledCtrl.control
self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange)
contentsSizer.Add(labeledCtrl.sizer)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
bHelper = gui.guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The label of a button to activate an element in the browse mode Elements List dialog.
# Beware not to set an accelerator that would collide with other controls in this dialog, such as an
# element type radio label.
self.activateButton = bHelper.addButton(self, label=_("Activate"))
self.activateButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(True))
# Translators: The label of a button to move to an element
# in the browse mode Elements List dialog.
self.moveButton = bHelper.addButton(self, label=_("&Move to"))
self.moveButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(False))
bHelper.addButton(self, id=wx.ID_CANCEL)
contentsSizer.Add(bHelper.sizer, flag=wx.ALIGN_RIGHT)
mainSizer.Add(contentsSizer, border=gui.guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.tree.SetFocus()
self.initElementType(self.ELEMENT_TYPES[self.lastSelectedElementType][0])
self.CentreOnScreen()
def onElementTypeChange(self, evt):
elementType=evt.GetInt()
# We need to make sure this gets executed after the focus event.
# Otherwise, NVDA doesn't seem to get the event.
queueHandler.queueFunction(queueHandler.eventQueue, self.initElementType, self.ELEMENT_TYPES[elementType][0])
self.lastSelectedElementType=elementType
def initElementType(self, elType):
if elType in ("link","button"):
# Links and buttons can be activated.
self.activateButton.Enable()
self.SetAffirmativeId(self.activateButton.GetId())
else:
# No other element type can be activated.
self.activateButton.Disable()
self.SetAffirmativeId(self.moveButton.GetId())
# Gather the elements of this type.
self._elements = []
self._initialElement = None
parentElements = []
isAfterSelection=False
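# Walk the document in order, maintaining a stack of open ancestors so that
# each element can be linked to its nearest containing element of the same type.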
for item in self.document._iterNodesByType(elType):
# Find the parent element, if any.
for parent in reversed(parentElements):
if item.isChild(parent.item):
break
else:
# We're not a child of this parent, so this parent has no more children and can be removed from the stack.
parentElements.pop()
else:
# No parent found, so we're at the root.
# Note that parentElements will be empty at this point, as all parents are no longer relevant and have thus been removed from the stack.
parent = None
element=self.Element(item,parent)
self._elements.append(element)
if not isAfterSelection:
isAfterSelection=item.isAfterSelection
if not isAfterSelection:
# The element immediately preceding or overlapping the caret should be the initially selected element.
# Since we have not yet passed the selection, use this as the initial element.
try:
self._initialElement = self._elements[-1]
except IndexError:
# No previous element.
pass
# This could be the parent of a subsequent element, so add it to the parents stack.
parentElements.append(element)
# Start with no filtering.
self.filterEdit.ChangeValue("")
self.filter("", newElementType=True)
def filter(self, filterText, newElementType=False):
# If this is a new element type, use the element nearest the cursor.
# Otherwise, use the currently selected element.
# #8753: wxPython 4 returns "invalid tree item" when the tree view is empty, so use initial element if appropriate.
try:
defaultElement = self._initialElement if newElementType else self.tree.GetItemData(self.tree.GetSelection())
except:
defaultElement = self._initialElement
# Clear the tree.
self.tree.DeleteChildren(self.treeRoot)
# Populate the tree with elements matching the filter text.
elementsToTreeItems = {}
defaultItem = None
matched = False
# Do case-insensitive matching by lowering both filterText and each element's text.
filterText=filterText.lower()
for element in self._elements:
label=element.item.label
if filterText and filterText not in label.lower():
continue
matched = True
parent = element.parent
if parent:
parent = elementsToTreeItems.get(parent)
item = self.tree.AppendItem(parent or self.treeRoot, label)
self.tree.SetItemData(item, element)
elementsToTreeItems[element] = item
if element == defaultElement:
defaultItem = item
self.tree.ExpandAll()
if not matched:
# No items, so disable the buttons.
self.activateButton.Disable()
self.moveButton.Disable()
return
# If there's no default item, use the first item in the tree.
self.tree.SelectItem(defaultItem or self.tree.GetFirstChild(self.treeRoot)[0])
# Enable the button(s).
# If the activate button isn't the default button, it is disabled for this element type and shouldn't be enabled here.
if self.AffirmativeId == self.activateButton.Id:
self.activateButton.Enable()
self.moveButton.Enable()
def onTreeSetFocus(self, evt):
# Start with no search.
self._searchText = ""
self._searchCallLater = None
evt.Skip()
def onTreeChar(self, evt):
key = evt.KeyCode
if key == wx.WXK_RETURN:
# The enter key should be propagated to the dialog and thus activate the default button,
# but this is broken (wx ticket #3725).
# Therefore, we must catch the enter key here.
# Activate the current default button.
evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_ANY)
button = self.FindWindowById(self.AffirmativeId)
if button.Enabled:
button.ProcessEvent(evt)
else:
wx.Bell()
elif key == wx.WXK_F2:
item=self.tree.GetSelection()
if item:
selectedItemType=self.tree.GetItemData(item).item
self.tree.EditLabel(item)
evt.Skip()
elif key >= wx.WXK_START or key == wx.WXK_BACK:
# Non-printable character.
self._searchText = ""
evt.Skip()
else:
# Search the list.
# We have to implement this ourselves, as tree views don't accept space as a search character.
char = chr(evt.UnicodeKey).lower()
# If the same character is typed twice, do the same search.
if self._searchText != char:
self._searchText += char
if self._searchCallLater:
self._searchCallLater.Restart()
else:
self._searchCallLater = wx.CallLater(1000, self._clearSearchText)
self.search(self._searchText)
def onTreeLabelEditBegin(self,evt):
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemData(item).item
if not selectedItemType.isRenameAllowed:
evt.Veto()
def onTreeLabelEditEnd(self,evt):
selectedItemNewName=evt.GetLabel()
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemData(item).item
selectedItemType.rename(selectedItemNewName)
def _clearSearchText(self):
self._searchText = ""
def search(self, searchText):
item = self.tree.GetSelection()
if not item:
# No items.
return
# First try searching from the current item.
# Failing that, search from the first item.
items = itertools.chain(self._iterReachableTreeItemsFromItem(item), self._iterReachableTreeItemsFromItem(self.tree.GetFirstChild(self.treeRoot)[0]))
if len(searchText) == 1:
# If only a single character has been entered, skip (search after) the current item.
next(items)
for item in items:
if self.tree.GetItemText(item).lower().startswith(searchText):
self.tree.SelectItem(item)
return
# Not found.
wx.Bell()
def _iterReachableTreeItemsFromItem(self, item):
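# Yields the given item, its visible (expanded) descendants and its following
# siblings, in the order they appear in the tree.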
while item:
yield item
childItem = self.tree.GetFirstChild(item)[0]
if childItem and self.tree.IsExpanded(item):
# Has children and is reachable, so recurse.
for childItem in self._iterReachableTreeItemsFromItem(childItem):
yield childItem
item = self.tree.GetNextSibling(item)
def onFilterEditTextChange(self, evt):
self.filter(self.filterEdit.GetValue())
evt.Skip()
def onAction(self, activate):
prevFocus = gui.mainFrame.prevFocus
self.Close()
# Save off the last selected element type onto the class so it's used in initialization next time.
self.__class__.lastSelectedElementType=self.lastSelectedElementType
item = self.tree.GetSelection()
item = self.tree.GetItemData(item).item
if activate:
item.activate()
else:
def move():
speech.cancelSpeech()
# Avoid a double announcement if item.obj is about to gain focus.
if not (
self.document.passThrough
and getattr(item, "obj", False)
and item.obj != prevFocus
and controlTypes.State.FOCUSABLE in item.obj.states
):
# #8831: Report before moving because moving might change the focus, which
# might mutate the document, potentially invalidating info if it is
# offset-based.
item.report()
item.moveTo()
# We must use core.callLater rather than wx.CallLater to ensure that the callback runs within NVDA's core pump.
# If it didn't, and it directly or indirectly called wx.Yield, it could start executing NVDA's core pump from within the yield, causing recursion.
core.callLater(100, move)
class BrowseModeDocumentTextInfo(textInfos.TextInfo):
def _get_focusableNVDAObjectAtStart(self):
try:
item = next(self.obj._iterNodesByType("focusable", "up", self))
except StopIteration:
return self.obj.rootNVDAObject
if not item:
return self.obj.rootNVDAObject
return item.obj
class BrowseModeDocumentTreeInterceptor(documentBase.DocumentWithTableNavigation,cursorManager.CursorManager,BrowseModeTreeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor):
programmaticScrollMayFireEvent = False
def __init__(self,obj):
super(BrowseModeDocumentTreeInterceptor,self).__init__(obj)
self._lastProgrammaticScrollTime = None
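# Fetch documentConstantIdentifier once and cache it on the instance, as the
# property may no longer be retrievable by the time terminate() runs.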
self.documentConstantIdentifier = self.documentConstantIdentifier
self._lastFocusObj = None
self._objPendingFocusBeforeActivate = None
self._hadFirstGainFocus = False
self._enteringFromOutside = True
# We need to cache this because it will be unavailable once the document dies.
if not hasattr(self.rootNVDAObject.appModule, "_browseModeRememberedCaretPositions"):
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions = {}
self._lastCaretPosition = None
#: True if the last caret move was due to a focus change.
self._lastCaretMoveWasFocus = False
def terminate(self):
if self.shouldRememberCaretPositionAcrossLoads and self._lastCaretPosition:
try:
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] = self._lastCaretPosition
except AttributeError:
# The app module died.
pass
def _get_currentNVDAObject(self):
return self.makeTextInfo(textInfos.POSITION_CARET).NVDAObjectAtStart
def event_treeInterceptor_gainFocus(self):
doSayAll=False
hadFirstGainFocus=self._hadFirstGainFocus
if not hadFirstGainFocus:
# This treeInterceptor is gaining focus for the first time.
# Fake a focus event on the focus object, as the treeInterceptor may have missed the actual focus event.
focus = api.getFocusObject()
self.event_gainFocus(focus, lambda: focus.event_gainFocus())
if not self.passThrough:
# We only set the caret position if in browse mode.
# If in focus mode, the document must have forced the focus somewhere,
# so we don't want to override it.
initialPos = self._getInitialCaretPos()
if initialPos:
self.selection = self.makeTextInfo(initialPos)
reportPassThrough(self)
doSayAll=config.conf['virtualBuffers']['autoSayAllOnPageLoad']
self._hadFirstGainFocus = True
if not self.passThrough:
if doSayAll:
speech.speakObjectProperties(self.rootNVDAObject, name=True, states=True, reason=OutputReason.FOCUS)
sayAll.SayAllHandler.readText(sayAll.CURSOR.CARET)
else:
# Speak it like we would speak focus on any other document object.
# This includes when entering the treeInterceptor for the first time:
if not hadFirstGainFocus:
speech.speakObject(self.rootNVDAObject, reason=OutputReason.FOCUS)
else:
# And when coming in from an outside object
# #4069 But not when coming up from a non-rendered descendant.
ancestors=api.getFocusAncestors()
fdl=api.getFocusDifferenceLevel()
try:
tl=ancestors.index(self.rootNVDAObject)
except ValueError:
tl=len(ancestors)
if fdl<=tl:
speech.speakObject(self.rootNVDAObject, reason=OutputReason.FOCUS)
info = self.selection
if not info.isCollapsed:
speech.speakPreselectedText(info.text)
else:
info.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(info, reason=OutputReason.CARET, unit=textInfos.UNIT_LINE)
reportPassThrough(self)
braille.handler.handleGainFocus(self)
def event_caret(self, obj, nextHandler):
if self.passThrough:
nextHandler()
def _activateLongDesc(self,controlField):
"""
Activates (presents) the long description for a particular field (usually a graphic).
@param controlField: the field whose long description should be activated. This field is guaranteed to have a states set containing the HASLONGDESC state.
@type controlField: dict
"""
raise NotImplementedError
def _activatePosition(self, obj=None, info=None):
if info:
obj=info.NVDAObjectAtStart
if not obj:
return
super(BrowseModeDocumentTreeInterceptor,self)._activatePosition(obj=obj)
def _set_selection(self, info, reason=OutputReason.CARET):
super(BrowseModeDocumentTreeInterceptor, self)._set_selection(info)
if isScriptWaiting() or not info.isCollapsed:
return
# Save the last caret position for use in terminate().
# This must be done here because the buffer might be cleared just before terminate() is called,
# causing the last caret position to be lost.
caret = info.copy()
caret.collapse()
self._lastCaretPosition = caret.bookmark
review.handleCaretMove(caret)
if reason == OutputReason.FOCUS:
self._lastCaretMoveWasFocus = True
focusObj = api.getFocusObject()
if focusObj==self.rootNVDAObject:
return
else:
self._lastCaretMoveWasFocus = False
focusObj=info.focusableNVDAObjectAtStart
obj=info.NVDAObjectAtStart
if not obj:
log.debugWarning("Invalid NVDAObjectAtStart")
return
if obj==self.rootNVDAObject:
return
obj.scrollIntoView()
if self.programmaticScrollMayFireEvent:
self._lastProgrammaticScrollTime = time.time()
if focusObj:
self.passThrough = self.shouldPassThrough(focusObj, reason=reason)
if (
not eventHandler.isPendingEvents("gainFocus")
and focusObj != self.rootNVDAObject
and focusObj != api.getFocusObject()
and self._shouldSetFocusToObj(focusObj)
):
followBrowseModeFocus = config.conf["virtualBuffers"]["autoFocusFocusableElements"]
if followBrowseModeFocus or self.passThrough:
focusObj.setFocus()
# Queue the reporting of pass through mode so that it will be spoken after the actual content.
queueHandler.queueFunction(queueHandler.eventQueue, reportPassThrough, self)
def _shouldSetFocusToObj(self, obj):
"""Determine whether an object should receive focus.
Subclasses may extend or override this method.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if the object should receive focus, C{False} otherwise.
@rtype: bool
"""
return obj.role not in self.APPLICATION_ROLES and obj.isFocusable and obj.role!=controlTypes.Role.EMBEDDEDOBJECT
def script_activateLongDesc(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand("character")
for field in reversed(info.getTextWithFields()):
if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart":
states=field.field.get('states')
if states and controlTypes.State.HASLONGDESC in states:
self._activateLongDesc(field.field)
break
else:
# Translators: the message presented when the activateLongDescription script cannot locate a long description to activate.
ui.message(_("No long description"))
# Translators: the description for the activateLongDescription script on browseMode documents.
script_activateLongDesc.__doc__=_("Shows the long description at this position if one is found.")
def event_caretMovementFailed(self, obj, nextHandler, gesture=None):
if not self.passThrough or not gesture or not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]:
return nextHandler()
if gesture.mainKeyName in ("home", "end"):
# Home, end, control+home and control+end should not disable pass through.
return nextHandler()
script = self.getScript(gesture)
if not script:
return nextHandler()
# We've hit the edge of the focused control.
# Therefore, move the virtual caret to the same edge of the field.
info = self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CONTROLFIELD)
if gesture.mainKeyName in ("leftArrow", "upArrow", "pageUp"):
info.collapse()
else:
info.collapse(end=True)
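# Collapsing to the end places the caret just past the last character,
# so step back one character to keep it within the field.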
info.move(textInfos.UNIT_CHARACTER, -1)
info.updateCaret()
scriptHandler.queueScript(script, gesture)
currentExpandedControl=None #: an NVDAObject representing the control that has just been expanded with the collapseOrExpandControl script.
def script_collapseOrExpandControl(self, gesture):
if not config.conf["virtualBuffers"]["autoFocusFocusableElements"]:
self._focusLastFocusableObject()
oldFocus = api.getFocusObject()
oldFocusStates = oldFocus.states
gesture.send()
if controlTypes.State.COLLAPSED in oldFocusStates:
self.passThrough = True
# When a control (such as a combo box) is expanded, we expect that its descendants will be classed as being outside the browseMode document.
# We save off the expanded control so that the next focus event within the browseMode document can see if it is for the control,
# and if so, it disables passthrough, as the control has obviously been collapsed again.
self.currentExpandedControl=oldFocus
elif not self.disableAutoPassThrough:
self.passThrough = False
reportPassThrough(self)
def _tabOverride(self, direction):
"""Override the tab order if the virtual caret is not within the currently focused node.
This is done because many nodes are not focusable and it is thus possible for the virtual caret to be unsynchronised with the focus.
In this case, we want tab/shift+tab to move to the next/previous focusable node relative to the virtual caret.
If the virtual caret is within the focused node, the tab/shift+tab key should be passed through to allow normal tab order navigation.
Note that this method does not pass the key through itself if it is not overridden. This should be done by the calling script if C{False} is returned.
@param direction: The direction in which to move.
@type direction: str
@return: C{True} if the tab order was overridden, C{False} if not.
@rtype: bool
"""
if self._lastCaretMoveWasFocus:
# #5227: If the caret was last moved due to a focus change, don't override tab.
# This ensures that tabbing behaves as expected after tabbing hits an iframe document.
return False
focus = api.getFocusObject()
try:
focusInfo = self.makeTextInfo(focus)
except:
return False
# We only want to override the tab order if the caret is not within the focused node.
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Only check that the caret is within the focus for things that are not documents,
# as for documents we should always override.
if focus.role!=controlTypes.Role.DOCUMENT or controlTypes.State.EDITABLE in focus.states:
# Expand to one character, as isOverlapping() doesn't yield the desired results with collapsed ranges.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if focusInfo.isOverlapping(caretInfo):
return False
# If we reach here, we do want to override tab/shift+tab if possible.
# Find the next/previous focusable node.
try:
item = next(self._iterNodesByType("focusable", direction, caretInfo))
except StopIteration:
return False
obj=item.obj
newInfo=item.textInfo
if obj == api.getFocusObject():
# This node is already focused, so we need to move to and speak this node here.
newCaret = newInfo.copy()
newCaret.collapse()
self._set_selection(newCaret, reason=OutputReason.FOCUS)
if self.passThrough:
obj.event_gainFocus()
else:
speech.speakTextInfo(newInfo, reason=OutputReason.FOCUS)
else:
# This node doesn't have the focus, so just set focus to it. The gainFocus event will handle the rest.
obj.setFocus()
return True
def script_tab(self, gesture):
if not self._tabOverride("next"):
gesture.send()
def script_shiftTab(self, gesture):
if not self._tabOverride("previous"):
gesture.send()
def event_focusEntered(self,obj,nextHandler):
if obj==self.rootNVDAObject:
self._enteringFromOutside = True
# Even if passThrough is enabled, we still completely drop focusEntered events here.
# In order to get them back when passThrough is enabled, we replay them with the _replayFocusEnteredEvents method in event_gainFocus.
# The reason for this is to ensure that focusEntered events are delayed until a focus event has had a chance to disable passthrough mode.
# In that case, we would not want them.
def _shouldIgnoreFocus(self, obj):
"""Determines whether focus on a given object should be ignored.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if focus on L{obj} should be ignored, C{False} otherwise.
@rtype: bool
"""
return False
def _postGainFocus(self, obj):
"""Executed after a gainFocus within the browseMode document.
This will not be executed if L{event_gainFocus} determined that it should abort and call nextHandler.
@param obj: The object that gained focus.
@type obj: L{NVDAObjects.NVDAObject}
"""
def _replayFocusEnteredEvents(self):
# We blocked the focusEntered events because we were in browse mode,
# but now that we've switched to focus mode, we need to fire them.
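# api.getFocusDifferenceLevel() gives the index at which the new focus
# ancestry diverges from the old, so only newly entered ancestors are replayed.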
for parent in api.getFocusAncestors()[api.getFocusDifferenceLevel():]:
try:
parent.event_focusEntered()
except:
log.exception("Error executing focusEntered event: %s" % parent)
def event_gainFocus(self, obj, nextHandler):
enteringFromOutside=self._enteringFromOutside
self._enteringFromOutside=False
if not self.isReady:
if self.passThrough:
self._replayFocusEnteredEvents()
nextHandler()
return
# If a control has been expanded by the collapseOrExpandControl script, and this focus event is for it,
# disable passThrough and report the control, as the control has obviously been collapsed again.
# Note that whether or not this focus event was for that control, the last expanded control is forgotten, so that only the next focus event for the browseMode document can handle the collapsed control.
lastExpandedControl=self.currentExpandedControl
self.currentExpandedControl=None
if self.passThrough and obj==lastExpandedControl:
self.passThrough=False
reportPassThrough(self)
nextHandler()
return
if enteringFromOutside and not self.passThrough and self._lastFocusObj==obj:
# We're entering the document from outside (not returning from an inside object/application; #3145)
# and this was the last non-root node with focus, so ignore this focus event.
# Otherwise, if the user switches away and back to this document, the cursor will jump to this node.
# This is not ideal if the user was positioned over a node which cannot receive focus.
return
if obj==self.rootNVDAObject:
if self.passThrough:
self._replayFocusEnteredEvents()
return nextHandler()
return
if not self.passThrough and self._shouldIgnoreFocus(obj):
return
# If the previous focus object was removed, we might hit a false positive for overlap detection.
# Track the previous focus target so that we can account for this scenario.
previousFocusObjIsDefunct = False
if self._lastFocusObj:
try:
states = self._lastFocusObj.states
previousFocusObjIsDefunct = controlTypes.State.DEFUNCT in states
except Exception:
log.debugWarning(
"Error fetching states when checking for defunct object. Treating object as defunct anyway.",
exc_info=True
)
previousFocusObjIsDefunct = True
self._lastFocusObj=obj
try:
focusInfo = self.makeTextInfo(obj)
except:
# This object is not in the treeInterceptor, even though it resides beneath the document.
# Automatic pass through should be enabled in certain circumstances where this occurs.
if not self.passThrough and self.shouldPassThrough(obj, reason=OutputReason.FOCUS):
self.passThrough=True
reportPassThrough(self)
self._replayFocusEnteredEvents()
return nextHandler()
# We only want to update the caret and speak the field if we're not in the same one as before.
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
isOverlapping = focusInfo.isOverlapping(caretInfo)
if not self._hadFirstGainFocus or not isOverlapping or (isOverlapping and previousFocusObjIsDefunct):
# The virtual caret is not within the focus node.
oldPassThrough=self.passThrough
passThrough = self.shouldPassThrough(obj, reason=OutputReason.FOCUS)
if not oldPassThrough and (passThrough or sayAll.SayAllHandler.isRunning()):
# If pass-through is disabled, cancel speech, as a focus change should cause page reading to stop.
# This must be done before auto-pass-through occurs, as we want to stop page reading even if pass-through will be automatically enabled by this focus change.
speech.cancelSpeech()
self.passThrough=passThrough
if not self.passThrough:
# We read the info from the browseMode document instead of the control itself.
speech.speakTextInfo(focusInfo, reason=OutputReason.FOCUS)
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj, controlTypes.OutputReason.ONLYCACHE)
# As we do not call nextHandler which would trigger the vision framework to handle gain focus,
# we need to call it manually here.
vision.handler.handleGainFocus(obj)
else:
# Although we are going to speak the object rather than textInfo content, we still need to silently speak the textInfo content so that the textInfo speech cache is updated correctly.
# Not doing this would cause later browseMode speaking to either not speak controlFields it had entered, or speak controlField exits after having already exited.
# See #7435 for a discussion on this.
speech.speakTextInfo(focusInfo, reason=OutputReason.ONLYCACHE)
self._replayFocusEnteredEvents()
nextHandler()
focusInfo.collapse()
self._set_selection(focusInfo, reason=OutputReason.FOCUS)
else:
# The virtual caret was already at the focused node.
if not self.passThrough:
# This focus change was caused by a virtual caret movement, so don't speak the focused node to avoid double speaking.
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj, OutputReason.ONLYCACHE)
if config.conf["virtualBuffers"]["autoFocusFocusableElements"]:
# As we do not call nextHandler which would trigger the vision framework to handle gain focus,
# we need to call it manually here.
# Note: this is usually called after the caret movement.
vision.handler.handleGainFocus(obj)
elif (
self._objPendingFocusBeforeActivate
and obj == self._objPendingFocusBeforeActivate
and obj is not self._objPendingFocusBeforeActivate
):
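# obj compares equal to the pending object (same underlying control) but is a
# different NVDAObject instance; the old instance still holds the speech
# property cache from before the activation.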
# With auto focus focusable elements disabled, when the user activates
# an element (e.g. by pressing enter) or presses a key which we pass
# through (e.g. control+enter), we call _focusLastFocusableObject.
# However, the activation/key press might cause a property change
# before we get the focus event, so NVDA's normal reporting of
# changes to the focus won't pick it up.
# The speech property cache on _objPendingFocusBeforeActivate reflects
# the properties before the activation/key, so use that to speak any
# changes.
speech.speakObject(
self._objPendingFocusBeforeActivate,
OutputReason.CHANGE
)
self._objPendingFocusBeforeActivate = None
else:
self._replayFocusEnteredEvents()
return nextHandler()
self._postGainFocus(obj)
event_gainFocus.ignoreIsReady=True
def _handleScrollTo(
self,
obj: Union[NVDAObject, textInfos.TextInfo],
) -> bool:
"""Handle scrolling the browseMode document to a given object in response to an event.
Subclasses should call this from an event which indicates that the document has scrolled.
@postcondition: The virtual caret is moved to L{obj} and the buffer content for L{obj} is reported.
@param obj: The object to which the document should scroll.
@return: C{True} if the document was scrolled, C{False} if not.
@note: If C{False} is returned, calling events should probably call their nextHandler.
"""
if self.programmaticScrollMayFireEvent and self._lastProgrammaticScrollTime and time.time() - self._lastProgrammaticScrollTime < 0.4:
# This event was probably caused by this browseMode document's call to scrollIntoView().
# Therefore, ignore it. Otherwise, the cursor may bounce back to the scroll point.
# However, pretend we handled it, as we don't want it to be passed on to the object either.
return True
if isinstance(obj, NVDAObject):
try:
scrollInfo = self.makeTextInfo(obj)
except (NotImplementedError, RuntimeError):
return False
elif isinstance(obj, textInfos.TextInfo):
scrollInfo = obj.copy()
else:
raise ValueError(f"{obj} is not a supported type")
# We only want to update the caret and speak the field if we're not in the same one as before.
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if not scrollInfo.isOverlapping(caretInfo):
if scrollInfo.isCollapsed:
scrollInfo.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(scrollInfo, reason=OutputReason.CARET)
scrollInfo.collapse()
self.selection = scrollInfo
return True
return False
def _isNVDAObjectInApplication_noWalk(self, obj):
"""Determine whether a given object is within an application without walking ancestors.
The base implementation simply checks whether the object has an application role.
Subclasses can override this if they can provide a definite answer without needing to walk.
For example, for virtual buffers, if the object is in the buffer,
it definitely isn't in an application.
L{_isNVDAObjectInApplication} calls this and walks to the next ancestor if C{None} is returned.
@return: C{True} if definitely in an application,
C{False} if definitely not in an application,
C{None} if this can't be determined without walking ancestors.
"""
if (
# Roles such as application and dialog should be treated as being within an "application" and therefore outside of the browseMode document.
obj.role in self.APPLICATION_ROLES
# Anything other than an editable text box inside a combo box should be
# treated as being outside a browseMode document.
or (
obj.role != controlTypes.Role.EDITABLETEXT and obj.container
and obj.container.role == controlTypes.Role.COMBOBOX
)
):
return True
return None
def _isNVDAObjectInApplication(self, obj):
"""Determine whether a given object is within an application.
The object is considered to be within an application if it or one of its ancestors has an application role.
This should only be called on objects beneath the treeInterceptor's root NVDAObject.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if L{obj} is within an application, C{False} otherwise.
@rtype: bool
"""
# We cache the result for each object we walk.
# There can be browse mode documents within other documents and the result might be different between these,
# so the cache must be maintained on the TreeInterceptor rather than the object itself.
try:
cache = self._isInAppCache
except AttributeError:
# Create this lazily, as this method isn't used by all browse mode implementations.
cache = self._isInAppCache = weakref.WeakKeyDictionary()
objs = []
def doResult(result):
# Cache this on descendants we've walked over.
for obj in objs:
cache[obj] = result
return result
while obj and obj != self.rootNVDAObject:
inApp = cache.get(obj)
if inApp is not None:
# We found a cached result.
return doResult(inApp)
objs.append(obj)
inApp = self._isNVDAObjectInApplication_noWalk(obj)
if inApp is not None:
return doResult(inApp)
# We must walk ancestors.
# Cache container.
container = obj.container
obj.container = container
obj = container
return doResult(False)
def _get_documentConstantIdentifier(self):
"""Get the constant identifier for this document.
This identifier should uniquely identify all instances (not just one instance) of a document for at least the current session of the hosting application.
Generally, the document URL should be used.
@return: The constant identifier for this document, C{None} if there is none.
"""
return None
def _get_shouldRememberCaretPositionAcrossLoads(self):
"""Specifies whether the position of the caret should be remembered when this document is loaded again.
This is useful when the browser remembers the scroll position for the document,
but does not communicate this information via APIs.
The remembered caret position is associated with this document using L{documentConstantIdentifier}.
@return: C{True} if the caret position should be remembered, C{False} if not.
@rtype: bool
"""
docConstId = self.documentConstantIdentifier
# Return True if the URL indicates that this is probably a web browser document.
# We do this check because we don't want to remember caret positions for email messages, etc.
if isinstance(docConstId, str):
protocols=("http", "https", "ftp", "ftps", "file")
protocol=docConstId.split("://", 1)[0]
return protocol in protocols
return False
def _getInitialCaretPos(self):
"""Retrieve the initial position of the caret after the buffer has been loaded.
This position, if any, will be passed to L{makeTextInfo}.
Subclasses should extend this method.
@return: The initial position of the caret, C{None} if there isn't one.
@rtype: TextInfo position
"""
if self.shouldRememberCaretPositionAcrossLoads:
try:
return self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier]
except KeyError:
pass
return None
def getEnclosingContainerRange(self, textRange):
textRange = textRange.copy()
textRange.collapse()
try:
item = next(self._iterNodesByType("container", "up", textRange))
except (NotImplementedError,StopIteration):
try:
item = next(self._iterNodesByType("landmark", "up", textRange))
except (NotImplementedError,StopIteration):
return
return item.textInfo
def script_moveToStartOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
# Translators: Reported when the user attempts to move to the start or end of a container
# (list, table, etc.) but there is no container.
ui.message(_("Not in a container"))
return
container.collapse()
self._set_selection(container, reason=OutputReason.QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=OutputReason.FOCUS)
script_moveToStartOfContainer.resumeSayAllMode = sayAll.CURSOR.CARET
# Translators: Description for the Move to start of container command in browse mode.
script_moveToStartOfContainer.__doc__=_("Moves to the start of the container element, such as a list or table")
def script_movePastEndOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
# Translators: Reported when the user attempts to move to the start or end of a container
# (list, table, etc.) but there is no container.
ui.message(_("Not in a container"))
return
container.collapse(end=True)
docEnd=container.obj.makeTextInfo(textInfos.POSITION_LAST)
if container.compareEndPoints(docEnd,"endToEnd")>=0:
container=docEnd
# Translators: a message reported when:
# Review cursor is at the bottom line of the current navigator object.
# Landing at the end of a browse mode document when trying to jump to the end of the current container.
ui.message(_("Bottom"))
self._set_selection(container, reason=OutputReason.QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=OutputReason.FOCUS)
script_movePastEndOfContainer.resumeSayAllMode = sayAll.CURSOR.CARET
# Translators: Description for the Move past end of container command in browse mode.
script_movePastEndOfContainer.__doc__=_("Moves past the end of the container element, such as a list or table")
NOT_LINK_BLOCK_MIN_LEN = 30
def _isSuitableNotLinkBlock(self, textRange):
return len(textRange.text) >= self.NOT_LINK_BLOCK_MIN_LEN
def _iterNotLinkBlock(self, direction="next", pos=None):
links = self._iterNodesByType("link", direction=direction, pos=pos)
# We want to compare each link against the next link.
item1 = next(links, None)
if item1 is None:
return
for item2 in links:
# If the distance between the links is small, this is probably just a piece of non-link text within a block of links; e.g. an inactive link of a nav bar.
if direction=="previous":
textRange=item1.textInfo.copy()
textRange.collapse()
textRange.setEndPoint(item2.textInfo,"startToEnd")
else:
textRange=item2.textInfo.copy()
textRange.collapse()
textRange.setEndPoint(item1.textInfo,"startToEnd")
if self._isSuitableNotLinkBlock(textRange):
yield TextInfoQuickNavItem("notLinkBlock", self, textRange)
item1=item2
__gestures={
"kb:NVDA+d": "activateLongDesc",
"kb:alt+upArrow": "collapseOrExpandControl",
"kb:alt+downArrow": "collapseOrExpandControl",
"kb:tab": "tab",
"kb:shift+tab": "shiftTab",
"kb:shift+,": "moveToStartOfContainer",
"kb:,": "movePastEndOfContainer",
}
@script(
description=_(
# Translators: the description for the toggleScreenLayout script.
"Toggles on and off if the screen layout is preserved while rendering the document content"
),
gesture="kb:NVDA+v",
)
def script_toggleScreenLayout(self, gesture):
# Translators: The message reported for not supported toggling of screen layout
ui.message(_("Not supported in this document."))
| (per-file numeric quality-signal columns omitted) |
| d9f9cd4e7a0b73e79eb71d2bdbfa755d69a9cc9d | 597 | py | Python | examples/first_char_last_column.py | clarkfitzg/sta141c | 129704ba0952a4b80f9b093dcfa49f49f37b052d | ["MIT"] | 24 | 2019-01-08T20:10:11.000Z | 2021-11-26T12:18:58.000Z | examples/first_char_last_column.py | timilchene/sta141c-winter19 | 129704ba0952a4b80f9b093dcfa49f49f37b052d | ["MIT"] | 1 | 2017-06-25T05:35:24.000Z | 2017-06-25T05:35:24.000Z | examples/first_char_last_column.py | timilchene/sta141c-winter19 | 129704ba0952a4b80f9b093dcfa49f49f37b052d | ["MIT"] | 22 | 2019-01-08T20:02:15.000Z | 2021-12-16T23:27:56.000Z |
#!/usr/bin/env python3
"""
For the last column, print only the first character.
Usage:
$ printf "100,200\n0,\n" | python3 first_char_last_column.py
Should print "100,2\n0,"
"""
import csv
from sys import stdin, stdout
def main():
reader = csv.reader(stdin)
writer = csv.writer(stdout)
for row in reader:
try:
row[-1] = row[-1][0]
except IndexError:
# Python: Better to ask forgiveness than permission
# Alternative: Look before you leap
pass
writer.writerow(row)
if __name__ == "__main__":
main()
| (per-file numeric quality-signal columns omitted) |
| d9fb744315858b3e553e097f0866c6de49262adf | 1,996 | py | Python | env_ci.py | reloadware/stickybeak | 8ac52a80849a3098fb6b2f47115970a734a73c14 | ["Apache-2.0"] | null | null | null | env_ci.py | reloadware/stickybeak | 8ac52a80849a3098fb6b2f47115970a734a73c14 | ["Apache-2.0"] | null | null | null | env_ci.py | reloadware/stickybeak | 8ac52a80849a3098fb6b2f47115970a734a73c14 | ["Apache-2.0"] | 1 | 2022-01-01T15:14:42.000Z | 2022-01-01T15:14:42.000Z |
from pathlib import Path
root = Path(__file__).parent.absolute()
import envo
envo.add_source_roots([root])
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from envo import Env, Namespace, env_var, logger, run
from env_comm import StickybeakCommEnv as ParentEnv
p = Namespace("p")
class StickybeakCiEnv(ParentEnv):
class Meta(ParentEnv.Meta):
stage: str = "ci"
emoji: str = "⚙"
load_env_vars = True
class Environ(ParentEnv.Environ):
pypi_username: Optional[str] = env_var(raw=True)
pypi_password: Optional[str] = env_var(raw=True)
e: Environ
def init(self) -> None:
super().init()
@p.command
def bootstrap(self, test_apps=True) -> None:
super().bootstrap(test_apps)
@p.command
def test(self) -> None:
run("pytest --reruns 2 -v tests")
@p.command
def build(self) -> None:
run("poetry build")
@p.command
def publish(self) -> None:
run(f'poetry publish --username "{self.e.pypi_username}" --password "{self.e.pypi_password}"', verbose=False)
@p.command
def rstcheck(self) -> None:
pass
# run("rstcheck README.rst | tee ./workspace/rstcheck.txt")
@p.command
def flake(self) -> None:
pass
# run("flake8 . | tee ./workspace/flake8.txt")
@p.command
def check_black(self) -> None:
run("black --check .")
@p.command
def check_isort(self) -> None:
run("black --check .")
@p.command
def mypy(self) -> None:
        run("mypy .")
@p.command
def generate_version(self) -> None:
import toml
config = toml.load(str(self.meta.root / "pyproject.toml"))
version: str = config["tool"]["poetry"]["version"]
version_file = self.meta.root / "stickybeak/__version__.py"
Path(version_file).touch()
version_file.write_text(f'__version__ = "{version}"\n')
ThisEnv = StickybeakCiEnv
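The `generate_version` command above is the one step with non-obvious mechanics: it lifts the version string out of pyproject.toml and materializes it as a module. A standalone sketch of that step outside the envo machinery (the project-root argument is an assumption):

from pathlib import Path
import toml  # same third-party package the command imports

def read_poetry_version(project_root: Path) -> str:
    """Return the [tool.poetry] version string from pyproject.toml."""
    config = toml.load(str(project_root / "pyproject.toml"))
    return config["tool"]["poetry"]["version"]

# generate_version then writes __version__ = "<that string>" to
# stickybeak/__version__.py.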
| (per-file numeric quality-signal columns omitted) |
| d9fe5aa1f8632d451d56260ea6fb9079bd975a31 | 475 | py | Python | bsp/nrf5x/tools/sdk_dist.py | BreederBai/rt-thread | 53ed0314982556dfa9c5db75d4f3e02485d16ab5 | ["Apache-2.0"] | 7,482 | 2015-01-01T09:23:08.000Z | 2022-03-31T19:34:05.000Z | bsp/nrf5x/tools/sdk_dist.py | ArdaFu/rt-thread | eebb2561ec166e0016187c7b7998ada4f8212b3a | ["Apache-2.0"] | 2,543 | 2015-01-09T02:01:34.000Z | 2022-03-31T23:10:14.000Z | bsp/nrf5x/tools/sdk_dist.py | ArdaFu/rt-thread | eebb2561ec166e0016187c7b7998ada4f8212b3a | ["Apache-2.0"] | 4,645 | 2015-01-06T07:05:31.000Z | 2022-03-31T18:21:50.000Z |
import os
import sys
import shutil
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))
# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
from mkdist import bsp_copy_files
import rtconfig
library_dir = os.path.join(dist_dir, 'libraries')
print("=> copy nrf52 bsp libraries")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
bsp_copy_files(library_path, library_dir)
| (per-file numeric quality-signal columns omitted) |
| 8a040db174b4e066ad1fcf13a9fc64667e2a81e2 | 274 | py | Python | leetCode/algorithms/easy/count_and_say.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | ["WTFPL"] | 25 | 2015-01-21T16:39:18.000Z | 2021-05-24T07:01:24.000Z | leetCode/algorithms/easy/count_and_say.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | ["WTFPL"] | 2 | 2020-09-30T19:39:36.000Z | 2020-10-01T17:15:16.000Z | leetCode/algorithms/easy/count_and_say.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | ["WTFPL"] | 15 | 2015-01-21T16:39:27.000Z | 2020-10-01T17:00:22.000Z |
from itertools import groupby
class Solution:
def countAndSay(self, n):
def gen(s):
return "".join(str(len(list(g))) + k for k, g in groupby(s))
s, i = "1", 1
while i < n:
s = gen(s)
i += 1
return s
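A quick check of the run-length step: each iteration replaces the string with "count, digit" pairs, so n=4 reads "21" off as one 2, one 1. A minimal usage example:

s = Solution()
assert s.countAndSay(1) == "1"
assert s.countAndSay(2) == "11"    # one 1
assert s.countAndSay(3) == "21"    # two 1s
assert s.countAndSay(4) == "1211"  # one 2, one 1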
| (per-file numeric quality-signal columns omitted) |
| 8a047dbb3e81227c03ec206589ca325125601905 | 1,721 | py | Python | app/blueprints/department_blueprint.py | Maxcutex/personal_ecommerce | be09fb20eae1b225523acde06f8e75effcc3676f | ["MIT"] | null | null | null | app/blueprints/department_blueprint.py | Maxcutex/personal_ecommerce | be09fb20eae1b225523acde06f8e75effcc3676f | ["MIT"] | 2 | 2019-05-21T08:44:29.000Z | 2021-04-30T20:46:08.000Z | app/blueprints/department_blueprint.py | Maxcutex/personal_ecommerce | be09fb20eae1b225523acde06f8e75effcc3676f | ["MIT"] | null | null | null |
from flasgger import swag_from
from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth
from app.controllers.department_controller import DepartmentController
url_prefix = '{}/departments'.format(BaseBlueprint.base_url_prefix)
department_blueprint = Blueprint('department', __name__, url_prefix=url_prefix)
department_controller = DepartmentController(request)
@department_blueprint.route('/', methods=['GET'])
@Auth.has_permission('view_department')
@swag_from('documentation/get_all_departments.yml')
def list_departments():
return department_controller.list_departments()
@department_blueprint.route('/<int:department_id>', methods=['GET'])
@Auth.has_permission('view_department')
@swag_from('documentation/get_single_department.yml')
def get_department(department_id):
return department_controller.get_department(department_id)
@department_blueprint.route('/', methods=['POST'])
@Auth.has_role('admin')
@Security.validator(['name|required:ifExists_Department_name', 'description|required'])
@swag_from('documentation/create_department.yml')
def create_department():
return department_controller.create_department()
@department_blueprint.route('/<int:department_id>', methods=['DELETE'])
@Auth.has_role('admin')
@swag_from('documentation/delete_department.yml')
def delete_department(department_id):
return department_controller.delete_department(department_id)
@department_blueprint.route('/<int:department_id>', methods=['PATCH'])
@Auth.has_role('admin')
@Security.validator(['name|optional', 'description|optional'])
@swag_from('documentation/update_department.yml')
def update_department(department_id):
return department_controller.update_department(department_id)
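The blueprint above only defines routes; it still has to be registered on an application. A minimal wiring sketch (the real project presumably does this in its app factory; the bare Flask app here is an assumption for illustration):

from flask import Flask

app = Flask(__name__)
# url_prefix was already baked into the Blueprint constructor above,
# so registration needs no extra arguments:
app.register_blueprint(department_blueprint)
# GET <base_url_prefix>/departments/ now dispatches to list_departments().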
| (per-file numeric quality-signal columns omitted) |
| 8a06d974512def3c400fb25769c0185d59195602 | 1,405 | py | Python | baymax/api.py | dmrz/baymax | 60cca5ae2e7cb42e093747f91b809e34e6782fcd | ["MIT"] | 34 | 2018-02-14T09:37:26.000Z | 2021-02-13T10:06:54.000Z | baymax/api.py | Avishekbhattacharjee/baymax | 487930c4f3021ff50504d371de09ff31e458c09f | ["MIT"] | 1 | 2018-03-03T02:55:38.000Z | 2018-03-17T21:57:15.000Z | baymax/api.py | Avishekbhattacharjee/baymax | 487930c4f3021ff50504d371de09ff31e458c09f | ["MIT"] | 7 | 2018-02-28T07:35:35.000Z | 2022-01-26T11:54:40.000Z |
import json
import aiohttp
async def request(url, payload=None, params=None, headers=None):
headers = {'content-type': 'application/json', **(headers or {})}
data = payload and json.dumps(payload)
async with aiohttp.ClientSession() as client:
async with client.post(
url, data=data, params=params, headers=headers) as resp:
# TODO: Check response status
json_response = await resp.json()
return json_response
async def get_updates(base_url, timeout, offset):
params = {
'timeout': timeout,
'offset': offset
}
return await request(f'{base_url}/getUpdates', params=params)
async def send_message(base_url, chat_id, text, reply_markup=None):
payload = {
'chat_id': chat_id,
'text': text
}
if reply_markup is not None:
payload['reply_markup'] = reply_markup
return await request(f'{base_url}/sendMessage', payload)
async def answer_callback_query(
base_url, callback_query_id, text, show_alert,
url=None, cache_time=None):
payload = {
'callback_query_id': callback_query_id,
'text': text,
'show_alert': show_alert
}
if url is not None:
payload['url'] = url
if cache_time is not None:
payload['cache_time'] = cache_time
return await request(f'{base_url}/answerCallbackQuery', payload)
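These coroutines are thin wrappers over one POST helper, so driving them only needs an event loop. A usage sketch (BASE_URL is a hypothetical placeholder; a real Telegram bot URL embeds the bot token):

import asyncio

BASE_URL = "https://api.telegram.org/bot<TOKEN>"  # hypothetical placeholder

async def demo():
    updates = await get_updates(BASE_URL, timeout=30, offset=0)
    await send_message(BASE_URL, chat_id=123456, text="hello")
    return updates

# asyncio.run(demo())  # uncomment with a real token and chat_id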
| (per-file numeric quality-signal columns omitted) |
| 8a0988ba1c9ee5db70eabfa7b9b35ad041f9c1f7 | 2,238 | py | Python | pymatgen/analysis/tests/test_piezo.py | exenGT/pymatgen | a8ffb820ab8fc3f60251099e38c8888f45eae618 | ["MIT"] | 1 | 2021-11-02T21:10:11.000Z | 2021-11-02T21:10:11.000Z | pymatgen/analysis/tests/test_piezo.py | exenGT/pymatgen | a8ffb820ab8fc3f60251099e38c8888f45eae618 | ["MIT"] | 5 | 2018-08-07T23:00:23.000Z | 2021-01-05T22:46:23.000Z | pymatgen/analysis/tests/test_piezo.py | exenGT/pymatgen | a8ffb820ab8fc3f60251099e38c8888f45eae618 | ["MIT"] | 6 | 2019-04-26T18:50:41.000Z | 2020-03-29T17:58:34.000Z |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Test for the piezo tensor class
"""
__author__ = "Shyam Dwaraknath"
__version__ = "0.1"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "4/1/16"
import os
import unittest
import numpy as np
from pymatgen.analysis.piezo import PiezoTensor
from pymatgen.util.testing import PymatgenTest
class PiezoTest(PymatgenTest):
def setUp(self):
self.piezo_struc = self.get_structure("BaNiO3")
self.voigt_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
[0.0, 0.0, 0.0, 0.03839, 0.0, 0.0],
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.vasp_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.03839],
                [0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.full_tensor_array = [
[[0.0, 0.0, 0.03839], [0.0, 0.0, 0.0], [0.03839, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.03839], [0.0, 0.03839, 0.0]],
[[6.89822, 0.0, 0.0], [0.0, 6.89822, 0.0], [0.0, 0.0, 27.4628]],
]
def test_new(self):
pt = PiezoTensor(self.full_tensor_array)
self.assertArrayAlmostEqual(pt, self.full_tensor_array)
bad_dim_array = np.zeros((3, 3))
self.assertRaises(ValueError, PiezoTensor, bad_dim_array)
def test_from_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_voigt(self.voigt_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
def test_from_vasp_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_vasp_voigt(self.vasp_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
if __name__ == "__main__":
unittest.main()
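A spot-check of the Voigt/full-tensor correspondence exercised above, in plain numpy (the index convention is read off the arrays in this test, not the pymatgen API):

import numpy as np
voigt = np.array(
    [
        [0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
        [0.0, 0.0, 0.0, 0.03839, 0.0, 0.0],
        [6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
    ]
)
full = np.array(
    [
        [[0.0, 0.0, 0.03839], [0.0, 0.0, 0.0], [0.03839, 0.0, 0.0]],
        [[0.0, 0.0, 0.0], [0.0, 0.0, 0.03839], [0.0, 0.03839, 0.0]],
        [[6.89822, 0.0, 0.0], [0.0, 6.89822, 0.0], [0.0, 0.0, 27.4628]],
    ]
)
# Voigt column 4 corresponds to the (x, z)/(z, x) index pair:
assert voigt[0, 4] == full[0, 0, 2] == 0.03839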
| (per-file numeric quality-signal columns omitted) |
| 8a13a931088f76e07468fa49084284d44b5cf0eb | 936 | py | Python | autolatex-master/exemplos_codigo/certificados/certificados.py | luizgui05/autolatex. | 366eb3d88b7e60c119737f958e35cce99e8775e9 | ["MIT"] | null | null | null | autolatex-master/exemplos_codigo/certificados/certificados.py | luizgui05/autolatex. | 366eb3d88b7e60c119737f958e35cce99e8775e9 | ["MIT"] | null | null | null | autolatex-master/exemplos_codigo/certificados/certificados.py | luizgui05/autolatex. | 366eb3d88b7e60c119737f958e35cce99e8775e9 | ["MIT"] | null | null | null |
import os
import sys
import sqlite3
con = None
filename = 'certificado'
# Abrir banco de dados para ler nomes.
try:
con = sqlite3.connect('math.db')
cur = con.cursor()
cur.execute('select * from math')
data = cur.fetchall()
except sqlite3.Error as e:
    print("Error %s:" % e.args[0])
sys.exit(1)
finally:
if con:
con.close()
# Gerar um certificado para cada nome.
for row in data:
f = open(filename+'.tex','r+')
old = f.readlines()
if old[0][1:4] == 'def':
offset = 1
else:
offset = 0
f.seek(0)
f.write('\\def\\name {'+row[0]+'}\n')
f.writelines(old[offset:])
f.close()
# Compilar arquivo LaTeX
try:
os.system('pdflatex '+filename+'.tex')
os.system('mv '+filename+'.pdf '+filename+'_'+row[0].replace(' ','_')+'.pdf')
#os.system('xdg-open '+filename+'.pdf &')
except OSError:
print('LaTeX not installed.')
| (per-file numeric quality-signal columns omitted) |
| 8a1f6ceee24cfa74cb693e71048a38117f2ad54b | 907 | py | Python | base/admin.py | ExpertOfNone/expert_of_none | 9ff4e4279a570712766546122c014c754f753485 | ["MIT"] | null | null | null | base/admin.py | ExpertOfNone/expert_of_none | 9ff4e4279a570712766546122c014c754f753485 | ["MIT"] | null | null | null | base/admin.py | ExpertOfNone/expert_of_none | 9ff4e4279a570712766546122c014c754f753485 | ["MIT"] | null | null | null |
from django.contrib import admin
from base.models import Topic, Photo
class EONBaseAdmin(admin.ModelAdmin):
def get_changeform_initial_data(self, request):
initial = super().get_changeform_initial_data(request)
if 'add' in request.META['PATH_INFO']:
initial['created_by'] = request.user
initial['modified_by'] = request.user
return initial
def save_model(self, request, obj, form, change):
if not obj.created_by:
obj.created_by = request.user
return super().save_model(request, obj, form, change)
class TopicAdmin(EONBaseAdmin):
list_display = [
'name', 'parent_topic', 'top_level', 'modified_by', 'modified', 'created_by', 'created',
]
class PhotoAdmin(EONBaseAdmin):
# TODO Add Proper List Display
pass
admin.site.register(Topic, TopicAdmin)
admin.site.register(Photo, PhotoAdmin)
| (per-file numeric quality-signal columns omitted) |
| 8a2014bc56418a4e4967160efe3f9656c573b77f | 1,432 | py | Python | glue/__init__.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | ["BSD-3-Clause"] | 550 | 2015-01-08T13:51:06.000Z | 2022-03-31T11:54:47.000Z | glue/__init__.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | ["BSD-3-Clause"] | 1,362 | 2015-01-03T19:15:52.000Z | 2022-03-30T13:23:11.000Z | glue/__init__.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | ["BSD-3-Clause"] | 142 | 2015-01-08T13:08:00.000Z | 2022-03-18T13:25:57.000Z |
# Set up configuration variables
__all__ = ['custom_viewer', 'qglue', 'test']
import os
import sys
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('glue-core').version
except DistributionNotFound:
__version__ = 'undefined'
from ._mpl_backend import MatplotlibBackendSetter
sys.meta_path.append(MatplotlibBackendSetter())
from glue.viewers.custom.helper import custom_viewer
# Load user's configuration file
from .config import load_configuration
env = load_configuration()
from .qglue import qglue
from .main import load_plugins # noqa
def test(no_optional_skip=False):
from pytest import main
root = os.path.abspath(os.path.dirname(__file__))
args = [root, '-x']
if no_optional_skip:
args.append('--no-optional-skip')
return main(args=args)
from glue._settings_helpers import load_settings
load_settings()
# In PyQt 5.5+, PyQt overrides the default exception catching and fatally
# crashes the Qt application without printing out any details about the error.
# Below we revert the exception hook to the original Python one. Note that we
# can't just do sys.excepthook = sys.__excepthook__ otherwise PyQt will detect
# the default excepthook is in place and override it.
def handle_exception(exc_type, exc_value, exc_traceback):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handle_exception
| (per-file numeric quality-signal columns omitted) |
| 8a20b1d12635ada6c636b100e165021b86485320 | 2,854 | py | Python | main.py | vkumarma/Complete-Interpreter | 5ec15ea84b0e7e735328511cc504efa43638f720 | ["MIT"] | null | null | null | main.py | vkumarma/Complete-Interpreter | 5ec15ea84b0e7e735328511cc504efa43638f720 | ["MIT"] | null | null | null | main.py | vkumarma/Complete-Interpreter | 5ec15ea84b0e7e735328511cc504efa43638f720 | ["MIT"] | null | null | null |
import re
import sys
class Lexer:
def __init__(self, inp_str):
self.index = 0
self.s = inp_str
def get_char(self):
if self.index < len(self.s):
var = self.s[self.index]
self.index += 1
return var
input_file = open(str(sys.argv[1]), 'r') # Open file for reading
line = input_file.read()
# "if z then while x * 4 - 2 do skip endwhile else x := 7 endif; y := 1"
input_string = line.strip("\n")
lexer = Lexer(input_string)
hashtable = {}
tokens_list = []
def token_check(input):
if re.fullmatch("if|then|else|endif|while|do|endwhile|skip", input):
hashtable[input] = "KEYWORD"
tokens_list.append(input)
elif re.search("([a-z]|[A-Z])([a-z]|[A-Z]|[0-9])*", input):
hashtable[input] = "IDENTIFIER"
tokens_list.append(input)
elif re.search("[0-9]+", input):
hashtable[input] = "NUMBER"
tokens_list.append(input)
elif re.fullmatch("\+|\-|\*|/|\(|\)|:=|;", input):
hashtable[input] = "SYMBOL"
tokens_list.append(input)
else:
hashtable[input] = "ERROR READING"
def digit(curr_char, lexer):
sub = ""
while (curr_char.isdigit()):
sub += curr_char
curr_char = lexer.get_char()
if curr_char == None:
break
new.append(curr_char)
return sub
def longest_sub_string(curr_char, lexer):
sub = ""
while (curr_char.isalpha() or curr_char.isdigit()):
sub += curr_char
curr_char = lexer.get_char()
if curr_char == None:
break
new.append(curr_char)
return sub
def symbol(curr_char, lexer):
# print(curr_char)
sym = curr_char
curr_char = lexer.get_char()
new.append(curr_char)
return sym
def assignment(curr_char, lexer):
sub = curr_char
next_char = lexer.get_char()
if next_char == "=":
sub += next_char
new.append(next_char)
return sub
new.append(lexer.get_char())
return sub
new = [] # keeping track of current char.
curr_char = lexer.get_char()
while curr_char is not None:
    while curr_char == ' ' or curr_char == '':
        curr_char = lexer.get_char()
    if curr_char is None:  # input may end while skipping whitespace
        break
if (curr_char.isdigit()):
token_check(digit(curr_char, lexer))
curr_char = new.pop()
elif (curr_char.isalpha()):
token_check(longest_sub_string(curr_char, lexer))
curr_char = new.pop()
elif curr_char in "+-/*();":
token_check(symbol(curr_char, lexer))
curr_char = new.pop()
elif curr_char == ":":
token_check(assignment(curr_char, lexer))
curr_char = new.pop()
if curr_char == "=":
curr_char = lexer.get_char()
else:
token_check(curr_char)
curr_char = lexer.get_char()
def tokens():
return hashtable
# print(tokens_list)
# print(tokens())
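The scanner above drives Lexer.get_char() one character at a time and classifies maximal substrings via token_check. A minimal sketch exercising Lexer on an in-memory string, bypassing the file argument the script normally expects:

lx = Lexer("x := 7")
chars = []
c = lx.get_char()
while c is not None:
    chars.append(c)
    c = lx.get_char()
print("".join(chars))  # -> "x := 7"
# Fed through token_check as above, "x" classifies as IDENTIFIER,
# ":=" as SYMBOL and "7" as NUMBER.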
| (per-file numeric quality-signal columns omitted) |
| 8a20dee928bb3a353769ebc5d7c40156ab5eb131 | 306 | py | Python | deduplicate.py | Ghostofapacket/NewsGrabber-Deduplicate | 0b8152af2e1c6c87cf8540970f42084b96a99d9c | ["Unlicense"] | null | null | null | deduplicate.py | Ghostofapacket/NewsGrabber-Deduplicate | 0b8152af2e1c6c87cf8540970f42084b96a99d9c | ["Unlicense"] | null | null | null | deduplicate.py | Ghostofapacket/NewsGrabber-Deduplicate | 0b8152af2e1c6c87cf8540970f42084b96a99d9c | ["Unlicense"] | null | null | null |
import sys
sys.path.append('/usr/local/lib/python3.4/site-packages/')
from warc_dedup import deduplicate
def main():
if len(sys.argv) == 1:
raise Exception('Please provide the WARC file as argument.')
deduplicate.Warc(*sys.argv[1:]).deduplicate()
if __name__ == '__main__':
main()
| (per-file numeric quality-signal columns omitted) |
| 8a29eefe067ae42942e4915562e64419af3d1cde | 950 | py | Python | scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 9b45dae78d3ba24fe6b00e090f8763d3162e1570 | ["Apache-2.0"] | null | null | null | scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 9b45dae78d3ba24fe6b00e090f8763d3162e1570 | ["Apache-2.0"] | 2 | 2020-05-27T07:15:28.000Z | 2020-12-17T05:22:54.000Z | scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 9b45dae78d3ba24fe6b00e090f8763d3162e1570 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# All params from IdM are stored in the environment; you can get them via os.environ["paramName"]
import sys, os
# this is needed for importing file winrm_wrapper from parent dir
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import winrm_wrapper
import codecs
uid = os.environ["__UID__"]
winrm_wrapper.writeLog("Delete start for " + uid)
# Load PS script from file and replace params
winrm_wrapper.writeLog("loading script")
f = codecs.open(os.environ["script"], encoding='utf-8', mode='r')
command = f.read()
command = command.replace("$uid", uid)
# Call wrapper
winrm_wrapper.executeScript(os.environ["endpoint"], os.environ["authentication"], os.environ["user"],
os.environ["password"], os.environ["caTrustPath"], os.environ["ignoreCaValidation"], command, uid)
winrm_wrapper.writeLog("Delete end for " + uid)
print("__UID__=" + uid)
sys.exit()
| (per-file numeric quality-signal columns omitted) |
| 8a2ac410faa6645af8d41c21c8f5834684cf1a20 | 2,152 | py | Python | tests/registry_test.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | ["BSD-3-Clause"] | 1 | 2022-03-11T13:36:34.000Z | 2022-03-11T13:36:34.000Z | tests/registry_test.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | ["BSD-3-Clause"] | null | null | null | tests/registry_test.py | Walon1998/dace | 95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0 | ["BSD-3-Clause"] | null | null | null |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import unittest
from aenum import Enum, auto
from dace import registry
@registry.make_registry
class ExtensibleClass(object):
pass
class Extension(ExtensibleClass):
pass
@registry.extensible_enum
class ExtensibleEnumeration(Enum):
a = auto()
b = auto()
class RegistryTests(unittest.TestCase):
def test_class_registry(self):
ExtensibleClass.register(Extension)
self.assertTrue(Extension in ExtensibleClass.extensions())
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister(self):
@registry.autoregister
class Extension2(ExtensibleClass):
pass
self.assertTrue(Extension2 in ExtensibleClass.extensions())
def test_class_registry_args(self):
ExtensibleClass.register(Extension, a=True, b=1, c=2)
self.assertTrue(Extension in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension], dict(a=True, b=1, c=2))
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister_args(self):
@registry.autoregister_params(a=False, b=0)
class Extension3(ExtensibleClass):
pass
self.assertTrue(Extension3 in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension3], dict(a=False, b=0))
def test_autoregister_fail(self):
with self.assertRaises(TypeError):
@registry.autoregister
class Extension4(object):
pass
def test_enum_registry(self):
ExtensibleEnumeration.register('c')
self.assertTrue(ExtensibleEnumeration.c in ExtensibleEnumeration)
self.assertEqual(ExtensibleEnumeration.c.value, 3)
def test_enum_registry_fail(self):
with self.assertRaises(TypeError):
@registry.extensible_enum
class NotAnEnum(object):
pass
if __name__ == '__main__':
unittest.main()
| (per-file numeric quality-signal columns omitted) |
| 8a2f400a7655554fbc57b5f622cd3afad8069e45 | 427 | py | Python | gcp-python-fn/main.py | FuriKuri/faas-playground | 52618e21064e327d2874d2b73cfe5fb247d3dd6e | ["MIT"] | 1 | 2019-05-07T13:15:16.000Z | 2019-05-07T13:15:16.000Z | gcp-python-fn/main.py | FuriKuri/faas-playground | 52618e21064e327d2874d2b73cfe5fb247d3dd6e | ["MIT"] | null | null | null | gcp-python-fn/main.py | FuriKuri/faas-playground | 52618e21064e327d2874d2b73cfe5fb247d3dd6e | ["MIT"] | null | null | null |
def hello_world(request):
request_json = request.get_json()
name = 'World'
if request_json and 'name' in request_json:
name = request_json['name']
headers = {
'Access-Control-Allow-Origin': 'https://furikuri.net',
'Access-Control-Allow-Methods': 'GET, POST',
'Access-Control-Allow-Headers': 'Content-Type'
}
return ('Hello ' + name + '! From GCP + Python', 200, headers)
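Locally the handler can be smoke-tested with a stand-in for the request object (_StubRequest is an assumption for illustration; on Cloud Functions the framework passes a real Flask request with get_json):

class _StubRequest:
    def get_json(self):
        return {"name": "GCP"}

body, status, headers = hello_world(_StubRequest())
print(status, body)  # 200 Hello GCP! From GCP + Python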
| (per-file numeric quality-signal columns omitted) |
| 8a3245f4587a32c402e78f398ab94bc52ef0cf9a | 780 | py | Python | PaddleOCR/deploy/hubserving/ocr_det/params.py | TangJiamin/Ultra_light_OCR_No.23 | 594aa286dc2f88614141838ce45c164647226cdb | ["Apache-2.0"] | null | null | null | PaddleOCR/deploy/hubserving/ocr_det/params.py | TangJiamin/Ultra_light_OCR_No.23 | 594aa286dc2f88614141838ce45c164647226cdb | ["Apache-2.0"] | null | null | null | PaddleOCR/deploy/hubserving/ocr_det/params.py | TangJiamin/Ultra_light_OCR_No.23 | 594aa286dc2f88614141838ce45c164647226cdb | ["Apache-2.0"] | null | null | null |
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Config(object):
pass
def read_params():
cfg = Config()
#params for text detector
cfg.det_algorithm = "DB"
cfg.det_model_dir = "./inference/ch_ppocr_mobile_v2.0_det_infer/"
cfg.det_limit_side_len = 960
cfg.det_limit_type = 'max'
    # DB params
cfg.det_db_thresh = 0.3
cfg.det_db_box_thresh = 0.5
cfg.det_db_unclip_ratio = 1.6
cfg.use_dilation = False
    # EAST params
# cfg.det_east_score_thresh = 0.8
# cfg.det_east_cover_thresh = 0.1
# cfg.det_east_nms_thresh = 0.2
cfg.use_pdserving = False
cfg.use_tensorrt = False
return cfg
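A consumer simply calls read_params() and reads attributes off the returned object, e.g.:

cfg = read_params()
print(cfg.det_algorithm, cfg.det_limit_side_len, cfg.det_db_thresh)  # DB 960 0.3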
| (per-file numeric quality-signal columns omitted) |
| 8a328b7be397a48ed8f6202385b17e0dbf81357c | 12,156 | py | Python | networks/larflow/models/larflow_uresnet.py | LArbys/ublarcvserver | 02381c937f49a2eab2f754017ab431c3f6fa70d7 | ["Apache-2.0"] | 2 | 2020-07-09T19:34:03.000Z | 2021-06-21T23:09:23.000Z | networks/larflow/models/larflow_uresnet.py | LArbys/ublarcvserver | 02381c937f49a2eab2f754017ab431c3f6fa70d7 | ["Apache-2.0"] | null | null | null | networks/larflow/models/larflow_uresnet.py | LArbys/ublarcvserver | 02381c937f49a2eab2f754017ab431c3f6fa70d7 | ["Apache-2.0"] | null | null | null |
import torch.nn as nn
import torch as torch
import math
import torch.utils.model_zoo as model_zoo
###########################################################
#
# U-ResNet
# U-net with ResNet modules
#
# Semantic segmentation network used by MicroBooNE
# to label track/shower pixels
#
# resnet implementation from pytorch.torchvision module
# U-net from (cite)
#
# meant to be copy of caffe version
#
###########################################################
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.stride = stride
self.bypass = None
if inplanes!=planes or stride>1:
self.bypass = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.bypass is not None:
outbp = self.bypass(x)
out += outbp
else:
out += x
out = self.relu(out)
return out
class Bottleneck(nn.Module):
def __init__(self, inplanes, planes, stride=1 ):
super(Bottleneck, self).__init__()
# residual path
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
# if stride >1, then we need to subsamble the input
if stride>1:
self.shortcut = nn.Conv2d(inplanes,planes,kernel_size=1,stride=stride,bias=False)
else:
self.shortcut = None
def forward(self, x):
if self.shortcut is None:
bypass = x
else:
bypass = self.shortcut(x)
residual = self.conv1(x)
residual = self.bn1(residual)
residual = self.relu(residual)
residual = self.conv2(residual)
residual = self.bn2(residual)
residual = self.relu(residual)
residual = self.conv3(residual)
residual = self.bn3(residual)
out = bypass+residual
out = self.relu(out)
return out
class PreactivationBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1 ):
        super(PreactivationBlock, self).__init__()
# residual path
self.bn1 = nn.BatchNorm2d(inplanes)
self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, bias=False)  # padding keeps spatial size so residual and bypass can be summed
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
# if stride >1, then we need to subsamble the input
if stride>1:
self.shortcut = nn.Conv2d(inplanes,planes,kernel_size=1,stride=stride,bias=False)
else:
self.shortcut = None
    def forward(self, x):
        if self.shortcut is None:
            bypass = x
        else:
            bypass = self.shortcut(x)
        # pre-activation ordering: BN -> ReLU -> conv on the residual path
        residual = self.conv1(self.relu1(self.bn1(x)))
        residual = self.conv2(self.relu2(self.bn2(residual)))
        return bypass + residual
class DoubleResNet(nn.Module):
def __init__(self,Block,inplanes,planes,stride=1):
super(DoubleResNet,self).__init__()
self.res1 = Block(inplanes,planes,stride)
self.res2 = Block( planes,planes, 1)
def forward(self, x):
out = self.res1(x)
out = self.res2(out)
return out
class ConvTransposeLayer(nn.Module):
def __init__(self,deconv_inplanes,skip_inplanes,deconv_outplanes,res_outplanes):
super(ConvTransposeLayer,self).__init__()
self.deconv = nn.ConvTranspose2d( deconv_inplanes, deconv_outplanes, kernel_size=4, stride=2, padding=1, bias=False )
self.res = DoubleResNet(BasicBlock,deconv_outplanes+skip_inplanes,res_outplanes,stride=1)
def forward(self,x,skip_x):
out = self.deconv(x,output_size=skip_x.size())
# concat skip connections
out = torch.cat( [out,skip_x], 1 )
out = self.res(out)
return out
class LArFlowUResNet(nn.Module):
def __init__(self, num_classes=3, input_channels=3, inplanes=16, showsizes=False, use_visi=True):
self.inplanes =inplanes
super(LArFlowUResNet, self).__init__()
self._showsizes = showsizes # print size at each layer
self.use_visi = use_visi
# Encoder
# stem
# one big stem
self.conv1 = nn.Conv2d(input_channels, self.inplanes, kernel_size=7, stride=1, padding=3, bias=True) # initial conv layer
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu1 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d( 3, stride=2, padding=1 )
self.enc_layer1 = self._make_encoding_layer( self.inplanes*1, self.inplanes*2, stride=1) # 16->32
self.enc_layer2 = self._make_encoding_layer( self.inplanes*2, self.inplanes*4, stride=2) # 32->64
self.enc_layer3 = self._make_encoding_layer( self.inplanes*4, self.inplanes*8, stride=2) # 64->128
self.enc_layer4 = self._make_encoding_layer( self.inplanes*8, self.inplanes*16, stride=2) # 128->256
self.enc_layer5 = self._make_encoding_layer( self.inplanes*16, self.inplanes*32, stride=2) # 256->512
# decoding flow
#self.num_final_flow_features = self.inplanes
self.num_final_flow_features = self.inplanes
self.flow_dec_layer5 = self._make_decoding_layer( self.inplanes*32*2, self.inplanes*16, self.inplanes*16, self.inplanes*16 ) # 512->256
self.flow_dec_layer4 = self._make_decoding_layer( self.inplanes*16, self.inplanes*8, self.inplanes*8, self.inplanes*8 ) # 256->128
self.flow_dec_layer3 = self._make_decoding_layer( self.inplanes*8, self.inplanes*4, self.inplanes*4, self.inplanes*4 ) # 128->64
self.flow_dec_layer2 = self._make_decoding_layer( self.inplanes*4, self.inplanes*2, self.inplanes*2, self.inplanes*2 ) # 64->32
#self.flow_dec_layer1 = self._make_decoding_layer( self.inplanes*2, self.inplanes, self.inplanes ) # 32->16
self.flow_dec_layer1 = self._make_decoding_layer( self.inplanes*2, self.inplanes, self.inplanes, self.num_final_flow_features ) # 32->200
# decoding matchability
if self.use_visi:
self.visi_dec_layer5 = self._make_decoding_layer( self.inplanes*32*2, self.inplanes*16, self.inplanes*16, self.inplanes*16 ) # 512->256
self.visi_dec_layer4 = self._make_decoding_layer( self.inplanes*16, self.inplanes*8, self.inplanes*8, self.inplanes*8 ) # 256->128
self.visi_dec_layer3 = self._make_decoding_layer( self.inplanes*8, self.inplanes*4, self.inplanes*4, self.inplanes*4 ) # 128->64
self.visi_dec_layer2 = self._make_decoding_layer( self.inplanes*4, self.inplanes*2, self.inplanes*2, self.inplanes*2 ) # 64->32
self.visi_dec_layer1 = self._make_decoding_layer( self.inplanes*2, self.inplanes, self.inplanes, self.inplanes ) # 32->16
# 1x1 conv for flow
self.flow_conv = nn.Conv2d( self.num_final_flow_features, 1, kernel_size=1, stride=1, padding=0, bias=True )
# 1x1 conv for mathability
if self.use_visi:
self.visi_conv = nn.Conv2d( self.inplanes, 2, kernel_size=1, stride=1, padding=0, bias=True ) # 2 classes, 0=not vis, 1=vis
self.visi_softmax = nn.LogSoftmax(dim=1)
# initialization
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m,nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_encoding_layer(self, inplanes, planes, stride=2):
return DoubleResNet(BasicBlock,inplanes,planes,stride=stride)
def _make_decoding_layer(self, inplanes, skipplanes, deconvplanes, resnetplanes ):
return ConvTransposeLayer( inplanes, skipplanes, deconvplanes, resnetplanes )
def encode(self,x):
# stem
x = self.conv1(x)
x = self.bn1(x)
x0 = self.relu1(x)
x = self.pool1(x0)
x1 = self.enc_layer1(x)
x2 = self.enc_layer2(x1)
x3 = self.enc_layer3(x2)
x4 = self.enc_layer4(x3)
x5 = self.enc_layer5(x4)
if self._showsizes:
print "after encoding: "
print " x1: ",x1.size()
print " x2: ",x2.size()
print " x3: ",x3.size()
print " x4: ",x4.size()
print " x5: ",x5.size()
return x5,x0,x1,x2,x3,x4
def flow(self,merged_encode,x0,x1,x2,x3,x4):
""" decoding to flow prediction """
        x = self.flow_dec_layer5(merged_encode, x4)
        if self._showsizes:
            print("after decoding:")
            print("  dec5: ", x.size(), " iscuda=", x.is_cuda)
        x = self.flow_dec_layer4(x, x3)
        if self._showsizes:
            print("  dec4: ", x.size(), " iscuda=", x.is_cuda)
        x = self.flow_dec_layer3(x, x2)
        if self._showsizes:
            print("  dec3: ", x.size(), " iscuda=", x.is_cuda)
        x = self.flow_dec_layer2(x, x1)
        if self._showsizes:
            print("  dec2: ", x.size(), " iscuda=", x.is_cuda)
        x = self.flow_dec_layer1(x, x0)
        if self._showsizes:
            print("  dec1: ", x.size(), " iscuda=", x.is_cuda)
return x
def visibility(self,merged_encode,x0,x1,x2,x3,x4):
""" decoding to flow prediction """
x = self.visi_dec_layer5(merged_encode,x4)
if self._showsizes:
print "after decoding:"
print " dec5: ",x.size()," iscuda=",x.is_cuda
x = self.visi_dec_layer4(x,x3)
if self._showsizes:
print " dec4: ",x.size()," iscuda=",x.is_cuda
x = self.visi_dec_layer3(x,x2)
if self._showsizes:
print " dec3: ",x.size()," iscuda=",x.is_cuda
x = self.visi_dec_layer2(x,x1)
if self._showsizes:
print " dec2: ",x.size()," iscuda=",x.is_cuda
x = self.visi_dec_layer1(x,x0)
if self._showsizes:
print " dec1: ",x.size()," iscuda=",x.is_cuda
return x
def forward(self, src, target):
if self._showsizes:
print "input: ",x.size()," is_cuda=",x.is_cuda
src_encode, s0, s1, s2, s3, s4 = self.encode(src)
target_encode, t0, t1, t2, t3, t4 = self.encode(target)
merged_encode = torch.cat( [target_encode,src_encode], 1 )
flowout = self.flow( merged_encode, s0, s1, s2, s3, s4 )
if self.use_visi:
visiout = self.visibility( merged_encode, t0, t1, t2, t3, t4 )
flow_predict = self.flow_conv( flowout )
if self.use_visi:
visi_predict = self.visi_conv( visiout )
visi_predict = self.visi_softmax(visi_predict)
else:
visi_predict = None
if self._showsizes:
print " softmax: ",x.size()
return flow_predict,visi_predict
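A shape-level usage sketch, kept commented because it needs a GPU-sized PyTorch install to run; the single-channel 832x832 input size is an assumption for illustration, and the two encoder inputs must share a size:

# import torch
# net = LArFlowUResNet(num_classes=3, input_channels=1, inplanes=16,
#                      showsizes=True, use_visi=True)
# src = torch.zeros(1, 1, 832, 832)  # (batch, channel, H, W)
# tgt = torch.zeros(1, 1, 832, 832)
# flow, visi = net(src, tgt)  # flow: 1-channel map, visi: 2-class log-probs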
| (per-file numeric quality-signal columns omitted) |
| 8a3c0f03126e25cbd17946a5a7c81e22d30b3f4d | 821 | py | Python | palm_tree/coconut_1/models.py | m-hintz-42/a-palm-tree | 57656874335f4dfae13cf720668f2c5391621618 | ["MIT"] | null | null | null | palm_tree/coconut_1/models.py | m-hintz-42/a-palm-tree | 57656874335f4dfae13cf720668f2c5391621618 | ["MIT"] | null | null | null | palm_tree/coconut_1/models.py | m-hintz-42/a-palm-tree | 57656874335f4dfae13cf720668f2c5391621618 | ["MIT"] | null | null | null |
from palm_tree import db
class Data(db.Model):
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.Integer)
response = db.Column(db.Text)
datetime = db.Column(db.DateTime)
def __init__(self, uuid, response, datetime):
self.uuid = uuid
self.response = response
self.datetime = datetime
def __repr__(self):
return '<Data %r>' % self.response
#
# class Logs(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# uuid = db.Column(db.Integer)
# payload = db.Column(db.Text)
# datetime = db.Column(db.DateTime)
#
# def __init__(self, uuid, payload, datetime):
# self.uuid = uuid
# self.payload = payload
# self.datetime = datetime
#
# def __repr__(self):
# return '<Data %r>' % self.payload
| (per-file numeric quality-signal columns omitted) |
| 8a3cfa083e8e8e57b0bc63b2f6a4954146234e99 | 6,995 | py | Python | Chest X-Ray Multilabel Image classification using CNN - Pytorch/Arch2.py | farzanaaswin0708/CNN-for-Visual-recognition | db65db0a0b60e1ed2a4a418069de61936aaa9e85 | ["MIT"] | null | null | null | Chest X-Ray Multilabel Image classification using CNN - Pytorch/Arch2.py | farzanaaswin0708/CNN-for-Visual-recognition | db65db0a0b60e1ed2a4a418069de61936aaa9e85 | ["MIT"] | null | null | null | Chest X-Ray Multilabel Image classification using CNN - Pytorch/Arch2.py | farzanaaswin0708/CNN-for-Visual-recognition | db65db0a0b60e1ed2a4a418069de61936aaa9e85 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
################################################################################
# CSE 253: Programming Assignment 3
# Winter 2019
# Code author: Jenny Hamer (+ modifications by Tejash Desai)
#
# Filename: baseline_cnn.py
#
# Description:
#
# This file contains the starter code for the baseline architecture you will use
# to get a little practice with PyTorch and compare the results of with your
# improved architecture.
#
# Be sure to fill in the code in the areas marked #TODO.
################################################################################
# PyTorch and neural network imports
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as func
import torch.nn.init as torch_init
import torch.optim as optim
# Data utils and dataloader
import torchvision
from torchvision import transforms, utils
from xray_dataloader_zscored import ChestXrayDataset, create_split_loaders
import matplotlib.pyplot as plt
import numpy as np
import os
class Arch2CNN(nn.Module):
"""
<<<<<<< HEAD
conv1 -> maxpool -> conv2 -> maxpool -> conv3 -> conv4 ->maxpool -> conv5 -> conv6 -> maxpool -> conv7 -> conv8 -> maxpool -> fc1 -> fc2 -> fc3 (outputs)
=======
conv1 -> conv2 -> maxpool -> conv3 -> conv4 -> conv5 -> maxpool -> fc1 -> fc2 -> fc3 (outputs)
>>>>>>> 6652e3cfb72835ac4a7c802c9a703b59d5f63ae6
"""
def __init__(self):
super(Arch2CNN, self).__init__()
# conv1: 1 input channel, 4 output channels, [3x3] kernel size
self.conv1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3)
# Add batch-normalization to the outputs of conv1
self.conv1_normed = nn.BatchNorm2d(4)
# Initialized weights using the Xavier-Normal method
torch_init.xavier_normal_(self.conv1.weight)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: Fill in the remaining initializations replacing each '_' with
# the necessary value based on the provided specs for each layer
#TODO: conv2: 4 input channels, 8 output channels, [3x3] kernel, initialization: xavier
self.conv2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3)
self.conv2_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv2.weight)
#Maxpool
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: conv3: X input channels, 12 output channels, [8x8] kernel, initialization: xavier
self.conv3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3)
self.conv3_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv3.weight)
#TODO: conv4: X input channels, 10 output channels, [6x6] kernel, initialization: xavier
self.conv4 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3)
self.conv4_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv4.weight)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: conv5: X input channels, 8 output channels, [5x5] kernel, initialization: xavier
self.conv5 = nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3)
self.conv5_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv5.weight)
self.conv6 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv6_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv6.weight)
self.pool4 = nn.MaxPool2d(kernel_size=3, stride=1)
        # conv7 and conv8: 8 -> 8 channels, [3x3] kernels; the final pool (pool5)
        # tiles with a [4x4] kernel at stride 4 (no sliding-window overlap)
self.conv7 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv7_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv7.weight)
self.conv8 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv8_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv8.weight)
self.pool5 = nn.MaxPool2d(kernel_size=4, stride=4)
        # Define 3 fully connected layers:
        # fc1: flattened conv features (122*122*8) -> 512
self.fc1 = nn.Linear(in_features=122*122*8, out_features=512)
self.fc1_normed = nn.BatchNorm1d(512)
torch_init.xavier_normal_(self.fc1.weight)
        # fc2: 512 -> 128
self.fc2 = nn.Linear(in_features=512, out_features=128)
self.fc2_normed = nn.BatchNorm1d(128)
torch_init.xavier_normal_(self.fc2.weight)
        # fc3: 128 -> 14
self.fc3 = nn.Linear(in_features=128, out_features=14)
torch_init.xavier_normal_(self.fc3.weight)
        # Output layer width: 14, one logit per pathology label
self.out_features = 14
def forward(self, batch):
"""Pass the batch of images through each layer of the network, applying
non-linearities after each layer.
Note that this function *needs* to be called "forward" for PyTorch to
automagically perform the forward pass.
Params:
-------
- batch: (Tensor) An input batch of images
Returns:
--------
- logits: (Variable) The output of the network
"""
        # Apply the first convolution, followed by an RReLU non-linearity;
        # use batch-normalization on its outputs
        batch = func.rrelu(self.conv1_normed(self.conv1(batch)))
batch = self.pool1(batch)
        # Apply the remaining conv/pool blocks similarly
batch = func.rrelu(self.conv2_normed(self.conv2(batch)))
batch = self.pool2(batch)
batch = func.rrelu(self.conv3_normed(self.conv3(batch)))
batch = func.rrelu(self.conv4_normed(self.conv4(batch)))
batch = self.pool3(batch)
batch = func.rrelu(self.conv5_normed(self.conv5(batch)))
batch = func.rrelu(self.conv6_normed(self.conv6(batch)))
        # Pass the output of conv6 to the pooling layer
batch = self.pool4(batch)
batch = func.rrelu(self.conv7_normed(self.conv7(batch)))
batch = func.rrelu(self.conv8_normed(self.conv8(batch)))
        # Pass the output of conv8 to the pooling layer
batch = self.pool5(batch)
        # Flatten the final pooled feature maps to pass to the fully-connected layers
batch = batch.view(-1, self.num_flat_features(batch))
        # Connect the flattened features to fc1
batch = func.rrelu(self.fc1_normed(self.fc1(batch)))
batch = func.rrelu(self.fc2_normed(self.fc2(batch)))
        # Connect fc2 to fc3 - unlike the other layers, fc3 gets no batch-norm
        # or non-linearity, since it produces the output logits
batch = self.fc3(batch)
        # Return the class logits
        # Note: no sigmoid is applied here; for multilabel training the raw
        # logits are typically passed to a loss such as BCEWithLogitsLoss,
        # which applies the sigmoid internally.
        #batch = func.sigmoid(batch)
return batch
def num_flat_features(self, inputs):
        # Get the dimensions of the tensor excluding the batch dimension
size = inputs.size()[1:]
# Track the number of features
num_features = 1
for s in size:
num_features *= s
return num_features
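A minimal smoke test for this architecture (an illustrative sketch, not part of the original assignment file; the 512x512 input size is inferred from fc1's in_features of 122*122*8):
# Illustrative smoke test (assumption: a 1-channel 512x512 input, which is the
# spatial size implied by fc1's in_features of 122*122*8):
if __name__ == "__main__":
    model = Arch2CNN()
    model.eval()  # use running batch-norm statistics for a quick shape check
    dummy = torch.randn(2, 1, 512, 512)  # (batch, channels, height, width)
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 14])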
| 36.623037
| 157
| 0.653324
| 927
| 6,995
| 4.809061
| 0.248112
| 0.031404
| 0.02961
| 0.051817
| 0.27389
| 0.192463
| 0.192463
| 0.154554
| 0.124944
| 0.053836
| 0
| 0.048191
| 0.225733
| 6,995
| 190
| 158
| 36.815789
| 0.774926
| 0.368692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015789
| 0
| 1
| 0.038462
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a3f2203d02e338bbadd1c557a7d415e6e39dbbc
| 379
|
py
|
Python
|
src/temp2.py
|
FabBrolMons/frbayart
|
c2b9dde730cf6d21f1c1492d0da0351c12a4dce9
|
[
"MIT"
] | null | null | null |
src/temp2.py
|
FabBrolMons/frbayart
|
c2b9dde730cf6d21f1c1492d0da0351c12a4dce9
|
[
"MIT"
] | null | null | null |
src/temp2.py
|
FabBrolMons/frbayart
|
c2b9dde730cf6d21f1c1492d0da0351c12a4dce9
|
[
"MIT"
] | null | null | null |
from w1thermsensor import W1ThermSensor
sensor = W1ThermSensor()
temperature_in_celsius = sensor.get_temperature()
temperature_in_fahrenheit = sensor.get_temperature(W1ThermSensor.DEGREES_F)
temperature_in_all_units = sensor.get_temperatures([W1ThermSensor.DEGREES_C, W1ThermSensor.DEGREES_F, W1ThermSensor.KELVIN])
print("Sensor id:" + sensor.id)
print(temperature_in_celsius)
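In w1thermsensor 2.x the unit constants moved into a Unit enum; a hedged equivalent of the reads above, assuming a 2.x install, would be:
# Hedged 2.x variant (assumption: w1thermsensor >= 2.0, where units live in a
# Unit enum rather than on the W1ThermSensor class):
from w1thermsensor import W1ThermSensor, Unit

sensor = W1ThermSensor()
print("Sensor id: " + sensor.id)
print(sensor.get_temperature(Unit.DEGREES_C))
print(sensor.get_temperature(Unit.DEGREES_F))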
| 42.111111
| 124
| 0.852243
| 45
| 379
| 6.844444
| 0.4
| 0.168831
| 0.12987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019718
| 0.063325
| 379
| 8
| 125
| 47.375
| 0.847887
| 0
| 0
| 0
| 0
| 0
| 0.026385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a3ff7ca606f5ce67c32533b5892e230c75d4eb8
| 413
|
py
|
Python
|
tables/migrations/0004_auto_20200901_2004.py
|
jarnoln/exposures
|
bbae3f79078048d25b77e178db6c0801ffe9f97e
|
[
"MIT"
] | null | null | null |
tables/migrations/0004_auto_20200901_2004.py
|
jarnoln/exposures
|
bbae3f79078048d25b77e178db6c0801ffe9f97e
|
[
"MIT"
] | null | null | null |
tables/migrations/0004_auto_20200901_2004.py
|
jarnoln/exposures
|
bbae3f79078048d25b77e178db6c0801ffe9f97e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-09-01 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tables', '0003_exposure_category'),
]
operations = [
migrations.AlterField(
model_name='exposure',
name='location',
field=models.CharField(blank=True, default='', max_length=200),
),
]
| 21.736842
| 75
| 0.605327
| 44
| 413
| 5.590909
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073333
| 0.273608
| 413
| 18
| 76
| 22.944444
| 0.746667
| 0.108959
| 0
| 0
| 1
| 0
| 0.120219
| 0.060109
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a406525f88287f3d13cd5aee631ef0cc809c7ec
| 247
|
py
|
Python
|
src/reportlab/graphics/charts/__init__.py
|
kokinomura/reportlab
|
18e39b85d7277c2b5e9218b30a7b7b0a644a3c02
|
[
"BSD-3-Clause"
] | 52
|
2016-09-30T05:53:45.000Z
|
2021-12-26T12:07:48.000Z
|
src/reportlab/graphics/charts/__init__.py
|
kokinomura/reportlab
|
18e39b85d7277c2b5e9218b30a7b7b0a644a3c02
|
[
"BSD-3-Clause"
] | 31
|
2017-01-05T06:07:28.000Z
|
2018-05-27T13:13:06.000Z
|
src/reportlab/graphics/charts/__init__.py
|
kokinomura/reportlab
|
18e39b85d7277c2b5e9218b30a7b7b0a644a3c02
|
[
"BSD-3-Clause"
] | 15
|
2016-11-03T08:50:15.000Z
|
2022-01-14T07:04:35.000Z
|
#Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/__init__.py
__version__='3.3.0'
__doc__='''Business charts'''
| 41.166667
| 116
| 0.793522
| 37
| 247
| 4.972973
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047414
| 0.060729
| 247
| 5
| 117
| 49.4
| 0.74569
| 0.773279
| 0
| 0
| 0
| 0
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a44052cfce16663b8820adca1028bccdfa9a1aa
| 438
|
py
|
Python
|
CodeForces/A2OJ Ladder/softuni_problem.py
|
dimitrov-dimitar/competitive-programming
|
f2b022377baf6d4beff213fc513907b774c12352
|
[
"MIT"
] | null | null | null |
CodeForces/A2OJ Ladder/softuni_problem.py
|
dimitrov-dimitar/competitive-programming
|
f2b022377baf6d4beff213fc513907b774c12352
|
[
"MIT"
] | null | null | null |
CodeForces/A2OJ Ladder/softuni_problem.py
|
dimitrov-dimitar/competitive-programming
|
f2b022377baf6d4beff213fc513907b774c12352
|
[
"MIT"
] | null | null | null |
total_budget = 0
while True:
destination = input()
if destination == "End":
break
minimal_budget = float(input())
while True:
command = input()
if command == "End":
break
money = float(command)
total_budget += money
if total_budget >= minimal_budget:
print(f"Going to {destination}!")
total_budget = 0
break
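A sample run (assumed inputs) illustrating the key behaviour: savings accumulate until the current destination's minimal budget is reached, at which point the total resets.
# Sample run (assumed inputs):
#   Greece   <- destination
#   500      <- minimal budget
#   100      <- total becomes 100 (< 500)
#   200      <- total becomes 300 (< 500)
#   300      <- total becomes 600 (>= 500) -> prints "Going to Greece!" and resets
#   End      <- ends the program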
| 24.333333
| 46
| 0.513699
| 43
| 438
| 5.093023
| 0.418605
| 0.200913
| 0.109589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007547
| 0.394977
| 438
| 17
| 47
| 25.764706
| 0.818868
| 0
| 0
| 0.4375
| 0
| 0
| 0.068884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.0625
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a441182ab86ba1e69b301671e3fe079d2030d2e
| 406
|
py
|
Python
|
footmark/ram/regioninfo.py
|
rockzhu/footmark
|
af2144e9139a63b475fa2b56c3307ddfd49c43e4
|
[
"Apache-2.0"
] | null | null | null |
footmark/ram/regioninfo.py
|
rockzhu/footmark
|
af2144e9139a63b475fa2b56c3307ddfd49c43e4
|
[
"Apache-2.0"
] | null | null | null |
footmark/ram/regioninfo.py
|
rockzhu/footmark
|
af2144e9139a63b475fa2b56c3307ddfd49c43e4
|
[
"Apache-2.0"
] | null | null | null |
from footmark.regioninfo import RegionInfo
class RAMRegionInfo(RegionInfo):
"""
    Represents a RAM region
"""
def __init__(self, connection=None, name=None, id=None,
connection_cls=None):
from footmark.ram.connection import RAMConnection
super(RAMRegionInfo, self).__init__(connection, name, id,
RAMConnection)
| 29
| 65
| 0.618227
| 38
| 406
| 6.368421
| 0.526316
| 0.099174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.302956
| 406
| 13
| 66
| 31.230769
| 0.855124
| 0.059113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.285714
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
8a44b11af8b2eb998e8acb85624cce72fd9e4d1c
| 303
|
py
|
Python
|
exercicios/ex 061 a 070/ex061.py
|
CarlosWillian/python
|
f863578245fbf402e5b46f844a247355afed0d62
|
[
"MIT"
] | null | null | null |
exercicios/ex 061 a 070/ex061.py
|
CarlosWillian/python
|
f863578245fbf402e5b46f844a247355afed0d62
|
[
"MIT"
] | null | null | null |
exercicios/ex 061 a 070/ex061.py
|
CarlosWillian/python
|
f863578245fbf402e5b46f844a247355afed0d62
|
[
"MIT"
] | null | null | null |
# Build and print a 10-term arithmetic progression ("P.A.") from a first term and a common difference
print('Crie sua P.A. de 10 termos')
n1 = int(input('Digite o primeiro termo da P.A.: '))
r = int(input('Digite a razão: '))
termo = n1
c = 1
print('A P.A. é (', end='')
while c <= 10:
print('{}'.format(termo), end='')
print(', ' if c < 10 else '', end='')
termo += r
c += 1
print(')')
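A sample run (assumed inputs; the prompts are Portuguese, where "P.A." is an arithmetic progression and "razão" is the common difference):
# Sample run (assumed inputs):
#   n1 = 2, r = 3
#   -> A P.A. é (2, 5, 8, 11, 14, 17, 20, 23, 26, 29)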
| 20.2
| 52
| 0.518152
| 52
| 303
| 3.019231
| 0.5
| 0.038217
| 0.178344
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04329
| 0.237624
| 303
| 14
| 53
| 21.642857
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0.298013
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.416667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
8a44d6f6124cbf59eb9c835f08ecb56f0d9adf5a
| 737
|
py
|
Python
|
PythonBasics/ConditionalStatements/Exercise/toy_shop.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
PythonBasics/ConditionalStatements/Exercise/toy_shop.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
PythonBasics/ConditionalStatements/Exercise/toy_shop.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
price = float(input())
puzzles = int(input())
dolls = int(input())
bears = int(input())
minions = int(input())
trucks = int(input())
total_toys = puzzles + dolls + bears + minions + trucks
price_puzzles = puzzles * 2.6
price_dolls = dolls * 3
price_bears = bears * 4.1
price_minions = minions * 8.2
price_trucks = trucks * 2
total_price = price_puzzles + price_dolls + price_bears + price_minions + price_trucks
if total_toys >= 50:
total_price = total_price - (total_price * 0.25)
rent = total_price * 0.1
total_price = total_price - rent
if total_price >= price:
print(f"Yes! {(total_price - price):.2f} lv left.")
else:
print(f"Not enough money! {(price - total_price):.2f} lv needed.")
| 25.413793
| 87
| 0.662144
| 107
| 737
| 4.35514
| 0.308411
| 0.214592
| 0.128755
| 0.128755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02906
| 0.206242
| 737
| 28
| 88
| 26.321429
| 0.767521
| 0
| 0
| 0
| 0
| 0
| 0.136812
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.095238
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a455ca53b609476797038c96b21d969bbdf51e3
| 2,234
|
py
|
Python
|
bookshelf/main/forms.py
|
thewordisbird/bookshelf
|
5166720bdc0dbffedc14b71b0f75ad78dc69b465
|
[
"MIT"
] | null | null | null |
bookshelf/main/forms.py
|
thewordisbird/bookshelf
|
5166720bdc0dbffedc14b71b0f75ad78dc69b465
|
[
"MIT"
] | null | null | null |
bookshelf/main/forms.py
|
thewordisbird/bookshelf
|
5166720bdc0dbffedc14b71b0f75ad78dc69b465
|
[
"MIT"
] | null | null | null |
import datetime
from flask_wtf import FlaskForm
from wtforms import (
StringField,
TextAreaField,
DateTimeField,
HiddenField,
PasswordField,
)
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo
class NullableDateTimeField(DateTimeField):
"""Modify DateField to allow for Null values"""
def process_formdata(self, valuelist):
# Bypasses wtForms validation for blank datetime field.
if valuelist:
date_str = " ".join(valuelist).strip()
if date_str == "":
self.data = None
return
try:
self.data = datetime.datetime.strptime(date_str, self.format)
except ValueError:
self.data = None
raise ValueError(self.gettext("Not a valid date value"))
class SearchForm(FlaskForm):
search = StringField("Search", validators=[DataRequired()])
class ReviewForm(FlaskForm):
rating = HiddenField("Rating", validators=[DataRequired()])
review_title = StringField("Headline")
review_content = TextAreaField("Review")
date_started = NullableDateTimeField("Date Started", format="%m/%d/%Y")
date_finished = NullableDateTimeField("Date Finished", format="%m/%d/%Y")
def validate_date_finished(self, date_finished):
if self.date_started.data and date_finished.data:
if self.date_started.data > date_finished.data:
print("Date finished must be greater than or equal to date started")
raise ValidationError(
"Date finished must be greater than or equal to date started."
)
elif self.date_started.data or date_finished.data:
print("missing date")
raise ValidationError("If setting read dates, both dates are required.")
class EditProfileForm(FlaskForm):
display_name = StringField("Name", validators=[])
email = StringField("Email", validators=[Email(message="Invalid Email Address.")])
password = PasswordField(
"Password",
validators=[EqualTo("confirm_password", message="Passwords must match.")],
)
confirm_password = PasswordField("Confirm Password", validators=[])
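A sketch of exercising ReviewForm's cross-field date check outside a real app (the Flask app, secret key, and form data below are illustrative assumptions, not part of the module):
# Illustrative test harness (all names below are assumptions for the sketch):
from flask import Flask
from werkzeug.datastructures import MultiDict

app = Flask(__name__)
app.config["SECRET_KEY"] = "test-only-key"  # hypothetical key for the sketch

with app.test_request_context():
    form = ReviewForm(
        formdata=MultiDict({
            "rating": "4",
            "date_started": "02/10/2024",
            "date_finished": "01/10/2024",  # earlier than date_started
        }),
        meta={"csrf": False},  # skip CSRF so the form validates outside a session
    )
    print(form.validate())  # False: finished date precedes started date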
| 36.032258
| 86
| 0.658013
| 230
| 2,234
| 6.3
| 0.408696
| 0.074534
| 0.031056
| 0.039337
| 0.096618
| 0.067633
| 0.067633
| 0.067633
| 0.067633
| 0.067633
| 0
| 0
| 0.243957
| 2,234
| 61
| 87
| 36.622951
| 0.857904
| 0.042972
| 0
| 0.041667
| 0
| 0
| 0.168856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0.104167
| 0.083333
| 0
| 0.4375
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
8a458f7c27c0535d07e4b642f5a00528aee12141
| 3,387
|
py
|
Python
|
main.py
|
DanielM24/Romanian-sub-dialect-identificator
|
78b3e00f8ee768eb0b1e8cf832a2dc0b8504b04d
|
[
"MIT"
] | null | null | null |
main.py
|
DanielM24/Romanian-sub-dialect-identificator
|
78b3e00f8ee768eb0b1e8cf832a2dc0b8504b04d
|
[
"MIT"
] | null | null | null |
main.py
|
DanielM24/Romanian-sub-dialect-identificator
|
78b3e00f8ee768eb0b1e8cf832a2dc0b8504b04d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Proiect.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1TR1Frf0EX4PtFZkLlVdGtMTINqhoQwRw
"""
# Import the libraries
import numpy as np
import pandas as pd # pandas for reading the files
from sklearn import preprocessing
from sklearn import svm # import the model
from sklearn.feature_extraction.text import TfidfVectorizer # model the data to obtain numeric values from the text
from sklearn.metrics import classification_report, confusion_matrix
# Load the data
train_labels = pd.read_csv('train_labels.txt', sep='\t', header=None, engine='python')
train_labels = train_labels.to_numpy() # convert the data frame to an array
train_labels = train_labels[:,1] # keep only the labels
train_samples = pd.read_csv('train_samples.txt', sep='\t', header=None, engine='python')
train_samples = train_samples.to_numpy()
train_samples = train_samples[:,1] # keep only the words
validation_samples = pd.read_csv('validation_samples.txt', sep='\t', header=None, engine='python')
validation_samples = validation_samples.to_numpy()
validation_samples = validation_samples[:,1] # keep the words
validation_labels = pd.read_csv('validation_labels.txt', sep='\t', header=None, engine='python')
validation_labels = validation_labels.to_numpy()
validation_labels = validation_labels[:,1] # keep only the labels
test_samples = pd.read_csv('test_samples.txt', sep='\t', header=None, engine='python')
test_samples = test_samples.to_numpy()
label = test_samples[:,0] # keep the sample ids
test_samples = test_samples[:,1] # keep the words
def normalize_data(train_data, test_data, type='l2'): # function that returns the normalized data
    # the normalization type defaults to l2
    scaler = None
    if type == 'standard':
        scaler = preprocessing.StandardScaler()
    elif type == 'min_max':
        scaler = preprocessing.MinMaxScaler()
    elif type == 'l1' or type == 'l2':
        scaler = preprocessing.Normalizer(norm = type)
    if scaler is not None:
        scaler.fit(train_data)
        scaled_train_data = scaler.transform(train_data)
        scaled_test_data = scaler.transform(test_data)
        return scaled_train_data, scaled_test_data
    else:
        return train_data, test_data
# Model the data
vectorizer = TfidfVectorizer()
training_features = vectorizer.fit_transform(train_samples)
validation_features = vectorizer.transform(validation_samples)
testing_features = vectorizer.transform(test_samples)
# Normalize the data
norm_train, norm_test = normalize_data(training_features, testing_features)
norm_validation, _ = normalize_data(validation_features, validation_features)
# Apply the SVM model
model_svm = svm.SVC(kernel='linear', C=23, gamma=110) # define the model
model_svm.fit(norm_train, train_labels) # the training step
test_predictions = model_svm.predict(norm_test) # predictions on the test data
print("Classification report: ")
print(classification_report(validation_labels, model_svm.predict(norm_validation)))
print("Confusion matrix: ")
print(confusion_matrix(validation_labels, model_svm.predict(norm_validation)))
# Export the data in CSV format
test_export = {'id':label,'label':test_predictions}
data_f = pd.DataFrame(test_export)
data_f.to_csv('test_submission.csv',index=False)
| 38.05618
| 120
| 0.775613
| 439
| 3,387
| 5.756264
| 0.343964
| 0.050653
| 0.017808
| 0.025722
| 0.15829
| 0.117926
| 0.117926
| 0.082311
| 0
| 0
| 0
| 0.006741
| 0.124004
| 3,387
| 89
| 121
| 38.05618
| 0.844961
| 0.220254
| 0
| 0
| 1
| 0
| 0.086458
| 0.01645
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.113208
| 0
| 0.169811
| 0.075472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a490933d8b95e96a7ba4163aae03b0fe0c37be5
| 657
|
py
|
Python
|
pytorch-frontend/tools/code_coverage/oss_coverage.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 40
|
2021-06-01T07:37:59.000Z
|
2022-03-25T01:42:09.000Z
|
pytorch-frontend/tools/code_coverage/oss_coverage.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 14
|
2021-06-01T11:52:46.000Z
|
2022-03-25T02:13:08.000Z
|
pytorch-frontend/tools/code_coverage/oss_coverage.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 7
|
2021-07-20T19:34:26.000Z
|
2022-03-13T21:07:36.000Z
|
#!/usr/bin/env python
import time
from package.oss.cov_json import get_json_report
from package.oss.init import initialization
from package.tool.summarize_jsons import summarize_jsons
from package.util.setting import TestPlatform
def report_coverage() -> None:
start_time = time.time()
(options, test_list, interested_folders) = initialization()
# run cpp tests
get_json_report(test_list, options)
# collect coverage data from json profiles
if options.need_summary:
summarize_jsons(
test_list, interested_folders, [""], TestPlatform.OSS, start_time
)
if __name__ == "__main__":
report_coverage()
| 27.375
| 77
| 0.73516
| 83
| 657
| 5.506024
| 0.493976
| 0.09628
| 0.061269
| 0.109409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182648
| 657
| 23
| 78
| 28.565217
| 0.851024
| 0.114155
| 0
| 0
| 0
| 0
| 0.013817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.333333
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
8a497075ae36fc35a089004f84ef24d85e09ec1c
| 401
|
py
|
Python
|
groupthink/version.py
|
emanuelfeld/groupthink
|
d8a6f666080352d396b07096cbd6304391f7c38d
|
[
"CC0-1.0"
] | 1
|
2017-01-09T17:27:05.000Z
|
2017-01-09T17:27:05.000Z
|
groupthink/version.py
|
emanuelfeld/groupthink
|
d8a6f666080352d396b07096cbd6304391f7c38d
|
[
"CC0-1.0"
] | null | null | null |
groupthink/version.py
|
emanuelfeld/groupthink
|
d8a6f666080352d396b07096cbd6304391f7c38d
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of groupthink.
# https://github.com/emanuelfeld/groupthink
# This project is in the public domain within the United States.
# Additionally, the Government of the District of Columbia waives
# copyright and related rights in the work worldwide through the CC0 1.0
# Universal public domain dedication.
__version__ = '1.0.0' # NOQA
| 28.642857
| 72
| 0.743142
| 60
| 401
| 4.9
| 0.733333
| 0.034014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020896
| 0.164589
| 401
| 13
| 73
| 30.846154
| 0.856716
| 0.887781
| 0
| 0
| 0
| 0
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a49e6407bf66d6fbb676497c6a102a344eeed6b
| 2,533
|
py
|
Python
|
apps/core/migrations/0001_initial.py
|
Visualway/Vitary
|
c7db9a25837fa7390b2177b9db48e73c6f1ab3c8
|
[
"BSD-3-Clause"
] | 4
|
2021-12-24T16:07:44.000Z
|
2022-03-04T02:30:20.000Z
|
apps/core/migrations/0001_initial.py
|
Visualway/Vitary
|
c7db9a25837fa7390b2177b9db48e73c6f1ab3c8
|
[
"BSD-3-Clause"
] | 4
|
2021-12-30T13:32:56.000Z
|
2022-03-15T03:58:48.000Z
|
apps/core/migrations/0001_initial.py
|
Visualway/Vitary
|
c7db9a25837fa7390b2177b9db48e73c6f1ab3c8
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-03-02 03:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('vit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Badge',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('color', models.CharField(choices=[('success', 'Green'), ('info', 'Blue'), ('link', 'Purple'), ('primary', 'Turquoise'), ('warning', 'Yellow'), ('danger', 'Red'), ('dark', 'Black'), ('white', 'White')], max_length=50)),
('special', models.BooleanField(default=False)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Requirments',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('badge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.badge')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Abuse',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('abuse_type', models.CharField(choices=[('ABUSE', 'Abuse'), ('INAPPROPRIATE', 'Inappropriate'), ('SPAM', 'Spam'), ('BULLYING', 'Bullying'), ('SEXUAL_CONTENT', 'Sexual Content'), ('OTHER', 'Other')], max_length=50)),
('description', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('to_vit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vit.vit')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Abuses',
'ordering': ['-date'],
},
),
]
| 42.932203
| 236
| 0.55073
| 236
| 2,533
| 5.788136
| 0.389831
| 0.029283
| 0.040996
| 0.064422
| 0.474378
| 0.474378
| 0.38287
| 0.38287
| 0.38287
| 0.38287
| 0
| 0.014803
| 0.279905
| 2,533
| 58
| 237
| 43.672414
| 0.734101
| 0.017766
| 0
| 0.490196
| 1
| 0
| 0.158488
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.137255
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a4c6c7b420769dc35e8f30f400909774d7d25e6
| 22,154
|
py
|
Python
|
gfirefly/dbentrust/dbutils.py
|
handsome3163/H2Dgame-Firefly
|
2d213928977dc490909f456327e5cae80998e60d
|
[
"MIT"
] | 675
|
2015-01-01T05:18:30.000Z
|
2022-03-18T08:27:06.000Z
|
gfirefly/dbentrust/dbutils.py
|
liuis/Firefly
|
fd2795b8c26de6ab63bbec23d11f18c3dfb39a50
|
[
"MIT"
] | 3
|
2015-01-29T02:36:14.000Z
|
2022-01-21T09:19:21.000Z
|
gfirefly/dbentrust/dbutils.py
|
liuis/Firefly
|
fd2795b8c26de6ab63bbec23d11f18c3dfb39a50
|
[
"MIT"
] | 248
|
2015-01-04T08:24:31.000Z
|
2022-02-18T07:14:02.000Z
|
#coding:utf8
# NOTE: this module targets Python 2 (it relies on unicode/basestring/long and
# the Python 2 raise-statement syntax throughout).
'''
Created on 2013-8-21
@author: lan (www.9miao.com)
'''
import itertools
import datetime
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""
if isinstance(obj, unicode):
return obj.encode(encoding)
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
if isinstance(obj, unicode): obj = obj.encode('utf8')
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
>>> sqllist(u'abc')
u'abc'
"""
if isinstance(lst, basestring):
return lst
else:
return ', '.join(lst)
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
def _interpolate(sformat):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
from tokenize import tokenprog
tokenprog = tokenprog
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = sformat.find("$", pos)
if dollar < 0:
break
nextchar = sformat[dollar + 1]
if nextchar == "{":
chunks.append((0, sformat[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(sformat, pos)
tstart, tend = match.regs[3]
token = sformat[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, sformat[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, sformat[pos:dollar]))
match, pos = matchorfail(sformat, dollar + 1)
while pos < len(sformat):
if sformat[pos] == "." and \
pos + 1 < len(sformat) and sformat[pos + 1] in namechars:
match, pos = matchorfail(sformat, pos + 1)
elif sformat[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(sformat, pos)
tstart, tend = match.regs[3]
token = sformat[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, sformat[dollar + 1:pos]))
else:
chunks.append((0, sformat[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(sformat):
chunks.append((0, sformat[pos:]))
return chunks
def sqlwhere(dictionary, grouping=' AND '):
"""
Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere({'cust_id': 2, 'order_id':3})
<sql: 'order_id = 3 AND cust_id = 2'>
>>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere({'a': 'a', 'b': 'b'}).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""
dictionary = dictionary.copy() # eval mucks with it
result = []
for live, chunk in _interpolate(string_):
if live:
v = eval(chunk, dictionary)
result.append(sqlquote(v))
else:
result.append(chunk)
return SQLQuery.join(result, '')
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, paramstyle
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, basestring):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, basestring):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (basestring, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
                # For backward compatibility, ignore escaping when the query looks already escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
class SQLProducer:
"""Database"""
def __init__(self):
"""Creates a database.
"""
pass
def query(self, sql_query,processed=False, svars=None):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""
if svars is None:
svars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, svars)
return sql_query
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, svars):
if isinstance(val, (int, long)):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, svars)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def _where(self, where, svars):
if isinstance(where, (int, long)):
where = "id = " + sqlparam(where)
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, svars)
return where
def select(self, tables, svars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
"""
if svars is None: svars = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, svars) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
>>> q.query()
'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
_keys = SQLQuery.join(values.keys(), ', ')
_values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
return sql_query
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
        Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "[email protected]"}, {"name": "bar", "email": "[email protected]"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (name, email) VALUES ('foo', '[email protected]'), ('bar', '[email protected]')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
# make sure all rows have same keys.
for v in values:
if v.keys() != keys:
raise ValueError, 'Bad data'
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out-len(values)+1, out+1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, svars=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if svars is None: svars = {}
where = self._where(where, svars)
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, svars=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if svars is None:
svars = {}
where = self._where(where, svars)
q = 'DELETE FROM ' + table
if using:
q += ' USING ' + sqllist(using)
if where:
q += ' WHERE ' + where
return q
sqlproducer = SQLProducer()
| 31.693848
| 115
| 0.510743
| 2,510
| 22,154
| 4.4251
| 0.147809
| 0.015846
| 0.012605
| 0.017106
| 0.231476
| 0.174124
| 0.124066
| 0.094715
| 0.08121
| 0.073467
| 0
| 0.009403
| 0.35673
| 22,154
| 699
| 116
| 31.693848
| 0.769981
| 0.021802
| 0
| 0.235135
| 0
| 0
| 0.033266
| 0.004527
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.008108
| 0.008108
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a4ccded7f4f9f9be895e48e8a31955a7046241e
| 4,371
|
py
|
Python
|
dddppp/settings.py
|
tysonclugg/dddppp
|
22f52d671ca71c2df8d6ac566a1626e5f05b3159
|
[
"MIT"
] | null | null | null |
dddppp/settings.py
|
tysonclugg/dddppp
|
22f52d671ca71c2df8d6ac566a1626e5f05b3159
|
[
"MIT"
] | null | null | null |
dddppp/settings.py
|
tysonclugg/dddppp
|
22f52d671ca71c2df8d6ac566a1626e5f05b3159
|
[
"MIT"
] | null | null | null |
"""
Django settings for dddppp project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import pkg_resources
import pwd
PROJECT_NAME = 'dddppp'
# Enforce a valid POSIX environment
# Get missing environment variables via call to pwd.getpwuid(...)
_PW_CACHE = None
_PW_MAP = {
'LOGNAME': 'pw_name',
'USER': 'pw_name',
'USERNAME': 'pw_name',
'UID': 'pw_uid',
'GID': 'pw_gid',
'HOME': 'pw_dir',
'SHELL': 'pw_shell',
}
for _missing_env in set(_PW_MAP).difference(os.environ):
if _PW_CACHE is None:
_PW_CACHE = pwd.getpwuid(os.getuid())
os.environ[_missing_env] = str(getattr(_PW_CACHE, _PW_MAP[_missing_env]))
del _PW_CACHE, _PW_MAP, pwd
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nfd_lvt=&k#h#$a^_l09j#5%s=mg+0aw=@t84ry$&rps43c33+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dddp',
'dddp.server',
'dddp.accounts',
'dddppp.slides',
]
for (requirement, pth) in [
('django-extensions', 'django_extensions'),
]:
try:
pkg_resources.get_distribution(requirement)
except (
pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict,
):
continue
INSTALLED_APPS.append(pth)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'django.middleware.security.SecurityMiddleware',
]
ROOT_URLCONF = 'dddppp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dddppp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('PGDATABASE', PROJECT_NAME),
'USER': os.environ.get('PGUSER', os.environ['LOGNAME']),
'PASSWORD': os.environ.get('DJANGO_DATABASE_PASSWORD', ''),
'HOST': os.environ.get('PGHOST', ''),
'PORT': os.environ.get('PGPORT', ''),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-au'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# django-secure
# see: https://github.com/carljm/django-secure/ for more options
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_FRAME_DENY = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
DDDPPP_CONTENT_TYPES = []
PROJ_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
| 26.981481
| 77
| 0.695722
| 524
| 4,371
| 5.620229
| 0.429389
| 0.052971
| 0.044822
| 0.050934
| 0.099491
| 0.07708
| 0.07708
| 0.07708
| 0.027165
| 0
| 0
| 0.008782
| 0.166324
| 4,371
| 161
| 78
| 27.149068
| 0.799396
| 0.260581
| 0
| 0
| 1
| 0.010101
| 0.389894
| 0.275733
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.010101
| 0.030303
| 0
| 0.030303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a521621650bf40359a7bc7b59a9b8905567d6ce
| 738
|
py
|
Python
|
app/main/config.py
|
nhattvm11/flask-restful-boilerplate
|
a450c03c1b1db2886b4e00b2c30284a59d9b91e6
|
[
"MIT"
] | null | null | null |
app/main/config.py
|
nhattvm11/flask-restful-boilerplate
|
a450c03c1b1db2886b4e00b2c30284a59d9b91e6
|
[
"MIT"
] | null | null | null |
app/main/config.py
|
nhattvm11/flask-restful-boilerplate
|
a450c03c1b1db2886b4e00b2c30284a59d9b91e6
|
[
"MIT"
] | null | null | null |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.getenv('SECRET_KEY', '')
DEBUG = False
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
class TestingConfig(Config):
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
DEBUG = False
config_by_name = dict(
dev=DevelopmentConfig,
test=TestingConfig,
prod=ProductionConfig
)
key = Config.SECRET_KEY
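A typical way this mapping is consumed in an app factory (a sketch assuming the usual Flask app-factory layout this boilerplate implies; not part of the module itself):
# Illustrative app-factory usage (assumed layout, not part of this module):
from flask import Flask

def create_app(config_name='dev'):
    app = Flask(__name__)
    app.config.from_object(config_by_name[config_name])
    return app

app = create_app('test')
print(app.config['TESTING'])  # True for TestingConfig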
| 21.085714
| 83
| 0.718157
| 86
| 738
| 5.906977
| 0.453488
| 0.047244
| 0.059055
| 0.098425
| 0.38189
| 0.232283
| 0.232283
| 0.232283
| 0.232283
| 0.232283
| 0
| 0
| 0.176152
| 738
| 34
| 84
| 21.705882
| 0.835526
| 0
| 0
| 0.347826
| 0
| 0
| 0.075881
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043478
| 0
| 0.695652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
8a52440233bd3169102a1818d34f5c74f2141148
| 2,823
|
py
|
Python
|
backend/server/converters/schema/ontology.py
|
GenomicsNX/cellxgene
|
f9c744327a3be48c93b47bba71a480e1eeb97835
|
[
"MIT"
] | 8
|
2021-03-17T23:42:41.000Z
|
2022-03-08T13:08:55.000Z
|
backend/server/converters/schema/ontology.py
|
GenomicsNX/cellxgene
|
f9c744327a3be48c93b47bba71a480e1eeb97835
|
[
"MIT"
] | 194
|
2021-08-18T23:52:44.000Z
|
2022-03-30T19:40:41.000Z
|
backend/server/converters/schema/ontology.py
|
GenomicsNX/cellxgene
|
f9c744327a3be48c93b47bba71a480e1eeb97835
|
[
"MIT"
] | 8
|
2021-03-22T17:07:31.000Z
|
2022-03-08T11:07:48.000Z
|
"""Methods for working with ontologies and the OLS."""
from urllib.parse import quote_plus
import requests
OLS_API_ROOT = "http://www.ebi.ac.uk/ols/api"
# Curie means something like CL:0000001
def _ontology_name(curie):
"""Get the name of the ontology from the curie, CL or UBERON for example."""
return curie.split(":")[0]
def _ontology_value(curie):
"""Get the id component of the curie, 0000001 from CL:0000001 for example."""
return curie.split(":")[1]
def _double_encode(url):
"""Double url encode a url. This is required by the OLS API."""
return quote_plus(quote_plus(url))
def _iri(curie):
"""Get the iri from a curie. This is a bit hopeful that they all map to purl.obolibrary.org"""
if _ontology_name(curie) == "EFO":
return f"http://www.ebi.ac.uk/efo/EFO_{_ontology_value(curie)}"
return f"http://purl.obolibrary.org/obo/{_ontology_name(curie)}_{_ontology_value(curie)}"
class OntologyLookupError(Exception):
"""Exception for some problem with looking up ontology information."""
def _ontology_info_url(curie):
"""Get the to make a GET to to get information about an ontology term."""
# If the curie is empty, just return an empty string. This happens when there is no
# valid ontology value.
if not curie:
return ""
else:
return f"{OLS_API_ROOT}/ontologies/{_ontology_name(curie)}/terms/{_double_encode(_iri(curie))}"
def get_ontology_label(curie):
"""For a given curie like 'CL:1000413', get the label like 'endothelial cell of artery'"""
url = _ontology_info_url(curie)
if not url:
return ""
response = requests.get(url)
if not response.ok:
raise OntologyLookupError(
f"Curie {curie} lookup failed, got status code {response.status_code}: {response.text}"
)
return response.json()["label"]
def lookup_candidate_term(label, ontology="cl", method="select"):
"""Lookup candidate terms for a label. This is useful when there is an existing label in a
submitted dataset, and you want to find an appropriate ontology term.
Args:
label: the label to find ontology terms for
ontology: the ontology to search in, cl or uberon or efo for example
method: select or search. search provides much broader results
Returns:
list of (curie, label) tuples returned by OLS
"""
# using OLS REST API [https://www.ebi.ac.uk/ols/docs/api]
url = f"{OLS_API_ROOT}/{method}?q={quote_plus(label)}&ontology={ontology.lower()}"
response = requests.get(url)
if not response.ok:
raise OntologyLookupError(
f"Label {label} lookup failed, got status code {response.status_code}: {response.text}"
)
return [(r["obo_id"], r["label"]) for r in response.json()["response"]["docs"]]
| 32.448276
| 103
| 0.681899
| 412
| 2,823
| 4.563107
| 0.32767
| 0.015957
| 0.03617
| 0.015957
| 0.181915
| 0.130851
| 0.130851
| 0.130851
| 0.130851
| 0.130851
| 0
| 0.013321
| 0.202267
| 2,823
| 86
| 104
| 32.825581
| 0.821492
| 0.41091
| 0
| 0.216216
| 0
| 0
| 0.333333
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 1
| 0.189189
| false
| 0
| 0.054054
| 0
| 0.540541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
8a5581cd0e7ff399dcb5faaf23430dc8e5e4058e
| 4,370
|
py
|
Python
|
figure_code/rate_of_change_tc.py
|
DavisWeaver/fears
|
857cb959a3a111a41df4cf62c4c6a19d3abd33c0
|
[
"MIT"
] | null | null | null |
figure_code/rate_of_change_tc.py
|
DavisWeaver/fears
|
857cb959a3a111a41df4cf62c4c6a19d3abd33c0
|
[
"MIT"
] | null | null | null |
figure_code/rate_of_change_tc.py
|
DavisWeaver/fears
|
857cb959a3a111a41df4cf62c4c6a19d3abd33c0
|
[
"MIT"
] | 1
|
2021-11-09T14:42:01.000Z
|
2021-11-09T14:42:01.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from fears.utils import results_manager, plotter, dir_manager
import os
suffix = '07212021_0001'
data_folder = 'results_' + suffix
exp_info_file = 'experiment_info_' + suffix + '.p'
exp_folders,exp_info = results_manager.get_experiment_results(data_folder,
exp_info_file)
max_cells = exp_info.populations[0].max_cells
n_sims = exp_info.n_sims
k_abs = exp_info.slopes
exp_folders.reverse()
k_abs = np.flip(k_abs)
fig,ax = plt.subplots(nrows=2,ncols=2,figsize=(4,4))
pop = exp_info.populations[0]
ax = ax.reshape((len(k_abs),))
axnum = 0
tc_axes=[]
drug_axes=[]
for exp in exp_folders:
k_abs_t = exp[exp.find('=')+1:]
k_abs_t = float(k_abs_t)
num = np.argwhere(k_abs == k_abs_t)
num = num[0,0]
# generate timecourse axes
tcax = ax[axnum]
# da = tcax.twinx()
sim_files = os.listdir(path=exp)
sim_files = sorted(sim_files)
survive_count = 0
counts_total = None
k=0
while k < len(sim_files):
# for sim in sim_files:
sim = sim_files[k]
sim = exp + os.sep + sim
data = results_manager.get_data(sim)
dc = data[:,-1]
data = data[:,0:-1]
# data = data/np.max(data)
data_t = data[-1,:]
# check to see if any genotypes are at least 10% of the max cell count
if any(data_t >= 1):
survive_count += 1
if counts_total is None:
counts_total = data
else:
counts_total += data
# data = data/np.max(data)
# exp_info.populations[num].counts_log_scale = True
data = data/max_cells
if k==0:
drug_kwargs = {'alpha':0.7,
'color':'black',
'linewidth':2,
'label':'Drug Concentration ($\u03BC$M)'
}
tcax,drug_ax = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
drug_curve=dc,
drug_ax_sci_notation=True,
drug_kwargs=drug_kwargs,
legend_labels=False,
grayscale=True,
color='gray',
linewidth=1,
labelsize=12,
alpha=0.7
)
drug_ax.set_ylabel('')
drug_axes.append( drug_ax )
else:
tcax,da = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
grayscale=True,
color='gray',
legend_labels=False,
linewidth=2,
labelsize=12,
alpha=0.2
)
# drug_ax.set_ylim(0,10**4)
k+=1
if survive_count > 0:
counts_avg = counts_total/survive_count
# counts_avg = counts_avg/np.max(counts_avg)
# counts_avg = counts_total
counts_avg = counts_avg/np.max(counts_avg)
tcax,temp = plotter.plot_timecourse_to_axes(exp_info.populations[num],
counts_avg,
tcax,
labelsize=12)
# t = np.arange(len(dc))
# t = t*exp_info.populations[0].timestep_scale/24
# da.plot(t,dc)
tc_axes.append( tcax )
axnum+=1
| 37.350427
| 85
| 0.415103
| 429
| 4,370
| 3.981352
| 0.296037
| 0.04918
| 0.07377
| 0.04918
| 0.170375
| 0.131148
| 0.131148
| 0.131148
| 0.093677
| 0.065574
| 0
| 0.027752
| 0.505263
| 4,370
| 117
| 86
| 37.350427
| 0.762257
| 0.094737
| 0
| 0.172414
| 0
| 0
| 0.027137
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045977
| 0
| 0.045977
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a59e89d09e32fec1b404a96ad1edf1ccd223adb
| 8,871
|
py
|
Python
|
tests/test_preempt_return.py
|
vpv11110000/pyss
|
bc2226e2e66e0b551a09ae6ab6835b0bb6c7f32b
|
[
"MIT"
] | null | null | null |
tests/test_preempt_return.py
|
vpv11110000/pyss
|
bc2226e2e66e0b551a09ae6ab6835b0bb6c7f32b
|
[
"MIT"
] | 2
|
2017-09-05T11:12:05.000Z
|
2017-09-07T19:23:15.000Z
|
tests/test_preempt_return.py
|
vpv11110000/pyss
|
bc2226e2e66e0b551a09ae6ab6835b0bb6c7f32b
|
[
"MIT"
] | null | null | null |
# #!/usr/bin/python
# -*- coding: utf-8 -*-
# test_preempt_return.py
# pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals
import sys
import os
import random
import unittest
DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep
sys.path.append(DIRNAME_MODULE)
sys.path.append(DIRNAME_MODULE + "pyss" + os.sep)
from pyss import pyssobject
from pyss.pyss_model import PyssModel
from pyss.segment import Segment
from pyss.generate import Generate
from pyss.terminate import Terminate
from pyss import logger
from pyss.table import Table
from pyss.handle import Handle
from pyss.enter import Enter
from pyss.leave import Leave
from pyss.storage import Storage
from pyss.advance import Advance
from pyss.preempt import Preempt
from pyss.g_return import GReturn
from pyss.facility import Facility
from pyss.seize import Seize
from pyss.release import Release
from pyss.transfer import Transfer
from pyss.test import Test
from pyss.pyss_const import *
class TestPreemptReturn(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass

    # @unittest.skip("testing skipping test_preempt_return_001")
    def test_preempt_return_001(self):
        """Preempt - Return test.

        A single transact is created at time 1.
        It preempts facility F_1 for 5 time units.
        It leaves the model at time 6.
        """
        logger.info("--- test_preempt_return_001 ----------------------------------")
        ### MODEL ----------------------------------
        m = PyssModel()
        sgm = Segment(m)
        #
        m[OPTIONS].setAllFalse()
        MAX_TIME = 20
        #
        list_all_transact = []
        #
        MAX_TIME = 20
        #
        F_1 = "F_1"
        # single-channel service facility
        Facility(m, facilityName=F_1)
        #
        def funcTransactTo_list_all_transact(owner, transact):
            # stash transacts in a list
            list_all_transact.append(transact)

        ### SEGMENT ----------------------------
        # a single request is generated at time 1
        Generate(sgm, med_value=None,
                 modificatorFunc=None,
                 first_tx=1,
                 max_amount=1)
        Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
        #
        Preempt(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertIn(F_1, t[FACILITY]))
        #
        Advance(sgm, meanTime=5, modificatorFunc=None)
        GReturn(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
        #
        Terminate(sgm, deltaTerminate=0)
        # RUN ----------------------
        m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # CHECKS ----------------------
        for t in list_all_transact:
            self.assertEqual(t[TIME_CREATED], 1)
            self.assertEqual(t[TERMINATED_TIME], 6)
            print str(["%s:%s" % (k, t[k])
                       for k in t.keys() if k
                       in [TIME_CREATED, TERMINATED_TIME]])

    # @unittest.skip("testing skipping test_preempt_return_002")
    def test_preempt_return_002(self):
        """Preempt - Return test.

        Transact A is created at time 1.
        It is processed by facility F_1 for 3 time units.
        Transact B is created at time 2.
        It preempts the facility for 5 time units.
        Transact B leaves the model at time 7.
        Transact A leaves the model at time 9.
        Processing of transact A was interrupted from time 2 to time 7.
        """
        logger.info("--- test_preempt_return_002 ----------------------------------")
        ### MODEL ----------------------------------
        m = PyssModel()
        sgm = Segment(m)
        #
        m[OPTIONS].setAllFalse()
        MAX_TIME = 20
        # CONSTS
        TRANSACT_A = "A"
        TRANSACT_B = "B"
        #
        list_all_transact = []
        tA = []
        tB = []
        #
        F_1 = "F_1"
        # single-channel service facility
        facility_1 = Facility(m, facilityName=F_1)
        #
        def funcTransactTo_list_all_transact(owner, transact):
            # stash transacts in a list
            list_all_transact.append(transact)

        def setTransactLabel(owner, transact):
            if transact[NUM] == 1:
                transact[LABEL] = TRANSACT_A
                tA.append(transact)
            elif transact[NUM] == 2:
                transact[LABEL] = TRANSACT_B
                tB.append(transact)

        # condition-check function
        def checkTest(o):
            t = m.getCurrentTransact()
            if t[LABEL] == TRANSACT_B:
                return False
            return True

        def printAllTransact(owner, transact):
            print "Time=%s" % str(m.getCurTime())
            print "\n".join([str(t) for t in list_all_transact])
            print "tA=%s" % str(tA[0])
            print "tB=%s" % str(tB[0])

        ### SEGMENT ----------------------------
        # requests are generated starting at time 1
        Generate(sgm,
                 med_value=1,
                 modificatorFunc=None,
                 first_tx=1,
                 max_amount=2)
        # helper operations
        Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
        Handle(sgm, handlerFunc=setTransactLabel)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
        #
        # the first transact passes through; the second is routed to the "to_preempt" label
        Test(sgm, funcCondition=checkTest, move2block="to_preempt")
        # first transact only
        Seize(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertIn(F_1, t[FACILITY]))
        #
        Advance(sgm, meanTime=3, modificatorFunc=None)
        Release(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
        #
        Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel("to_term"))
        #---
        # second transact only
        Preempt(sgm, facilityName=F_1, label="to_preempt")
        # test
        # .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None)))
        Handle(sgm, handlerFunc=printAllTransact)
        Handle(sgm, handlerFunc=lambda o, t: self.assertIn(F_1, t[FACILITY]))
        #
        Handle(sgm, handlerFunc=printAllTransact)
        Advance(sgm, meanTime=5, modificatorFunc=None)
        GReturn(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertEqual(tA[0][REMAIND_TIME], 2))
        Handle(sgm, handlerFunc=lambda o, t: self.assertEqual(tA[0][SCHEDULED_TIME], 9))
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
        #
        Handle(sgm, handlerFunc=printAllTransact)
        # all transacts
        Terminate(sgm, label="to_term", deltaTerminate=0)
        # RUN ----------------------
        m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # CHECKS ----------------------
        for t in list_all_transact:
            # Transact A is created at time 1 and processed by F_1 for 3 time units.
            # Transact B is created at time 2 and preempts the facility for 5 time units.
            # Transact B leaves the model at time 7; transact A leaves at time 9.
            # Processing of transact A was interrupted from time 2 to time 7.
            print str(["%s:%s" % (k, t[k])
                       for k in t.keys() if k
                       in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]])
            if t[LABEL] == TRANSACT_A:
                self.assertEqual(t[TIME_CREATED], 1)
                self.assertEqual(t[REMAIND_TIME], 2)
                self.assertEqual(t[TERMINATED_TIME], 9)
                self.assertListEqual(t[LIFE_TIME_LIST], [
                    {'start': 1, 'state': 'actived'},
                    {'start': 2, 'state': 'preempted'},
                    {'start': 7, 'state': 'actived'},
                    {'start': 9, 'state': 'deleted'}])
            elif t[LABEL] == TRANSACT_B:
                self.assertEqual(t[TIME_CREATED], 2)
                self.assertEqual(t[TERMINATED_TIME], 7)
                self.assertListEqual(t[LIFE_TIME_LIST], [
                    {'start': 2, 'state': 'actived'},
                    {'start': 7, 'state': 'deleted'}])

if __name__ == '__main__':
    unittest.main(module="test_preempt_return")
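# Note: this suite targets Python 2 (print statements). Assuming a Python 2
# interpreter is available on PATH, it can be run directly:
#
#     python2 tests/test_preempt_return.py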
| 35.342629
| 106
| 0.578289
| 1,021
| 8,871
| 4.89618
| 0.199804
| 0.009202
| 0.064013
| 0.041808
| 0.602721
| 0.556511
| 0.524505
| 0.458092
| 0.434687
| 0.417083
| 0
| 0.016611
| 0.294217
| 8,871
| 250
| 107
| 35.484
| 0.781824
| 0.153872
| 0
| 0.356643
| 0
| 0
| 0.048515
| 0.01676
| 0
| 0
| 0
| 0
| 0.132867
| 0
| null | null | 0.013986
| 0.167832
| null | null | 0.06993
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a61523d34a63b6c1b5541a6127f60a7a5d5ec7e
| 4,684
|
py
|
Python
|
PyBank/.ipynb_checkpoints/Pymain-checkpoint.py
|
yash5OG/PythonChallengeW3-Y5
|
4a20ea5bae2d88af5a7d56f43ddc63ac64eaee67
|
[
"MIT"
] | null | null | null |
PyBank/.ipynb_checkpoints/Pymain-checkpoint.py
|
yash5OG/PythonChallengeW3-Y5
|
4a20ea5bae2d88af5a7d56f43ddc63ac64eaee67
|
[
"MIT"
] | null | null | null |
PyBank/.ipynb_checkpoints/Pymain-checkpoint.py
|
yash5OG/PythonChallengeW3-Y5
|
4a20ea5bae2d88af5a7d56f43ddc63ac64eaee67
|
[
"MIT"
] | null | null | null |
{
"cells": [
{
"cell_type": "code",
"execution_count": 64,
"metadata": {},
"outputs": [],
"source": [
"# Import libraries\n",
"import os, csv"
]
},
{
"cell_type": "code",
"execution_count": 65,
"metadata": {},
"outputs": [],
"source": [
"#variables for the script\n",
"months = [] #list of months\n",
"pl =[] #list of monthly PL\n",
"pl_changes = [] #list of P&L Changes\n",
"n_months = 0 #count of months\n",
"pl_total = 0 #total of P&L\n",
"plc = 0 #variable to track PL changes\n",
"avg_pl_change = 0 #average of changes in PL\n",
"maxpl = 0 #maximum increase in profits\n",
"minpl = 0 #maximum decrease in losses\n",
"max_i = 0 #index for max pl\n",
"min_i = 0 #index for min pl\n",
"\n",
"#read the resource file\n",
"bankcsv = os.path.join(\".\", \"Resources\", \"budget_data.csv\") #set path\n",
"\n",
"\n",
"#read file\n",
"with open(bankcsv, 'r') as csv_file:\n",
" csv_reader = csv.reader(csv_file,delimiter=\",\")\n",
" header = next(csv_reader)\n",
" \n",
" #for loop to update the counters and lists\n",
" for row in csv_reader:\n",
" n_months += 1\n",
" pl_total += int(row[1])\n",
" pl.append(row[1])\n",
" months.append(row[0])"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [],
"source": [
"# loop to track the PL change values\n",
"pl_changes = [] \n",
"plc = int(pl[0])\n",
"for i in range(1, len(pl)):\n",
" pl_changes.append(int(pl[i]) - plc)\n",
" plc = int(pl[i])\n",
" i += 1\n",
"#print(pl_changes)"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [],
"source": [
"#calculate the average PL Changes, max and min\n",
"avg_pl_change = sum(pl_changes) / len(pl_changes)\n",
"maxpl = max(pl_changes)\n",
"minpl = min(pl_changes)\n",
"#print(avg_pl_change, maxpl, minpl)\n",
"#print(pl_changes.index(maxpl))\n",
"#print(len(pl_changes))"
]
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Financial Analysis\n",
"---------------------------------------------------------------------\n",
"Total Months: 86\n",
"Total: $38382578\n",
"Average Change: $-2315.12\n",
"Greatest Increase in Profits: Feb-2012 ($1926159)\n",
"Greatest Decrease in Profits: Sep-2013 ($-2196167)\n"
]
}
],
"source": [
"#find dates for max and min PL changes\n",
"max_i = pl_changes.index(maxpl) +1 #adding +1 since the changes are calculated one row above\n",
"min_i = pl_changes.index(minpl) +1\n",
"\n",
"maxmonth = months[max_i]\n",
"minmonth = months[min_i]\n",
"\n",
"#print output to the terminal\n",
"\n",
"print(\"Financial Analysis\")\n",
"print(\"-\"*69)\n",
"print(f\"Total Months: {n_months}\")\n",
"print(f\"Total: ${round(pl_total,2)}\")\n",
"print(f\"Average Change: ${round(avg_pl_change,2)}\")\n",
"print(f\"Greatest Increase in Profits: {maxmonth} (${maxpl})\")\n",
"print(f\"Greatest Decrease in Profits: {minmonth} (${minpl})\")\n"
]
},
{
"cell_type": "code",
"execution_count": 69,
"metadata": {},
"outputs": [],
"source": [
"# write summary to txt file\n",
"output = os.path.join(\".\",\"Analysis\", \"summary.txt\")\n",
"\n",
"# use \"\\n\" to create a new line\n",
"with open(output, 'w') as output:\n",
" output.write(\"Financial Analysis\\n\")\n",
" output.write(\"-\"*69 + \"\\n\")\n",
" output.write(f\"Total Months: {n_months}\\n\")\n",
" output.write(f\"Total: ${round(pl_total,2)}\\n\")\n",
" output.write(f\"Average Change: ${round(avg_pl_change,2)}\\n\")\n",
" output.write(f\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\n\")\n",
" output.write(f\"Greatest Decrease in Profits: {minmonth} (${minpl})\\n\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 29.093168
| 104
| 0.51281
| 592
| 4,684
| 3.952703
| 0.256757
| 0.01453
| 0.035897
| 0.053846
| 0.230342
| 0.185897
| 0.142735
| 0.095727
| 0.02735
| 0
| 0
| 0.024327
| 0.254056
| 4,684
| 160
| 105
| 29.275
| 0.645392
| 0
| 0
| 0.20625
| 0
| 0.00625
| 0.599701
| 0.058497
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.0125
| 0
| 0.0125
| 0.075
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a64487109643353c0e84bbee6dfb1cf09044927
| 834
|
py
|
Python
|
beta_reconstruction/crystal_relations.py
|
LightForm-group/beta-reconstruction
|
67584f75ee08690226595c5f9dc75dfd164a11a0
|
[
"MIT"
] | null | null | null |
beta_reconstruction/crystal_relations.py
|
LightForm-group/beta-reconstruction
|
67584f75ee08690226595c5f9dc75dfd164a11a0
|
[
"MIT"
] | 1
|
2020-01-07T12:41:26.000Z
|
2020-01-07T12:50:40.000Z
|
beta_reconstruction/crystal_relations.py
|
LightForm-group/beta-reconstruction
|
67584f75ee08690226595c5f9dc75dfd164a11a0
|
[
"MIT"
] | null | null | null |
import numpy as np
from defdap.quat import Quat
hex_syms = Quat.symEqv("hexagonal")
# subset of hexagonal symmetries that give unique orientations when the
# Burgers transformation is applied
unq_hex_syms = [
    hex_syms[0],
    hex_syms[5],
    hex_syms[4],
    hex_syms[2],
    hex_syms[10],
    hex_syms[11]
]
cubic_syms = Quat.symEqv("cubic")
# subset of cubic symmetries that give unique orientations when the
# Burgers transformation is applied
unq_cub_syms = [
    cubic_syms[0],
    cubic_syms[7],
    cubic_syms[9],
    cubic_syms[1],
    cubic_syms[22],
    cubic_syms[16],
    cubic_syms[12],
    cubic_syms[15],
    cubic_syms[4],
    cubic_syms[8],
    cubic_syms[21],
    cubic_syms[20]
]
# HCP -> BCC
burg_eulers = np.array([135, 90, 354.74]) * np.pi / 180
burg_trans = Quat.fromEulerAngles(*burg_eulers).conjugate
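# Illustrative sketch only (hypothetical usage; the exact composition order
# depends on defdap's quaternion conventions): mapping an alpha (HCP)
# orientation into the beta (BCC) frame for each unique hexagonal symmetry
# would look roughly like
#
#     alpha_ori = Quat.fromEulerAngles(0.1, 0.2, 0.3)
#     beta_variants = [burg_trans * (sym * alpha_ori) for sym in unq_hex_syms]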
| 22.540541
| 71
| 0.689448
| 127
| 834
| 4.314961
| 0.448819
| 0.213504
| 0.051095
| 0.087591
| 0.277372
| 0.277372
| 0.277372
| 0.277372
| 0.277372
| 0.277372
| 0
| 0.058122
| 0.195444
| 834
| 36
| 72
| 23.166667
| 0.758569
| 0.256595
| 0
| 0
| 0
| 0
| 0.022801
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a66a4e65b6c15a92cb15d2436631fabac501551
| 4,314
|
py
|
Python
|
pint/testsuite/test_definitions.py
|
s-avni/pint
|
4e33d44437991bf7c5e30977643f42ebd6ed40da
|
[
"BSD-3-Clause"
] | null | null | null |
pint/testsuite/test_definitions.py
|
s-avni/pint
|
4e33d44437991bf7c5e30977643f42ebd6ed40da
|
[
"BSD-3-Clause"
] | null | null | null |
pint/testsuite/test_definitions.py
|
s-avni/pint
|
4e33d44437991bf7c5e30977643f42ebd6ed40da
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, print_function, absolute_import
from pint.util import (UnitsContainer)
from pint.converters import (ScaleConverter, OffsetConverter)
from pint.definitions import (Definition, PrefixDefinition, UnitDefinition,
DimensionDefinition, AliasDefinition)
from pint.testsuite import BaseTestCase
class TestDefinition(BaseTestCase):

    def test_invalid(self):
        self.assertRaises(ValueError, Definition.from_string, 'x = [time] * meter')
        self.assertRaises(ValueError, Definition.from_string, '[x] = [time] * meter')

    def test_prefix_definition(self):
        for definition in ('m- = 1e-3', 'm- = 10**-3', 'm- = 0.001'):
            x = Definition.from_string(definition)
            self.assertIsInstance(x, PrefixDefinition)
            self.assertEqual(x.name, 'm')
            self.assertEqual(x.aliases, ())
            self.assertEqual(x.converter.to_reference(1000), 1)
            self.assertEqual(x.converter.from_reference(0.001), 1)
            self.assertEqual(str(x), 'm')

        x = Definition.from_string('kilo- = 1e-3 = k-')
        self.assertIsInstance(x, PrefixDefinition)
        self.assertEqual(x.name, 'kilo')
        self.assertEqual(x.aliases, ())
        self.assertEqual(x.symbol, 'k')
        self.assertEqual(x.converter.to_reference(1000), 1)
        self.assertEqual(x.converter.from_reference(.001), 1)

        x = Definition.from_string('kilo- = 1e-3 = k- = anotherk-')
        self.assertIsInstance(x, PrefixDefinition)
        self.assertEqual(x.name, 'kilo')
        self.assertEqual(x.aliases, ('anotherk', ))
        self.assertEqual(x.symbol, 'k')
        self.assertEqual(x.converter.to_reference(1000), 1)
        self.assertEqual(x.converter.from_reference(.001), 1)

    def test_baseunit_definition(self):
        x = Definition.from_string('meter = [length]')
        self.assertIsInstance(x, UnitDefinition)
        self.assertTrue(x.is_base)
        self.assertEqual(x.reference, UnitsContainer({'[length]': 1}))

    def test_unit_definition(self):
        x = Definition.from_string('coulomb = ampere * second')
        self.assertIsInstance(x, UnitDefinition)
        self.assertFalse(x.is_base)
        self.assertIsInstance(x.converter, ScaleConverter)
        self.assertEqual(x.converter.scale, 1)
        self.assertEqual(x.reference, UnitsContainer(ampere=1, second=1))

        x = Definition.from_string('faraday = 96485.3399 * coulomb')
        self.assertIsInstance(x, UnitDefinition)
        self.assertFalse(x.is_base)
        self.assertIsInstance(x.converter, ScaleConverter)
        self.assertEqual(x.converter.scale, 96485.3399)
        self.assertEqual(x.reference, UnitsContainer(coulomb=1))

        x = Definition.from_string('degF = 9 / 5 * kelvin; offset: 255.372222')
        self.assertIsInstance(x, UnitDefinition)
        self.assertFalse(x.is_base)
        self.assertIsInstance(x.converter, OffsetConverter)
        self.assertEqual(x.converter.scale, 9/5)
        self.assertEqual(x.converter.offset, 255.372222)
        self.assertEqual(x.reference, UnitsContainer(kelvin=1))

        x = Definition.from_string('turn = 6.28 * radian = _ = revolution = = cycle = _')
        self.assertIsInstance(x, UnitDefinition)
        self.assertEqual(x.name, 'turn')
        self.assertEqual(x.aliases, ('revolution', 'cycle'))
        self.assertEqual(x.symbol, 'turn')
        self.assertFalse(x.is_base)
        self.assertIsInstance(x.converter, ScaleConverter)
        self.assertEqual(x.converter.scale, 6.28)
        self.assertEqual(x.reference, UnitsContainer(radian=1))

    def test_dimension_definition(self):
        x = DimensionDefinition('[time]', '', (), converter='')
        self.assertTrue(x.is_base)
        self.assertEqual(x.name, '[time]')

        x = Definition.from_string('[speed] = [length]/[time]')
        self.assertIsInstance(x, DimensionDefinition)
        self.assertEqual(x.reference, UnitsContainer({'[length]': 1, '[time]': -1}))

    def test_alias_definition(self):
        x = Definition.from_string("@alias meter = metro = metr")
        self.assertIsInstance(x, AliasDefinition)
        self.assertEqual(x.name, "meter")
        self.assertEqual(x.aliases, ("metro", "metr"))
| 44.474227
| 89
| 0.660176
| 477
| 4,314
| 5.878407
| 0.194969
| 0.171184
| 0.17689
| 0.098074
| 0.618759
| 0.502496
| 0.46505
| 0.429743
| 0.36234
| 0.322397
| 0
| 0.028338
| 0.206537
| 4,314
| 96
| 90
| 44.9375
| 0.790827
| 0.004868
| 0
| 0.35443
| 0
| 0
| 0.098112
| 0
| 0
| 0
| 0
| 0
| 0.683544
| 1
| 0.075949
| false
| 0
| 0.063291
| 0
| 0.151899
| 0.012658
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a69c6a560d7f1d6a12a9bb69281971b56733693
| 1,637
|
py
|
Python
|
setup.py
|
xbabka01/filetype.py
|
faba42b86988bd21a50d5b20919ecff0c6a84957
|
[
"MIT"
] | null | null | null |
setup.py
|
xbabka01/filetype.py
|
faba42b86988bd21a50d5b20919ecff0c6a84957
|
[
"MIT"
] | null | null | null |
setup.py
|
xbabka01/filetype.py
|
faba42b86988bd21a50d5b20919ecff0c6a84957
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from setuptools import find_packages, setup
setup(
    name='filetype',
    version='1.0.7',
    description='Infer file type and MIME type of any file/buffer. '
                'No external dependencies.',
    long_description=codecs.open('README.rst', 'r',
                                 encoding='utf-8', errors='ignore').read(),
    keywords='file libmagic magic infer numbers magicnumbers discovery mime '
             'type kind',
    url='https://github.com/h2non/filetype.py',
    download_url='https://github.com/h2non/filetype.py/tarball/master',
    author='Tomas Aparicio',
    author_email='[email protected]',
    license='MIT',
    license_files=['LICENSE'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: System',
        'Topic :: System :: Filesystems',
        'Topic :: Utilities'],
    platforms=['any'],
    packages=find_packages(exclude=['dist', 'build', 'docs', 'tests',
                                    'examples']),
    package_data={'filetype': ['LICENSE', '*.md']},
    zip_safe=True)
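# Standard setuptools/pip usage, not specific to this package: install from a
# source checkout with `pip install .`, or `pip install -e .` for development.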
| 38.069767
| 77
| 0.588882
| 167
| 1,637
| 5.724551
| 0.622754
| 0.099372
| 0.130753
| 0.135983
| 0.066946
| 0.066946
| 0.066946
| 0
| 0
| 0
| 0
| 0.013992
| 0.257789
| 1,637
| 42
| 78
| 38.97619
| 0.77284
| 0.025657
| 0
| 0
| 0
| 0
| 0.52919
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a6d51f8a422fff8bc79749ffb6d71189dc006bc
| 2,509
|
py
|
Python
|
vframe_cli/commands/templates/image-mp.py
|
julescarbon/vframe
|
0798841fa9eb7e1252e4cdf71d68d991c26acab8
|
[
"MIT"
] | 1
|
2021-05-15T11:06:39.000Z
|
2021-05-15T11:06:39.000Z
|
vframe_cli/commands/templates/image-mp.py
|
julescarbon/vframe
|
0798841fa9eb7e1252e4cdf71d68d991c26acab8
|
[
"MIT"
] | null | null | null |
vframe_cli/commands/templates/image-mp.py
|
julescarbon/vframe
|
0798841fa9eb7e1252e4cdf71d68d991c26acab8
|
[
"MIT"
] | null | null | null |
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
@click.command('')
@click.option('-i', '--input', 'opt_dir_in', required=True)
@click.option('-r', '--recursive', 'opt_recursive', is_flag=True)
@click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True,
  help='Glob extension')
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
  help='Slice list of files')
@click.option('-t', '--threads', 'opt_threads', default=None)
@click.pass_context
def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads):
  """Multiprocessor image template"""

  # ------------------------------------------------
  # imports

  from os.path import join
  from pathlib import Path
  from dataclasses import asdict

  import numpy as np
  import cv2 as cv
  from tqdm import tqdm
  from pathos.multiprocessing import ProcessingPool as Pool
  from pathos.multiprocessing import cpu_count

  from vframe.settings import app_cfg
  from vframe.settings.modelzoo_cfg import modelzoo
  from vframe.models.dnn import DNN
  from vframe.image.dnn_factory import DNNFactory
  from vframe.utils import file_utils
  from vframe.utils.video_utils import FileVideoStream, mediainfo

  log = app_cfg.LOG

  # set N threads
  if not opt_threads:
    opt_threads = cpu_count()  # maximum

  # glob items
  fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive)
  if any(opt_slice):
    fp_items = fp_items[opt_slice[0]:opt_slice[1]]
  log.info(f'Processing: {len(fp_items):,} files')

  # -----------------------------------------------------------
  # start pool worker

  def pool_worker(pool_item):

    # init threaded video reader
    fp = pool_item['fp']
    result = {'fp': fp}

    # add media metadata
    im = cv.imread(fp)
    for i in range(20):
      im = cv.blur(im, (35,35))

    return result

  # end pool worker
  # -----------------------------------------------------------

  # convert file list into object with
  pool_items = [{'fp': fp} for fp in fp_items]

  # init processing pool iterator
  # use imap instead of map via @hkyi Stack Overflow 41920124
  desc = f'image-mp x{opt_threads}'
  with Pool(opt_threads) as p:
    pool_results = list(tqdm(p.imap(pool_worker, pool_items), total=len(fp_items), desc=desc))
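# Example invocation (hypothetical paths/values; the exact entry point depends
# on how vframe_cli registers this command template):
#
#     ./cli image-mp -i ./images -r -e jpg -e png --slice 0 100 -t 8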
| 28.83908
| 94
| 0.610602
| 326
| 2,509
| 4.552147
| 0.432515
| 0.040431
| 0.016173
| 0.014825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009915
| 0.155839
| 2,509
| 87
| 94
| 28.83908
| 0.690746
| 0.209247
| 0
| 0
| 0
| 0
| 0.111296
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0.023256
| 0.348837
| 0
| 0.418605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
8a6dd286ad198b0a16465871a4cd84854d419ad0
| 1,824
|
py
|
Python
|
lib/galaxy/tool_util/deps/container_resolvers/__init__.py
|
sneumann/galaxy
|
f6011bab5b8adbabae4986a45849bb9158ffc8bb
|
[
"CC-BY-3.0"
] | 1
|
2019-07-27T19:30:55.000Z
|
2019-07-27T19:30:55.000Z
|
lib/galaxy/tool_util/deps/container_resolvers/__init__.py
|
userssss/galaxy
|
9662164ad68b39adf5a5606a7aa8e388f6a79f1e
|
[
"CC-BY-3.0"
] | 4
|
2021-02-08T20:28:34.000Z
|
2022-03-02T02:52:55.000Z
|
lib/galaxy/tool_util/deps/container_resolvers/__init__.py
|
userssss/galaxy
|
9662164ad68b39adf5a5606a7aa8e388f6a79f1e
|
[
"CC-BY-3.0"
] | 1
|
2018-05-30T07:38:54.000Z
|
2018-05-30T07:38:54.000Z
|
"""The module defines the abstract interface for resolving container images for tool execution."""
from abc import (
ABCMeta,
abstractmethod,
abstractproperty,
)
import six
from galaxy.util.dictifiable import Dictifiable
@six.python_2_unicode_compatible
@six.add_metaclass(ABCMeta)
class ContainerResolver(Dictifiable):
    """Description of a technique for resolving container images for tool execution."""

    # Keys for dictification.
    dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies']

    can_uninstall_dependencies = False

    def __init__(self, app_info=None, **kwds):
        """Default initializer for ``ContainerResolver`` subclasses."""
        self.app_info = app_info
        self.resolver_kwds = kwds

    def _get_config_option(self, key, default=None):
        """Look in resolver-specific settings for option and then fallback to
        global settings.
        """
        if self.app_info and hasattr(self.app_info, key):
            return getattr(self.app_info, key)
        else:
            return default

    @abstractmethod
    def resolve(self, enabled_container_types, tool_info, **kwds):
        """Find a container matching all supplied requirements for tool.

        The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description
        of the tool and its requirements.
        """

    @abstractproperty
    def resolver_type(self):
        """Short label for the type of container resolution."""

    def _container_type_enabled(self, container_description, enabled_container_types):
        """Return a boolean indicating if the specified container type is enabled."""
        return container_description.type in enabled_container_types

    def __str__(self):
        return "%s[]" % self.__class__.__name__
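# Minimal sketch of a concrete resolver (hypothetical, illustrative only; not
# part of this module). A subclass supplies resolver_type and a resolve() that
# returns a matching container description or None:
#
#     class FirstEnabledContainerResolver(ContainerResolver):
#         resolver_type = "first_enabled"
#
#         def resolve(self, enabled_container_types, tool_info, **kwds):
#             for container_description in tool_info.container_descriptions:
#                 if self._container_type_enabled(container_description, enabled_container_types):
#                     return container_description
#             return None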
| 33.777778
| 98
| 0.707237
| 215
| 1,824
| 5.75814
| 0.44186
| 0.033926
| 0.044426
| 0.043619
| 0.069467
| 0.069467
| 0.069467
| 0
| 0
| 0
| 0
| 0.000695
| 0.211075
| 1,824
| 53
| 99
| 34.415094
| 0.859625
| 0.354167
| 0
| 0
| 0
| 0
| 0.039091
| 0.023636
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.107143
| 0.035714
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a748a255fe78209cc5338aaab9ff134d24befab
| 1,134
|
py
|
Python
|
baopig/ressources/ressources.py
|
ChreSyr/baopig
|
6264ab9a851b1ed0a031292abe7f159a53b3fc5e
|
[
"MIT"
] | null | null | null |
baopig/ressources/ressources.py
|
ChreSyr/baopig
|
6264ab9a851b1ed0a031292abe7f159a53b3fc5e
|
[
"MIT"
] | null | null | null |
baopig/ressources/ressources.py
|
ChreSyr/baopig
|
6264ab9a851b1ed0a031292abe7f159a53b3fc5e
|
[
"MIT"
] | null | null | null |
from baopig.pybao.objectutilities import Object
from baopig.pybao.issomething import *
class RessourcePack:

    def config(self, **kwargs):
        for name, value in kwargs.items():
            self.__setattr__('_'+name, value)


class FontsRessourcePack(RessourcePack):

    def __init__(self,
                 file=None,
                 height=15,
                 color=(0, 0, 0),
                 ):
        assert is_color(color)
        self._file = file
        self._height = height
        self._color = color

    file = property(lambda self: self._file)
    color = property(lambda self: self._color)
    height = property(lambda self: self._height)


class ScenesRessourcePack(RessourcePack):

    def __init__(self,
                 background_color=(170, 170, 170),
                 ):
        assert is_color(background_color)
        self._background_color = background_color

    background_color = property(lambda self: self._background_color)


# TODO : ButtonRessourcePack.style.create_surface(size)

class _RessourcePack:

    def __init__(self):
        self.font = FontsRessourcePack()
        self.scene = ScenesRessourcePack()


ressources = _RessourcePack()
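# Minimal usage sketch (illustrative only, not part of the original module):
#
#     from baopig.ressources.ressources import ressources
#     ressources.font.config(height=18, color=(20, 20, 20))
#     assert ressources.font.height == 18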
| 19.894737
| 68
| 0.666667
| 120
| 1,134
| 6
| 0.358333
| 0.125
| 0.1
| 0.122222
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016222
| 0.238977
| 1,134
| 56
| 69
| 20.25
| 0.818076
| 0.046737
| 0
| 0.129032
| 0
| 0
| 0.000929
| 0
| 0
| 0
| 0
| 0.017857
| 0.064516
| 1
| 0.129032
| false
| 0
| 0.064516
| 0
| 0.451613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a75b4a74e6ecd635d9404db9ea5df06d5114069
| 10,282
|
py
|
Python
|
bufr_extract_unique_stations.py
|
glamod/glamod-misc
|
4c8743dd3aa436377017c49bec990b11fe1c6f7d
|
[
"BSD-3-Clause"
] | null | null | null |
bufr_extract_unique_stations.py
|
glamod/glamod-misc
|
4c8743dd3aa436377017c49bec990b11fe1c6f7d
|
[
"BSD-3-Clause"
] | 16
|
2018-10-23T08:06:18.000Z
|
2018-10-30T10:20:01.000Z
|
bufr_extract_unique_stations.py
|
glamod/glamod-misc
|
4c8743dd3aa436377017c49bec990b11fe1c6f7d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python2.7
"""
Extract unique set of station locations (and names) along with number of obs
RJHD - Exeter - October 2017
"""
# ECMWF import defaults
import traceback
import sys
from eccodes import *
# RJHD imports
import cartopy
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import gc
VERBOSE = 1 # verbose error reporting.
ATTRS = [
    'code',
    'units',
    'scale',
    'reference',
    'width'
]
INTMDI = 2147483647
#***************************************************
def process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year):

    infile = open(infilename)

    year = int(infilename.split(".")[0].split("_")[-1])

    cmatch = 0
    counter = 0
    # loop all messages (with stop statement)
    while 1:

        """OPEN MESSAGE"""
        # get handle for message
        bufr = codes_bufr_new_from_file(infile)
        if bufr is None:
            break

        if counter % 100000 == 0:
            print "message: {:d}".format(counter)

        # we need to instruct ecCodes to expand all the descriptors
        # i.e. unpack the data values
        codes_set(bufr, 'unpack', 1)

        """ITERATOR TO EXTRACT KEYS"""
        these_keys = []
        # get BUFR key iterator
        iterid = codes_bufr_keys_iterator_new(bufr)

        # loop over the keys
        while codes_bufr_keys_iterator_next(iterid):
            # print key name
            keyname = codes_bufr_keys_iterator_get_name(iterid)
            # print(" %s" % keyname)
            these_keys += [keyname]

        # delete the key iterator
        codes_bufr_keys_iterator_delete(iterid)

        # Use these to select obs from land/marine surface
        name_keys = ["#1#shipOrMobileLandStationIdentifier", "#1#stationNumber"]

        processed = False
        for nk in name_keys:
            if nk in these_keys:
                try:
                    name = codes_get(bufr, nk)
                    lat = codes_get(bufr, "#1#latitude")
                    lon = codes_get(bufr, "#1#longitude")

                    sloc = tloc = nloc = [-1]
                    if name in station_names:
                        sloc, = np.where(station_names == name)
                    if lat in latitudes:
                        tloc, = np.where(latitudes == lat)
                    if lon in longitudes:
                        nloc, = np.where(longitudes == lon)

                    if tloc[0] == -1 and nloc[0] == -1:
                        # if not in list, then add
                        station_names = np.append(station_names, name)
                        latitudes = np.append(latitudes, lat)
                        longitudes = np.append(longitudes, lon)
                        observations = np.append(observations, 1)
                        start_year = np.append(start_year, year)
                        end_year = np.append(end_year, year)

                        # allow splitting of land and marine/mobile
                        if nk == "#1#stationNumber":
                            fixed_station = np.append(fixed_station, True)
                        else:
                            fixed_station = np.append(fixed_station, False)

                    elif (tloc[0] != -1 or nloc[0] != -1) and tloc[0] != nloc[0]:
                        # add if one element of position is unique
                        station_names = np.append(station_names, name)
                        latitudes = np.append(latitudes, lat)
                        longitudes = np.append(longitudes, lon)
                        observations = np.append(observations, 1)
                        start_year = np.append(start_year, year)
                        end_year = np.append(end_year, year)

                        # allow splitting of land and marine/mobile
                        if nk == "#1#stationNumber":
                            fixed_station = np.append(fixed_station, True)
                        else:
                            fixed_station = np.append(fixed_station, False)

                    elif tloc[0] != -1 and tloc[0] == nloc[0]:
                        # if position matches exactly, up observation counter
                        observations[tloc[0]] += 1
                        end_year[tloc[0]] = year

                        # allow splitting of land and marine/mobile
                        if nk == "#1#stationNumber":
                            if fixed_station[tloc[0]] != True:
                                # if listed as land and now marine, take marine
                                fixed_station[tloc[0]] = False
                        else:
                            if fixed_station[tloc[0]] != False:
                                # easier to leave as mobile/marine than to move
                                # hopefully will stand out later
                                pass
                    else:
                        cmatch += 1

                    processed = True

                except CodesInternalError:
                    raw_input("key error?")

        # check for new keys which give station ID information
        if not processed:
            other_keys = ["#1#carrierBalloonOrAircraftIdentifier", "#1#aircraftFlightNumber"]
            new_key = True
            for ok in other_keys:
                if ok in these_keys: new_key = False
            if new_key:
                raw_input(these_keys)

        # if counter > 10000: break
        counter += 1

        codes_release(bufr)

    # print "Number of unique locations in this year: {}".format(len(latitudes))
    return station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year # process_file

#***************************************************
def scatter_map(outname, data, lons, lats, cmap, bounds, cb_label, title = "", figtext = "", doText = False):
    '''
    Standard scatter map

    :param str outname: output filename root
    :param array data: data to plot
    :param array lons: longitudes
    :param array lats: latitudes
    :param obj cmap: colourmap to use
    :param array bounds: bounds for discrete colormap
    :param str cb_label: colorbar label
    '''

    norm = mpl.cm.colors.BoundaryNorm(bounds, cmap.N)

    fig = plt.figure(figsize=(10, 6.5))
    plt.clf()
    ax = plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson())
    ax.gridlines()  # draw_labels=True)
    ax.add_feature(cartopy.feature.LAND, zorder=0, facecolor="0.9", edgecolor="k")
    ax.coastlines()

    ext = ax.get_extent()  # save the original extent

    scatter = plt.scatter(lons, lats, c=data, cmap=cmap, norm=norm, s=10, \
                          transform=cartopy.crs.Geodetic(), edgecolor="r", linewidth=0.1)

    cb = plt.colorbar(scatter, orientation='horizontal', pad=0.05, fraction=0.05, \
                      aspect=30, ticks=bounds[1:-1], label=cb_label, drawedges=True)

    # thicken border of colorbar and the dividers
    # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib
    # cb.set_ticklabels(["{:g}".format(b) for b in bounds[1:-1]])
    # cb.outline.set_color('k')
    # cb.outline.set_linewidth(2)
    cb.dividers.set_color('k')
    cb.dividers.set_linewidth(2)

    ax.set_extent(ext, ax.projection)  # fix the extent change from colormesh

    plt.title(title)
    if doText: plt.text(0.01, 0.98, "#stations: {}".format(data.shape[0]), transform=ax.transAxes, fontsize=10)

    plt.savefig(outname)
    plt.close()

    return  # scatter_map

#***************************************************
def main(ms = "era40_", year = 1980):

    LOCS = "/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/"

    print year

    station_names = np.array([])
    fixed_station = np.array([])
    latitudes = np.array([])
    longitudes = np.array([])
    observations = np.array([])
    start_year = np.array([])
    end_year = np.array([])

    if ms == "erai_" and year < 1979:
        return
    else:
        INFILE = "{}mars_{}{}.bufr".format(LOCS, ms, year)

        try:
            station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year = \
                process_file(INFILE, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year)
        except CodesInternalError as err:
            if VERBOSE:
                traceback.print_exc(file=sys.stderr)
            else:
                sys.stderr.write(err.msg + '\n')

        land = np.where(np.array(fixed_station) == True)
        marine = np.where(np.array(fixed_station) == False)

        bounds = np.linspace(0, max(observations), 10).astype(int)
        cmap = plt.cm.YlOrRd_r

        if ms == "erai_":
            title = "MARS - SYNOP - {}".format(year)
        else:
            title = "MARS - ERA40 - {}".format(year)

        scatter_map("mars_{}{}_land_observations.png".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap, bounds, "Number of Observations", title, doText = True)
        scatter_map("mars_{}{}_marine_observations.png".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, "Number of Observations", title)

        station_names = 0
        fixed_station = 0
        latitudes = 0
        longitudes = 0
        observations = 0
        start_year = 0
        end_year = 0
        land = 0
        marine = 0
        gc.collect()

    return  # main

#***************************************************
if __name__ == "__main__":

    import argparse

    # set up keyword arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--ms', dest='ms', action='store', default = "era40_",
                        help='Run on ERA40 ["era40_"] (default) or ERA-I ["erai_"] data')
    parser.add_argument('--year', dest='year', action='store', default = 1980,
                        help='Which year to process - default 1980')

    args = parser.parse_args()

    main(ms = args.ms, year = args.year)

    sys.exit()
#***************************************************
# END
#***************************************************
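# Example invocation (the LOCS path above is site-specific to JASMIN):
#
#     ./bufr_extract_unique_stations.py --ms era40_ --year 1985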
| 33.167742
| 184
| 0.540751
| 1,142
| 10,282
| 4.74606
| 0.283713
| 0.042066
| 0.012177
| 0.017712
| 0.240959
| 0.228967
| 0.192066
| 0.1869
| 0.1869
| 0.1869
| 0
| 0.025263
| 0.326298
| 10,282
| 309
| 185
| 33.275081
| 0.757182
| 0.15814
| 0
| 0.181287
| 0
| 0
| 0.080679
| 0.028656
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.005848
| 0.052632
| null | null | 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a7922d582e70ee076c3374be8cdb74d33423c9b
| 1,038
|
py
|
Python
|
tests/ast/nodes/test_from_node.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 1,471
|
2017-12-25T05:47:57.000Z
|
2019-11-19T07:47:53.000Z
|
tests/ast/nodes/test_from_node.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 915
|
2019-11-21T05:48:16.000Z
|
2022-03-31T23:51:03.000Z
|
tests/ast/nodes/test_from_node.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 321
|
2017-12-25T16:37:21.000Z
|
2019-11-15T17:44:06.000Z
|
from vyper import ast as vy_ast
def test_output_class():
    old_node = vy_ast.parse_to_ast("foo = 42")
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert isinstance(new_node, vy_ast.Int)


def test_source():
    old_node = vy_ast.parse_to_ast("foo = 42")
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert old_node.src == new_node.src
    assert old_node.node_source_code == new_node.node_source_code


def test_kwargs():
    old_node = vy_ast.parse_to_ast("42").body[0].value
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert old_node.value == 42
    assert new_node.value == 666


def test_compare_nodes():
    old_node = vy_ast.parse_to_ast("foo = 42")
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert not vy_ast.compare_nodes(old_node, new_node)


def test_new_node_has_no_parent():
    old_node = vy_ast.parse_to_ast("foo = 42")
    new_node = vy_ast.Int.from_node(old_node, value=666)

    assert new_node._parent is None
    assert new_node._depth == 0
| 25.95
| 65
| 0.719653
| 186
| 1,038
| 3.629032
| 0.198925
| 0.145185
| 0.146667
| 0.106667
| 0.561481
| 0.539259
| 0.539259
| 0.506667
| 0.506667
| 0.506667
| 0
| 0.037166
| 0.17052
| 1,038
| 39
| 66
| 26.615385
| 0.746806
| 0
| 0
| 0.375
| 0
| 0
| 0.032755
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.208333
| false
| 0
| 0.041667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a79bd5eb2532e1ffdd3b87d6be696b8303afc7f
| 2,624
|
py
|
Python
|
generator/modules/opencv.py
|
dayta-ai/deepo
|
fa720e39052e63adfe0f2b9dbd8444a0d69c2540
|
[
"MIT"
] | 1
|
2021-11-18T18:34:29.000Z
|
2021-11-18T18:34:29.000Z
|
generator/modules/opencv.py
|
dayta-ai/deepo
|
fa720e39052e63adfe0f2b9dbd8444a0d69c2540
|
[
"MIT"
] | null | null | null |
generator/modules/opencv.py
|
dayta-ai/deepo
|
fa720e39052e63adfe0f2b9dbd8444a0d69c2540
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .__module__ import Module, dependency, source, version
from .tools import Tools
from .boost import Boost
from .python import Python
@dependency(Tools, Python, Boost)
@source('git')
@version('4.0.1')
class Opencv(Module):

    def build(self):
        return r'''
RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \
DEBIAN_FRONTEND=noninteractive \
add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main" && \
apt update && \
$APT_INSTALL \
libatlas-base-dev \
libgflags-dev \
libgoogle-glog-dev \
libhdf5-serial-dev \
libleveldb-dev \
liblmdb-dev \
libprotobuf-dev \
libsnappy-dev \
protobuf-compiler \
libopencv-dev \
yasm \
libjpeg-dev \
libjasper-dev \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libdc1394-22-dev \
libv4l-dev \
libtbb-dev \
libqt4-dev \
libgtk2.0-dev \
libfaac-dev \
libmp3lame-dev \
libopencore-amrnb-dev \
libopencore-amrwb-dev \
libtheora-dev \
libvorbis-dev \
libxvidcore-dev \
x264 \
v4l-utils \
ffmpeg \
&& \
$GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv && \
$GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \
mkdir -p opencv/build && cd opencv/build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D WITH_IPP=OFF \
-D WITH_CUDA=OFF \
-D WITH_TBB=ON \
-D WITH_V4L=ON \
-D WITH_QT=ON \
-D WITH_OPENCL=ON \
-D WITH_GTK=ON \
-D WITH_LIBV4L=ON \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D WITH_FFMPEG=ON \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \
.. && \
make -j"$(nproc)" install && \
ln -s /usr/local/include/opencv4/opencv2 /usr/local/include/opencv2
'''.format(self.version)
| 35.945205
| 100
| 0.463796
| 249
| 2,624
| 4.763052
| 0.497992
| 0.037943
| 0.029511
| 0.025295
| 0.06914
| 0.06914
| 0.06914
| 0.06914
| 0.06914
| 0
| 0
| 0.018256
| 0.436357
| 2,624
| 72
| 101
| 36.444444
| 0.783638
| 0.008003
| 0
| 0
| 0
| 0.044118
| 0.889658
| 0.105729
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.058824
| 0.014706
| 0.102941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a7a7334b3428135d28ee8a3da56e39eed250254
| 1,564
|
py
|
Python
|
day16/solve16.py
|
jmacarthur/aoc2017
|
2a3096aabf464ef52c05f9437498035cfb5ca1a6
|
[
"MIT"
] | null | null | null |
day16/solve16.py
|
jmacarthur/aoc2017
|
2a3096aabf464ef52c05f9437498035cfb5ca1a6
|
[
"MIT"
] | null | null | null |
day16/solve16.py
|
jmacarthur/aoc2017
|
2a3096aabf464ef52c05f9437498035cfb5ca1a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import copy
stage_length = 16
stage = map(chr, range(ord('a'),ord('a')+stage_length))
def spin(amount):
    """To save time, this function isn't used except at the end.
    Normally, a counter marks the start of the stage and this changes
    instead. """
    global stage
    stage = stage[amount:] + stage[:amount]

def swap(pos1, pos2):
    global stage
    (stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1])

with open(sys.argv[1], 'rt') as f:
    program = ",".join(f.readlines()).split(",")

n = 0
pos = 0
arguments_list = [x[1:].strip().split("/") for x in program]
action_list = [x[0] for x in program]
history = []

# Change this to 1 for the solution to part 1.
iterations = 1000000000

while n < iterations:
    for s in range(0, len(program)):
        arguments = arguments_list[s]
        if action_list[s] == 's':
            pos += stage_length - int(arguments[0])
        elif action_list[s] == 'x':
            swap((int(arguments[0]) + pos) % stage_length, (int(arguments[1]) + pos) % stage_length)
        elif action_list[s] == 'p':
            pos1 = stage.index(arguments[0])
            pos2 = stage.index(arguments[1])
            swap(pos1, pos2)
    if stage in history:
        print("Duplicate found: %r at index %d matches at stage %d" % (stage, history.index(stage), n))
        loop_length = n - history.index(stage)
        complete_cycles = (iterations - n) / loop_length
        n += complete_cycles * loop_length
    history.append(copy.copy(stage))
    n += 1

spin(pos % stage_length)
print "".join(stage)
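# Usage: ./solve16.py <input file>, where the input is the comma-separated
# dance-move list (Python 2). Set iterations = 1 above for the part-1 answer;
# the cycle detection skips ahead once a repeated stage arrangement is found.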
| 30.076923
| 101
| 0.621483
| 229
| 1,564
| 4.170306
| 0.375546
| 0.06911
| 0.058639
| 0.027225
| 0.05445
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029826
| 0.228261
| 1,564
| 51
| 102
| 30.666667
| 0.761392
| 0.039003
| 0
| 0.052632
| 0
| 0
| 0.045052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.052632
| null | null | 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a82d93e4ba8abbe55f44853090dbccbc8c6e819
| 48,277
|
py
|
Python
|
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | 1
|
2021-10-30T00:03:05.000Z
|
2021-10-30T00:03:05.000Z
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 05/07/2015 4027 randerso Migrated A1 OB9.16 code to A2
# 06/17/2015 4027 dgilling Perform case-insensitive
# comparisons in foundCTAs.
# 07/13/2015 4648 randerso Fix bullets in follow up products
# 02/24/2016 5411 randerso Make bullet headers upper case
# 07/15/2016 5749 randerso Replaced ellipses with commas in hazardBodyText
#
##
# This is a base file that is not intended to be overridden.
##
#-------------------------------------------------------------------------
# Description: This product is a template for creating Hazard Products.
#-------------------------------------------------------------------------
# Copying:
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#-------------------------------------------------------------------------
# Standard and Local file names and Locations:
# GenericHazards
#-------------------------------------------------------------------------
# Customization Points:
#
# DEFINITION SECTION
#
# Required Configuration Items:
#
# displayName If not None, defines how product appears in GFE GUI
#
# You must set the following:
#
# productName defines name of product e.g. "Zone Forecast Product"
# fullStationID Full station identifier, 4 letter, such as "KSLC".
# wmoID WMO ID code for product header, such as "FOUS45"
# pil Product pil, such as "SFTBOS"
# areaName (opt.) Area name for product header, such as "Western New York"
# wfoCityState City,state that the WFO is located in, such as "Buffalo NY"
#
# Optional Configuration Items
#
# mapNameForCombinations Name of the map background that is used for
# creating/editing the combinations file. This must
# be defined or the GFE zone combiner
# database Source database for product. Can be "Official",
# "Fcst" or "ISC"
# outputFile Defines the output location of the finished product.
# Product is saved if autoWrite is 1.
# debug If on, debug_print statements will appear.
# textdbPil Defines the awips product identifier
# (e.g., DENCCFDEN) that is used to store the product
# in the AWIPS text database. The product is not
# automatically stored unless autoStore is 1. This
# value is also used for the default GUI entry for
# storage.
# awipsWANPil Defines the awips product identifier
# (e.g., KBOUCCFDEN) that is used to transmit the
# product to the AWIPS WAN. The product is not
# automatically transmitted unless autoSend is 1.
# This value is also used for the default GUI
# entry for storage.
# autoSend If set to 1, then the product will be automatically
# sent on the AWIPS WAN to the "autoSendAddress" with
# the "awipsWANPil after product creation.
# autoStore If set to 1, then the product will be automatically
# stored into the text database using the "textdbPil"
# after product creation.
# autoWrite If set to 1, then the product will be automatically
# written to the "output" named disk file after
# product creation.
#
# lineLength max length of each line
#
# defaultEditAreas defines edit areas, default is Combinations
#
# purgeTime Maximum number of hours past issuance time for the
# expire time.
# includeCities If 1, cities will be included in the area header
# accurateCities If 1, cities are determined from grids
# citiesPhrase "Including the cities of" phrase used when including
# cities
# includeZoneNames If 1, zone names will be included in the area header
# easPhrase Optional EAS phrase to be include in product header
#
# hazardSamplingThreshold Defines the percentage coverage or number of
# grid points in a zone that must contain the hazard
# in order for it to be considered. Tuple (percent, points)
# includeOverviewHeadline If 1, the overview header is templated
# includeOverview If 1, the overview section is templated
# bulletProd If 1, the product will use a bullet format
#-------------------------------------------------------------------------
# Weather Elements Needed:
# Hazards
#-------------------------------------------------------------------------
# Edit Areas Needed: None
#-------------------------------------------------------------------------
# Associated Utilities Files e.g. Combinations file:
# Combinations file
#-------------------------------------------------------------------------
# Component Products:
# Hazards
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
#
# To look up tasks and their status, see the Text Product User Guide
# Section on "Tkgnats: Task Reporting System".
#-------------------------------------------------------------------------
# Additional Information:
#-------------------------------------------------------------------------
# Example Output:
#-------------------------------------------------------------------------
import LogStream
import TextRules
import SampleAnalysis
import time, string, types, copy, re
import CallToActions
import AbsTime
class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis,
CallToActions.CallToActions):
Definition = {
"type": "smart",
"displayName": None,
# Source database for product. Can be "Official", "Fcst" or "ISC"
"database": "Official",
# Defines output location of finished product.
"outputFile": "{prddir}/TEXT/genHaz.txt",
"debug": 0,
# Name of map background for creating Combinations
# Can be:
# Zones_BOU
# FireWxZones_BOU
# Counties
# Marine_Zones_BOU
"mapNameForCombinations": "Zones_<site>",
## Edit Areas: Create Combinations file with edit area combinations.
## Can be:
## EditAreas_PublicZones_BOU
## EditAreas_FireWx_BOU
## EditAreas_FIPS_BOU
## EditAreas_MarineZones_BOU
"defaultEditAreas" : "EditAreas_PublicZones_<site>_<MultiPil>",
# product identifiers
"productName": "Generic Hazard Product", # product name
"fullStationID": "<fullStationID>", # full station identifier (4letter)
"wmoID": "<wmoID>", # WMO ID
"pil": "<pil>", # Product pil
"areaName": "", # Name of state, such as "Georgia" -- optional
"wfoCityState": "<wfoCityState>", # Location of WFO - city,state
"textdbPil": "<textdbPil>", # Product ID for storing to AWIPS text database.
"awipsWANPil": "<awipsWANPil>", # Product ID for transmitting to AWIPS WAN.
"periodCombining" : 0, # If 1, combine periods, if possible
# automatic functions
"autoSend": 0, #set to 1 to automatically transmit product
"autoSendAddress": "000", #transmission address
"autoStore": 0, #set to 1 to automatically store product in textDB
"autoWrite": 0, #set to 1 to automatically write product to file
# Area Dictionary -- Descriptive information about zones
"areaDictionary": "AreaDictionary",
# Language
"language": "english",
"lineLength": 66, #Maximum line length
"purgeTime": 8, # Maximum hours for expireTime
"includeCities": 1 , # Cities included in area header
"accurateCities": 0, # Include all cities in area header
"cityLocation": "CityLocation", # City lat/lon dictionary to use
"cityDescriptor":"Including the cities of",
"includeZoneNames":1, # Zone names will be included in the area header
"easPhrase" :"", # Optional EAS phrase to be include in product header
"includeOverviewHeadline": 1, #include overview header
"includeOverview": 1, #include overview section
"bulletProd": 0, # do not default to bullets
"hazardSamplingThreshold": (10, None), #(%cov, #points)
"callToAction": 1,
}
def __init__(self):
TextRules.TextRules.__init__(self)
SampleAnalysis.SampleAnalysis.__init__(self)
self.__overviewText = ""
self.__procCTA = None
def generateForecast(self, argDict):
# Generate Text Phrases for a list of edit areas
# Get variables
error = self._getVariables(argDict)
if error is not None:
return error
# Get the segments
hazardsC = argDict['hazards']
segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
if len(segmentList) == 0:
return "No hazards to report"
# Determine time ranges
error = self._determineTimeRanges(argDict)
if error is not None:
return error
# Initialize the output string
fcst = ""
fcst = self._preProcessProduct(fcst, argDict)
# Generate the product for each segment in the segmentList
fraction = 0
fractionOne = 1.0/float(len(segmentList))
percent = 50.0
self.setProgressPercentage(50)
for segmentAreas in segmentList:
self.progressMessage(fraction, percent, "Making Product for Segment")
fcst = self._preProcessArea(fcst, segmentAreas, self._expireTime, argDict)
fcst = self._makeProduct(fcst, segmentAreas, argDict)
fcst = self._postProcessArea(fcst, segmentAreas, argDict)
fraction = fractionOne
fcst = self._postProcessProduct(fcst, argDict)
return fcst
def _getVariables(self, argDict):
# Make argDict accessible
self.__argDict = argDict
# Get Definition variables
self._definition = argDict["forecastDef"]
for key in self._definition.keys():
exec "self._" + key + "= self._definition[key]"
# Get VariableList
varDict = argDict["varDict"]
for key in varDict.keys():
if type(key) is types.TupleType:
label, variable = key
exec "self._" + variable + "= varDict[key]"
self._language = argDict["language"]
# Set up information for Hazards product
self._hazards = argDict['hazards']
self._combinations = argDict["combinations"]
return None
def _determineTimeRanges(self, argDict):
# Set up the time range for 0-240 hours
self._timeRange = self.createTimeRange(0, 240)
self._ddhhmmTime = self.getCurrentTime(
argDict, "%d%H%M", shiftToLocal=0, stripLeading=0)
self._issueTime = AbsTime.AbsTime(argDict['creationTime'])
self._currentTime = argDict['creationTime']
self._expireTime = self._issueTime + self._purgeTime*3600
self._timeLabel = self.getCurrentTime(
argDict, "%l%M %p %Z %a %b %e %Y", stripLeading=1)
return None
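# Worked example (hypothetical times): with the default purgeTime of 8 and
# an issue time of 1200 UTC, _expireTime = issueTime + 8*3600 seconds,
# i.e. 2000 UTC on the same day.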
def _preProcessProduct(self, fcst, argDict):
# Product header
if self._areaName != "":
self._areaName = " for " + self._areaName
issuedByString = self.getIssuedByString()
productName = self.checkTestMode(argDict,
self._productName + self._areaName)
if len(self._easPhrase) != 0:
eas = self._easPhrase + '\n'
else:
eas = ''
s = self._wmoID + " " + self._fullStationID + " " + \
self._ddhhmmTime + "\n" + self._pil + "\n\n"
fcst = fcst + s.upper()
s = eas + productName + "\n" +\
"National Weather Service " + self._wfoCityState + \
"\n" + issuedByString + self._timeLabel + "\n\n"
fcst = fcst + s
fcst = fcst + "Default overview section\n"
return fcst
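# With the default Definition above, the assembled header looks roughly
# like this (station, PIL, and times are hypothetical):
#
#     WWUS81 KBOU 011200
#     NPWBOU
#
#     Generic Hazard Product
#     National Weather Service Denver CO
#     600 AM MDT Mon Jun 1 2020
#
#     Default overview section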
def _preProcessArea(self, fcst, segmentAreas, expireTime, argDict):
# This is the header for an edit area combination
areaHeader = self.makeAreaHeader(
argDict, "", self._issueTime, expireTime,
self._areaDictionary, None, cityDescriptor=self._cityDescriptor,
areaList=segmentAreas, includeCities=self._includeCities,
includeZoneNames = self._includeZoneNames,
accurateCities = self._accurateCities)
fcst = fcst + areaHeader
return fcst
def _makeProduct(self, fcst, segmentAreas, argDict):
argDict["language"] = self._language
# Generate Narrative Forecast for Edit Area
# get the hazards text
# We only need to get headlines for the first edit area
# in the segment since all areas in the segment have
# the same headlines
editArea = segmentAreas[0]
areaLabel = editArea
headlines = self.generateProduct("Hazards", argDict, area = editArea,
areaLabel=areaLabel,
timeRange = self._timeRange)
fcst = fcst + headlines
return fcst
def _postProcessArea(self, fcst, segmentAreas, argDict):
return fcst + "\n\n$$\n\n"
def _postProcessProduct(self, fcst, argDict):
#
# If an overview exists for this product, insert it
#
overview = self.finalOverviewText()
overviewSearch = re.compile(r'Default overview section', re.DOTALL)
fcst = overviewSearch.sub(overview, fcst)
#
# Added to place line feeds in the CAP tags to keep separate from CTAs
# string.replace substitutes literally, so the pattern needs real dots and
# the replacement real newlines (raw-string escapes here would never match)
fcst = string.replace(fcst, "PRECAUTIONARY/PREPAREDNESS ACTIONS...", "\nPRECAUTIONARY/PREPAREDNESS ACTIONS...\n")
fcst = string.replace(fcst, "\n ","\n")
fcst = string.replace(fcst, "&&", "\n&&\n")
# Prevent empty Call to Action Tags
fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n', \
"", fcst)
fcst = self._indentBulletText(fcst)
#
# Clean up multiple line feeds
#
fixMultiLF = re.compile(r'(\n\n)\n*', re.DOTALL)
fcst = fixMultiLF.sub(r'\1', fcst)
# finish progress meter
self.setProgressPercentage(100)
self.progressMessage(0, 100, self._displayName + " Complete")
return fcst
def allowedHazards(self):
return []
# Added for DR 21194
def _bulletDict(self):
return []
# Added for DR 21309
def _bulletOrder(self):
return []
## Replaced by 21309 code
## def _getBullets(self, newBulletList, argDict):
##
## ### get the bullet dictionary and split the bullets
## bDict = self._bulletDict()
## bLine = bDict.get(eachHazard['phen'])
## print 20* "*" + (eachHazard['phen'])
## bList = newBulletList.split(",")
##
## ### initialize the bullet output
## bullets = ""
##
## ### loop through the bullets and format the output
## for b in bList:
## bullets = bullets + "* " + b + "...|* Enter bullet text *|\n\n"
## # bullets = bullets + "\n"
## return bullets
def _indentBulletText(self, prevText):
print prevText
### if previous text is empty, return nothing
if prevText is None:
return prevText
###
### split the text
###
bullets = []
bullets = string.split(prevText, '\n\n')
if len(bullets) <= 1:
return prevText
###
### process the text
###
outText = ""
for b in bullets:
### if first character is a * we found a bullet
if re.match("\*", b):
### remove line feeds
removeLF = re.compile(r'(\s*[^\n])\n([^\n])', re.DOTALL)
bullet = removeLF.sub(r'\1 \2',b)
### indent code
bullet = self.indentText(bullet, indentFirstString = '',
indentNextString = ' ', maxWidth=self._lineLength,
breakStrings=[" ", "..."])
###
### the "-" in the breakStrings line above is causing issues with
### offices that use "-20 degrees" in the text.
###
outText = outText + bullet + "\n\n"
else: ### not a bullet, CTA text
outText = outText + b + "\n\n"
### that's it
print outText
return outText
# The hazardTimePhrases method is passed a hazard record, and returns
# time phrase wording consistent with that generated by the headline
# algorithms in DiscretePhrases.
#
def hazardTimePhrases(self, hazard, argDict, prefixSpace=True):
timeWords = self.getTimingPhrase(hazard, argDict['creationTime'])
if prefixSpace and len(timeWords):
timeWords = " " + timeWords #add a leading space
return timeWords
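# Usage sketch (hypothetical hazard record): a hazard ending at 6 PM local
# time might yield timeWords such as "until 6 PM MDT this evening"; with
# prefixSpace=True a leading space is added so the phrase can be appended
# directly to a headline sentence.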
#
# The method hazardBodyText creates an attribution phrase
#
def hazardBodyText(self, hazardList, argDict):
bulletProd = self._bulletProd
hazardBodyPhrase = ''
#
# First, sort the hazards for this segment by importance
#
sortedHazardList = []
for each in ['W', 'Y', 'A', 'O', 'S']:
for eachHazard in hazardList:
if eachHazard['sig'] == each:
if eachHazard not in sortedHazardList:
sortedHazardList.append(eachHazard)
#
# Next, break them into individual lists based on action
#
newList = []
canList = []
expList = []
extList = []
conList = []
upgList = []
statementList = []
for eachHazard in sortedHazardList:
if eachHazard['sig'] in ['S'] and eachHazard['phen'] in ['CF', 'LS']:
statementList.append(eachHazard)
elif eachHazard['act'] in ['NEW', 'EXA', 'EXB']:
newList.append(eachHazard)
elif eachHazard['act'] in ['CAN']:
canList.append(eachHazard)
elif eachHazard['act'] in ['EXP']:
expList.append(eachHazard)
elif eachHazard['act'] in ['EXT']:
extList.append(eachHazard)
elif eachHazard['act'] in ['UPG']:
upgList.append(eachHazard)
else:
conList.append(eachHazard)
#
# Now, go through each list and build the phrases
#
nwsIntroUsed = 0
#
# This is for the new hazards
#
phraseCount = 0
lastHdln = None
for eachHazard in newList:
hdln = eachHazard['hdln']
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
hazNameACap = self.sentence(hazNameA, addPeriod=False)
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if hazName in ["Winter Weather Advisory", "Winter Storm Warning", "Beach Hazards Statement"]:
forPhrase = " for |* Enter hazard type *|"
else:
forPhrase =""
if nwsIntroUsed == 0:
hazardBodyPhrase = "The National Weather Service in " + self._wfoCity
nwsIntroUsed = 1
if phraseCount == 0:
phraseCount = 1
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + " has issued " + \
hazNameA + ". "
else:
hazardBodyPhrase += " has issued " + hazNameA + forPhrase + \
", which is in effect" + endTimePhrase + ". "
elif phraseCount == 1:
phraseCount = 2
if hdln != lastHdln:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued."
else:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued. This " + hazName + forPhrase + \
" is in effect" + endTimePhrase + ". "
else:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued."
else:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + forPhrase + \
" has also been issued" + endTimePhrase + ". "
else:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase += "In addition, " + \
hazNameA + " has been issued."
else:
hazardBodyPhrase += "In addition, " + \
hazNameA + forPhrase + " has been issued. This " + hazName + \
" is in effect" + endTimePhrase + ". "
lastHdln = hdln
#
# This is for the can hazards
#
for eachHazard in canList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if nwsIntroUsed == 0:
hazardBodyPhrase = "The National Weather Service in " +\
self._wfoCity
nwsIntroUsed = 1
hazardBodyPhrase = hazardBodyPhrase + \
" has cancelled the " + hazName + ". "
else:
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" has been cancelled. "
#
# This is for the exp hazards
#
phraseCount = 0
for eachHazard in expList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if eachHazard['endTime'] <= argDict['creationTime']:
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is no longer in effect. "
else:
expTimeCurrent = argDict['creationTime']
timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" will expire " + timeWords + ". "
#
# This is for ext hazards
#
for eachHazard in extList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is now in effect" + endTimePhrase + ". "
#
# This is for upgrade hazards
#
for eachHazard in upgList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is no longer in effect. "
#
# This is for con hazards
#
for eachHazard in conList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
hazardBodyPhrase = hazardBodyPhrase + hazNameA + \
" remains in effect" + endTimePhrase + ". "
#
# This is for statement hazards
#
for eachHazard in statementList:
hazardBodyPhrase = "...|* Add statement headline *|...\n\n"
#
# This adds segment text
#
segmentText = ''
#
# Check this segment's action codes to decide whether to capture the
# previous text, and whether the captured text should be framed
#
incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \
self.useCaptureText(sortedHazardList)
#
#
# Check that the previous text exists
#
foundCTAs = []
for eachHazard in sortedHazardList:
if eachHazard.has_key('prevText'):
prevText = eachHazard['prevText']
if eachHazard['pil'] == 'MWS':
startPara = 0
else:
startPara = 1
segmentText, foundCTAs = self.cleanCapturedText(prevText,
startPara, addFramingCodes = False,
skipCTAs = skipCTAs)
tester = segmentText[0]
if tester == '*':
startPara = 1
else:
startPara = 2
segmentText, foundCTAs = self.cleanCapturedText(prevText,
startPara, addFramingCodes = False,
skipCTAs = skipCTAs)
#
# Check that the segment text isn't very short or blank
#
if len(segmentText) < 6:
incTextFlag = 0
# DR 21309 code addition from Middendorf (BYZ)
#
# Now if there is a new hazard and previous segment Text, then
# we may have to add bullets.
#
if incTextFlag and bulletProd:
for eachHazard in sortedHazardList:
if not eachHazard.has_key('prevText'):
newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
print "newBullets = ", newBullets
print "segment text is: ", segmentText
for bullet in newBullets:
if re.search("\* " + bullet + "\.\.\.", segmentText, flags=re.IGNORECASE) is None:
print bullet + " not in segmentText"
start = self._bulletOrder().index(bullet) + 1
end = len(self._bulletOrder())
bulletFlag = 1
for i in range(start,end):
if (re.search("\* " + self._bulletOrder()[i] + "\.\.\.", segmentText, flags=re.IGNORECASE) is not None) and bulletFlag:
print "* " + self._bulletOrder()[i] + "... found!"
segmentTextSplit = re.split("\* " + self._bulletOrder()[i] + "\.\.\.", segmentText, flags=re.IGNORECASE)
segmentText = string.join(segmentTextSplit,"* " + bullet.upper() + \
"...|* Enter bullet text *|\n\n* " + self._bulletOrder()[i] + "...")
bulletFlag = 0
if bulletFlag:
print "appending to bottom list of bullets!"
segmentTextSplit = re.split("PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", segmentText, flags=re.IGNORECASE)
segmentText = "\n" + string.join(segmentTextSplit,"* " + bullet.upper() + \
"...|* Enter bullet text *|\n\nPRECAUTIONARY/PREPAREDNESS ACTIONS...")
bulletFlag = 0
#
# Now if there is a can/exp hazard and previous segment Text, then
# we may have to remove bullets.
#
if incTextFlag and bulletProd:
# First make list of bullets that we need to keep.
keepBulletList = []
for eachHazard in sortedHazardList:
if eachHazard['act'] not in ["CAN","EXP"]:
saveBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for saveBullet in saveBullets:
if saveBullet not in keepBulletList:
keepBulletList.append(saveBullet)
# Now determine which bullets we have to remove.
removeBulletList = []
for eachHazard in sortedHazardList:
if eachHazard['act'] in ["CAN","EXP"]:
canBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for canBullet in canBullets:
if canBullet not in keepBulletList and canBullet not in removeBulletList:
removeBulletList.append(canBullet)
print "hazardBodyText info: keepBulletList: ",keepBulletList
print "hazardBodyText info: removeBulletList: ",removeBulletList
# Finally remove the bullets no longer needed.
for bullet in removeBulletList:
if re.search("\* "+ bullet + "\.\.\.", segmentText, flags=re.IGNORECASE) is not None:
segmentTextSplit = re.split("\* " + bullet + "\.\.\.", segmentText, flags=re.IGNORECASE)
print "segmentTextSplit is ", segmentTextSplit
segmentTextSplit2 = string.split(segmentTextSplit[1],"*",1)
if len(segmentTextSplit2) == 2:
segmentTextSplit[1] = "*" + segmentTextSplit2[1]
else:
segmentTextSplit2 = re.split("PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", segmentTextSplit[1], 1, flags=re.IGNORECASE)
if len(segmentTextSplit2) == 2:
segmentTextSplit[1] = "PRECAUTIONARY/PREPAREDNESS ACTIONS..." + segmentTextSplit2[1]
segmentText = string.join(segmentTextSplit,"")
if removeBulletList != []:
segmentText = "|*\n" + segmentText + "*|"
else:
segmentText = segmentText
#
# If segment passes the above checks, add the text
#
print "hazardBodyText info: incTextFlag: ",incTextFlag
if incTextFlag:
print "hazardBodyText info: segmentText: ",segmentText
hazardBodyPhrase = hazardBodyPhrase + "\n\n" + \
segmentText + '\n\n'
elif bulletProd:
bulletFlag = 0
# eachHazard here is the last record left over from the sorted list above
if eachHazard['act'] == 'CAN':
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Wrap-up text goes here *|.\n"
elif eachHazard['act'] == 'EXP':
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Wrap-up text goes here *|.\n"
else:
bulletFlag = 1
## print "bulletFlag is: ",bulletFlag
if bulletFlag:
newBulletList = []
bullets = ""
for eachHazard in sortedHazardList:
### get the default bullets for all hazards from the bullet dictionary
newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for newBullet in newBullets:
if newBullet not in newBulletList:
newBulletList.append(newBullet)
print "my bullets are: ", newBulletList
### Determine the correct order for all bullets
bulletOrder = self._bulletOrder()
staticBulletOrder = self._bulletOrder()
for bullet in staticBulletOrder:
print "correct bullet order should be: ", bulletOrder
if bullet not in newBulletList:
bulletOrder.remove(bullet)
print "reordered bullets are: ", bulletOrder
for b in bulletOrder:
bullets = bullets + "* " + b.upper() + "...|* Enter bullet text *|\n\n"
hazardBodyPhrase = hazardBodyPhrase + "\n\n" + bullets
# If segment doesn't pass the checks, put in framing codes
else:
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Statement text goes here *|.\n\n"
# End code for DR 21310
#
# This adds the call to action statements. This is only performed
# if the segment is 'NEW' or if the previous text has been discarded
# due to a CAN/EXP/UPG segment
#
# remove items from forceCTAList if they exist in foundCTAs. Note
# that the formats of these lists are different, thus this code
# is more complicated
for ent in foundCTAs:
#only process CTAs that are vtec phen/sig based
if ent.find('.') == 2:
phensig = (ent[0:2], ent[3]) #phen.sig
if phensig in forceCTAList:
del forceCTAList[forceCTAList.index(phensig)]
hazardBodyPhrase = hazardBodyPhrase + '\n\n'
ctas = []
for (phen,sig) in forceCTAList:
hazardPhenSig = phen + "." + sig
cta = self.defaultCTA(hazardPhenSig)
if cta not in ctas:
ctas.append(cta)
if len(ctas) > 0:
hazardBodyPhrase = hazardBodyPhrase + \
'PRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n'
for c in ctas:
hazardBodyPhrase = hazardBodyPhrase + c + '\n\n'
hazardBodyPhrase = hazardBodyPhrase + '&&\n\n'
# Make sure there is only one CAP tag pair
hazardBodyPhrase = re.sub(r'&&\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n', \
"", hazardBodyPhrase)
return hazardBodyPhrase
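# A minimal standalone sketch of the action bucketing performed above,
# assuming each hazard record is a dict with 'phen', 'sig' and 'act' keys
# (the function name is illustrative, not part of the baseline formatter):
def _bucketHazardsByAction(hazards):
    buckets = {'statement': [], 'NEW': [], 'CAN': [], 'EXP': [],
               'EXT': [], 'UPG': [], 'CON': []}
    for h in hazards:
        if h['sig'] == 'S' and h['phen'] in ('CF', 'LS'):
            buckets['statement'].append(h)  # coastal/lakeshore statements
        elif h['act'] in ('NEW', 'EXA', 'EXB'):
            buckets['NEW'].append(h)        # new and area-extended events
        elif h['act'] in ('CAN', 'EXP', 'EXT', 'UPG'):
            buckets[h['act']].append(h)
        else:
            buckets['CON'].append(h)        # everything else continues
    return buckets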
def finalOverviewText(self):
# if no overview was calculated, use the default
if len(self.__overviewText) == 0:
if self._includeOverviewHeadline:
overviewHeadline = "...|*Overview headline (must edit)*|...\n\n"
else:
overviewHeadline = ""
if self._includeOverview:
overviewBody = ".|*Overview (must edit)*|.\n\n"
else:
overviewBody = ""
#assemble the lines
overview = overviewHeadline + overviewBody
return overview
else:
return self.__overviewText
def overviewText(self, hazardList, pil):
#
# This method finds an overview in the previous product
#
overview = ""
for each in hazardList:
if (each.has_key('prevOverviewText') and
each.has_key('pil') and
each.has_key('endTime') and
each.has_key('act')):
if (each['pil'] == pil and
each['endTime'] > self._currentTime and
each['act'] not in ['CAN', 'EXP']):
overview = each['prevOverviewText']
self.__overviewText, dummy = self.cleanCapturedText(
overview, 0)
break
def useCaptureText(self, hazardList):
#Based on the hazardlist, returns a tuple indicating:
# (inc capture text, inc framing codes, skip CTAs, forceCTAList)
#
# For the values to be considered, the 'hdln' value must be
# present in the list, or it needs to be a Statement (sig="S")
cans = ['CAN','UPG','EXP']
acts = ['NEW','EXT','EXA','EXB','CON']
foundACTS = 0
foundCANS = 0
foundSig = []
for eh in hazardList:
if eh['act'] in acts and (len(eh['hdln']) or eh['sig'] == 'S'):
foundACTS = 1
if eh['act'] in cans and (len(eh['hdln']) or eh['sig'] == 'S'):
foundCANS = 1
if eh['sig'] not in foundSig:
foundSig.append(eh['sig'])
includeFrameCodes = 0
includeText = 1
skipCTAs = 0
forceCTAList = []
# all actions are in CAN, UPG, EXP only (don't include text)
if foundCANS and not foundACTS:
if 'S' in foundSig and len(foundSig) == 1: #only S
includeFrameCodes = 1 #capture text, but frame it
else:
includeText = 0 #end of non statement
# something in CANS and something in acts (frame it, include text)
elif foundCANS and foundACTS:
includeFrameCodes = 1
skipCTAs = 1
for eh in hazardList:
if eh['act'] in acts and \
(eh['phen'], eh['sig']) not in forceCTAList and \
len(eh['hdln']):
forceCTAList.append((eh['phen'], eh['sig']))
#everything in active entries, captured text is used, but still
# need to handle the "NEW" entries.
else:
for eh in hazardList:
if eh['act'] in ['NEW'] and len(eh['hdln']):
forceCTAList.append((eh['phen'], eh['sig']))
return (includeText, includeFrameCodes, skipCTAs, forceCTAList)
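# Worked example (hypothetical segment): a segment holding one CON event
# with a headline and one CAN event returns (1, 1, 1, [(phen, sig)]) --
# keep the previous text, frame it, skip the old CTAs, and force fresh
# CTAs for the continuing event.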
def cleanCapturedText(self, text, paragraphs, addFramingCodes = False,
skipCTAs = False):
#
# This method takes a block of text, wraps it preserving blank lines,
# then returns the part after 'paragraphs'. So, if paragraphs is 0, it
# returns the whole thing, if it's 2, it returns paragraphs 2 -> end, etc.
# Headlines are always removed.
# Framing codes are added if specified.
#
paras = self.convertSingleParas(text) #single paragraphs
# keep track of any call to actions found
foundCTAs = []
# Process the paragraphs, keep only the interested ones
paraCount = 0
processedText = ''
for eachPara in paras:
if paraCount >= paragraphs:
found = self.ctasFound(eachPara) #get list of ctas found
if skipCTAs and len(found):
pass
else:
processedText = processedText + eachPara + '\n\n'
#keep track of remaining CTAs in processed text
for f in found:
if f not in foundCTAs:
foundCTAs.append(f)
if eachPara.find('...') == 0:
pass #ignore headlines
paraCount = paraCount + 1
# Add framing codes
if addFramingCodes:
processedText = processedText.rstrip()
processedText = "|*\n" + processedText + "*|\n"
# Wrap
processedText = self.endline(processedText,
linelength=self._lineLength, breakStr=[" ", "-", "..."])
return processedText, foundCTAs
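# Usage sketch (hypothetical text): with paragraphs=1 the first paragraph
# (typically the headline) is skipped and only the body is kept, e.g.
#     text, ctas = self.cleanCapturedText("...HEADLINE...\n\nBody text.", 1)
# leaves text roughly as "Body text.\n\n", wrapped to self._lineLength.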
def decodeBulletedText(self, prevText):
# Returns the decoded bullet texts (or None) along with the regular text
# after the bullets. The afterText runs up to the next bullet or up to
# "The National Weather Service". Note that this only correctly handles
# the first set of entries in a segment, so double events will only
# decode the first set of bullets and text. multipleRecords is set to 1
# when there are multiple sets of bullets; in that case only the first
# set is captured/decoded.
# Returns: (hazard, time, basis, impact, afterText, multipleRecords)
if prevText is None:
return (None, None, None, None, None, None)
# find the bullets
bullets = []
buf = prevText.split('\n\n* ')
if len(buf) <= 1:
return (None, None, None, None, None, None)
multRecords = 0 #indicator of multiple sets of bullets
for x in xrange(len(buf)):
if x == 0:
continue #headlines and text before the bullets
bullets.append(buf[x])
# find only the bulleted text, delimited by the double line feed that
# terminates each bullet
regText = "" #regular text after bullets
for x in xrange(1, len(bullets)):
index = bullets[x].find('\n\n')
if index != -1:
regText = bullets[x][index+2:]
bullets[x] = bullets[x][0:index] #eliminate after bullet text
if len(bullets) > x+2: #more bullets are present
multRecords = 1
bullets = bullets[0:x+1] #only interested in these bullets
break
# regular text is the remainder of the text. However we only
# want text from the last in the series of bullets to the
# beginning of any next NWS phrase.
lines = regText.split('\n')
for x in xrange(len(lines)):
if lines[x].find('The National Weather Service') == 0:
lines = lines[0:x] #eliminate following lines
break
regText = ("\n").join(lines)
# now clean up the text
for x in xrange(len(bullets)):
bullets[x] = string.replace(bullets[x],'\n',' ')
removeLF = re.compile(r'(\s*[^\n])\n([^\n])', re.DOTALL)
regText = removeLF.sub(r'\1 \2',regText)
# extract out each section for returning the values
if len(bullets) >= 1:
hazard = bullets[0]
else:
hazard = None
if len(bullets) >= 2:
time = bullets[1]
else:
time = None
if len(bullets) >= 3:
basis = bullets[2]
else:
basis = None
if len(bullets) >= 4:
impact = bullets[3]
else:
impact = None
if len(regText) == 0:
regText = None #no regular text after bullets
return (hazard, time, basis, impact, regText, multRecords)
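# Worked example (hypothetical product text): given previous text of
#
#     ...HEADLINE...\n\n* WHAT...Heavy snow.\n\n* WHEN...Until 6 PM EST.\n\n
#     * ADDITIONAL DETAILS...Travel will be difficult.\n\nRemain indoors.
#
# the method returns hazard="WHAT...Heavy snow.", time="WHEN...Until 6 PM
# EST.", basis="ADDITIONAL DETAILS...Travel will be difficult.",
# impact=None, afterText="Remain indoors.", and multipleRecords=0.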
def substituteBulletedText(self, capText, defaultText, frameit="Never"):
# Returns properly formatted bulleted text based on capText. If capText
# is None or zero length, the default text is used. frameit can be
# "Never", in which case nothing is wrapped in framing codes, "Always",
# in which case the text (default or captured) is wrapped in framing
# codes, or "DefaultOnly", in which case just the default text is wrapped.
if capText is not None and len(capText):
textToUse = capText[0].upper()+capText[1:]
if frameit == "Always":
textToUse = "|* " + textToUse + " *|"
else:
textToUse = defaultText
if frameit == "Always" or frameit == "DefaultOnly":
textToUse = "|* " + textToUse + " *|"
# add bullet codes
textToUse = "* " + textToUse
# format it
return self.indentText(textToUse, indentFirstString = '',
indentNextString = ' ', maxWidth=self._lineLength,
breakStrings=[" ", "-", "..."])
def convertSingleParas(self, text):
#returns a list of paragraphs based on the input text.
lf = re.compile(r'(\s*[^\n])\n([^\n])', re.DOTALL)
ptext = lf.sub(r'\1 \2', text)
ptext = ptext.replace('\n\n', '\n')
paragraphs = ptext.split('\n')
return paragraphs
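# Example: convertSingleParas("Line one\nline two\n\nNext para") returns
# ["Line one line two", "Next para"] -- single line breaks are joined into
# one paragraph, blank lines separate paragraphs.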
def ctasFound(self, text):
#returns types of ctas found. The identifier is the pil (e.g., ZFP),
#phen/sig (e.g., DU.Y), or GENERIC. Uses the CallToAction definitions.
#convert text to single paragraphs
paragraphs = self.convertSingleParas(text)
for x in xrange(len(paragraphs)):
paragraphs[x] = string.replace(paragraphs[x],' ','')
#make list of call to actions (type, cta text)
if self.__procCTA is None:
self.__procCTA = []
ctao = CallToActions.CallToActions()
d = ctao.ctaDict()
for k in d.keys():
func = d[k]
items = func()
for it in items:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append((k,string.replace(cta,' ','')))
d = ctao.ctaPilDict()
for k in d.keys():
func = d[k]
items = func()
for it in items:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append((k,string.replace(cta,' ','')))
ctas = ctao.genericCTAs()
for it in ctas:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append(("GENERIC",
string.replace(cta,' ','')))
#compare
found = []
for para in paragraphs:
for (ctaType, cta) in self.__procCTA:
## Added following line to account for framing code issues in CTA
cta = re.sub("\|\*.*\*\|","",cta)
# We want this comparison to be case-insensitive just in case
# the site is not transmitting in mixed case yet.
if para.upper() == cta.upper() and ctaType not in found:
found.append(ctaType)
return found
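# Usage sketch (hypothetical segment text): ctasFound(segmentText) returns
# identifiers such as ["GENERIC", "DU.Y"] when the segment's paragraphs
# match configured CallToActions entries; the comparison strips spaces and
# ignores case, so it still works for sites not yet in mixed case.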
| 41.29769
| 151
| 0.53106
| 4,665
| 48,277
| 5.465166
| 0.18821
| 0.003138
| 0.008237
| 0.007688
| 0.223416
| 0.196038
| 0.167327
| 0.150971
| 0.134262
| 0.120455
| 0
| 0.009976
| 0.358432
| 48,277
| 1,168
| 152
| 41.333048
| 0.81316
| 0.306378
| 0
| 0.274096
| 0
| 0
| 0.101009
| 0.012153
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.003012
| 0.009036
| null | null | 0.02259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a84ca10fd051b6b0bb8be0088246cc71958f9d5
| 12,062
|
py
|
Python
|
oase-root/web_app/views/system/mail/action_mail.py
|
Masa-Yasuno/oase
|
90f3cee73c0d9b3153808a4a72bd19984a4873f9
|
[
"Apache-2.0"
] | 9
|
2020-03-25T07:51:47.000Z
|
2022-02-07T00:07:28.000Z
|
oase-root/web_app/views/system/mail/action_mail.py
|
Masa-Yasuno/oase
|
90f3cee73c0d9b3153808a4a72bd19984a4873f9
|
[
"Apache-2.0"
] | 1,164
|
2021-01-28T23:16:11.000Z
|
2022-03-28T07:23:10.000Z
|
oase-root/web_app/views/system/mail/action_mail.py
|
Masa-Yasuno/oase
|
90f3cee73c0d9b3153808a4a72bd19984a4873f9
|
[
"Apache-2.0"
] | 25
|
2020-03-17T06:48:30.000Z
|
2022-02-15T15:13:44.000Z
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[Overview]
Screen display helper class for the MAIL action
"""
import pytz
import datetime
import json
import socket
import traceback
from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.db import transaction
from django.conf import settings
from libs.commonlibs import define as defs
from libs.commonlibs.oase_logger import OaseLogger
from libs.commonlibs.aes_cipher import AESCipher
from web_app.models.models import ActionType
from web_app.models.mail_models import MailDriver
from web_app.templatetags.common import get_message
from web_app.serializers.unicode_check import UnicodeCheck
logger = OaseLogger.get_instance() # initialize the logger
class mailDriverInfo():
def __init__(self, drv_id, act_id, name, ver, icon_name):
self.drv_id = drv_id
self.act_id = act_id
self.name = name
self.ver = ver
self.icon_name = icon_name
def __str__(self):
return '%s(ver%s)' % (self.name, self.ver)
def get_driver_name(self):
return '%s Driver ver%s' % (self.name, self.ver)
def get_driver_id(self):
return self.drv_id
def get_icon_name(self):
return self.icon_name
@classmethod
def get_template_file(cls):
return 'system/mail/action_mail.html'
@classmethod
def get_info_list(cls, user_groups):
try:
mail_driver_obj_list = MailDriver.objects.all()
except Exception as e:
# exceptions raised here are handled by the outer caller
raise
protocol_dict = cls.get_define()['dict']
mail_driver_dto_list = []
cipher = AESCipher(settings.AES_KEY)
for mail_obj in mail_driver_obj_list:
mail_info = mail_obj.__dict__
if mail_obj.password:
mail_info['password'] = cipher.decrypt(mail_obj.password)
mail_info['protocol_str'] = protocol_dict[mail_obj.protocol]
mail_driver_dto_list.append(mail_info)
return mail_driver_dto_list
@classmethod
def get_group_list(cls, user_groups):
"""
[Overview]
Get the list of groups (excluding the system administration group)
"""
return []
@classmethod
def get_define(cls):
protocol_dict = {key_value['v']: key_value['k'] for key_value in defs.SMTP_PROTOCOL.LIST_ALL}
defines = {
'list_all': defs.SMTP_PROTOCOL.LIST_ALL,
'dict': protocol_dict,
}
return defines
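# Example (illustrative values): if defs.SMTP_PROTOCOL.LIST_ALL were
# [{'k': 'SMTP', 'v': 1}, {'k': 'SMTP(SSL)', 'v': 2}], then
# get_define()['dict'] would be {1: 'SMTP', 2: 'SMTP(SSL)'}.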
def record_lock(self, json_str, request):
logger.logic_log('LOSI00001', 'None', request=request)
driver_id = self.get_driver_id()
# lock the record before updating
if json_str['json_str']['ope'] in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE):
drvinfo_modify = int(json_str['json_str']['mail_driver_id'])
# evaluate the queryset so SELECT ... FOR UPDATE is actually issued
# (Django querysets are lazy and run no SQL until iterated)
list(MailDriver.objects.select_for_update().filter(pk=drvinfo_modify))
logger.logic_log('LOSI00002', 'Record locked.(driver_id=%s)' % driver_id, request=request)
def modify(self, json_str, request):
"""
[Method overview]
DB update processing for the mail driver settings
"""
logger.logic_log('LOSI00001', 'None', request=request)
error_flag = False
error_msg = {
'mail_disp_name' : '',
'protocol' : '',
'smtp_server' : '',
'port' : '',
'user' : '',
'password' : '',
}
now = datetime.datetime.now(pytz.timezone('UTC'))
emo_chk = UnicodeCheck()
# data for the success response
response = {"status": "success",}
try:
rq = json_str['json_str']
ope = int(rq['ope'])
# input validation for operations other than delete
if ope != defs.DABASE_OPECODE.OPE_DELETE:
error_flag = self._validate(rq, error_msg, request)
if error_flag:
raise UserWarning('validation error.')
# encrypt the password; use an empty string if it is blank
cipher = AESCipher(settings.AES_KEY)
if ope == defs.DABASE_OPECODE.OPE_UPDATE:
encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
driver_info_mod = MailDriver.objects.get(mail_driver_id=rq['mail_driver_id'])
driver_info_mod.mail_disp_name = rq['mail_disp_name']
driver_info_mod.protocol = rq['protocol']
driver_info_mod.smtp_server = rq['smtp_server']
driver_info_mod.port = rq['port']
driver_info_mod.user = rq['user']
driver_info_mod.password = encrypted_password
driver_info_mod.last_update_user = request.user.user_name
driver_info_mod.last_update_timestamp = now
driver_info_mod.save(force_update=True)
elif ope == defs.DABASE_OPECODE.OPE_DELETE:
MailDriver.objects.filter(pk=rq['mail_driver_id']).delete()
elif ope == defs.DABASE_OPECODE.OPE_INSERT:
encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
driver_info_reg = MailDriver(
mail_disp_name = rq['mail_disp_name'],
protocol = rq['protocol'],
smtp_server = rq['smtp_server'],
port = rq['port'],
user = rq['user'],
password = encrypted_password,
last_update_user = request.user.user_name,
last_update_timestamp = now
).save(force_insert=True)
except MailDriver.DoesNotExist:
logger.logic_log('LOSM07006', "mail_driver_id", rq['mail_driver_id'], request=request)
except Exception as e:
logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
response = {
'status': 'failure',
'error_msg': error_msg, # error details (displayed with the error icon)
}
logger.logic_log('LOSI00002', 'response=%s' % response, request=request)
return response
def _validate(self, rq, error_msg, request):
"""
[Overview]
Input validation
[Arguments]
rq: dict requested input data
error_msg: dict per-field error messages, appended to in place
[Return value]
error_flag: bool True if any validation failed
"""
logger.logic_log('LOSI00001', 'data: %s, error_msg:%s'%(rq, error_msg))
error_flag = False
emo_chk = UnicodeCheck()
emo_flag = False
emo_flag_ita_disp_name = False
emo_flag_hostname = False
if len(rq['mail_disp_name']) == 0:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27201', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'mail_disp_name', request=request)
if len(rq['mail_disp_name']) > 64:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27202', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'mail_disp_name', 64, rq['mail_disp_name'], request=request)
# emoji check
value_list = emo_chk.is_emotion(rq['mail_disp_name'])
if len(value_list) > 0:
error_flag = True
emo_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27216', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['protocol']) == 0:
error_flag = True
error_msg['protocol'] += get_message('MOSJA27212', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'protocol', request=request)
if len(rq['protocol']) > 64:
error_flag = True
error_msg['protocol'] += get_message('MOSJA27213', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'protocol', 64, rq['protocol'], request=request)
if len(rq['smtp_server']) == 0:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'smtp_server', request=request)
if len(rq['smtp_server']) > 128:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27204', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'smtp_server', 128, rq['smtp_server'], request=request)
# emoji check
value_list = emo_chk.is_emotion(rq['smtp_server'])
if len(value_list) > 0:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27217', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['port']) == 0:
error_flag = True
error_msg['port'] += get_message('MOSJA27205', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'port', request=request)
try:
tmp_port = int(rq['port'])
if 0 > tmp_port or tmp_port > 65535:
error_flag = True
error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07003', 'port', rq['port'], request=request)
except ValueError:
error_flag = True
error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07003', 'port', rq['port'], request=request)
if len(rq['user']) > 64:
error_flag = True
error_msg['user'] += get_message('MOSJA27207', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'user', 64, rq['user'], request=request)
# emoji check
value_list = emo_chk.is_emotion(rq['user'])
if len(value_list) > 0:
error_flag = True
error_msg['user'] += get_message('MOSJA27218', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['password']) > 64:
error_flag = True
error_msg['password'] += get_message('MOSJA27208', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'password', 64, rq['password'], request=request)
# emoji check
value_list = emo_chk.is_emotion(rq['password'])
if len(value_list) > 0:
error_flag = True
error_msg['password'] += get_message('MOSJA27219', request.user.get_lang_mode(), showMsgId=False) + '\n'
if not emo_flag:
duplication = MailDriver.objects.filter(mail_disp_name=rq['mail_disp_name'])
if len(duplication) == 1 and int(rq['mail_driver_id']) != duplication[0].mail_driver_id:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27209', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07004', 'mail_disp_name', rq['mail_disp_name'], request=request)
if error_flag == False:
# connectivity check
resp_code = -1
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
resp_code = sock.connect_ex((rq['smtp_server'], int(rq['port']))) # host name must be resolvable (e.g., via /etc/hosts)
sock.close()
except Exception as e:
pass
if resp_code != 0:
error_flag = True
# TODO: provisionally this error is reported under the name field
error_msg['mail_disp_name'] += get_message('MOSJA27215', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07005', rq['smtp_server'], rq['port'], request=request)
return error_flag
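# A minimal standalone sketch of the reachability probe used in _validate
# above, assuming only the standard library (the function name is
# illustrative, not part of this module's API):
def _smtp_reachable(host, port, timeout=3.0):
    """Return True if a TCP connection to host:port succeeds."""
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(timeout)  # avoid blocking the request for long
            return sock.connect_ex((host, int(port))) == 0
    except (OSError, ValueError):  # resolution failure or bad port value
        return False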
| 35.372434
| 122
| 0.596419
| 1,433
| 12,062
| 4.750174
| 0.193999
| 0.029382
| 0.035258
| 0.044954
| 0.418834
| 0.367563
| 0.327163
| 0.276627
| 0.240341
| 0.20332
| 0
| 0.026942
| 0.286105
| 12,062
| 340
| 123
| 35.476471
| 0.763558
| 0.072293
| 0
| 0.212963
| 0
| 0
| 0.122767
| 0.004443
| 0
| 0
| 0
| 0.002941
| 0
| 1
| 0.055556
| false
| 0.060185
| 0.074074
| 0.023148
| 0.180556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
8a85f7a1837485544e723eea52a8cc5f16480c6c
| 6,816
|
py
|
Python
|
ophyd/areadetector/detectors.py
|
NSLS-II/ophyd
|
d5fc722eef4d3d83845b1d523004302ec3aadb78
|
[
"BSD-3-Clause"
] | 16
|
2015-05-20T20:48:25.000Z
|
2019-04-24T21:12:59.000Z
|
ophyd/areadetector/detectors.py
|
NSLS-II/ophyd
|
d5fc722eef4d3d83845b1d523004302ec3aadb78
|
[
"BSD-3-Clause"
] | 594
|
2015-01-05T21:55:21.000Z
|
2019-05-10T02:05:24.000Z
|
ophyd/areadetector/detectors.py
|
NSLS-II/ophyd
|
d5fc722eef4d3d83845b1d523004302ec3aadb78
|
[
"BSD-3-Clause"
] | 34
|
2015-01-23T19:50:58.000Z
|
2019-05-07T05:38:57.000Z
|
# vi: ts=4 sw=4
'''AreaDetector Devices
`areaDetector`_ detector abstractions
.. _areaDetector: https://areadetector.github.io/master/index.html
'''
import warnings
from .base import (ADBase, ADComponent as C)
from . import cam
__all__ = ['DetectorBase',
'AreaDetector',
'AdscDetector',
'Andor3Detector',
'AndorDetector',
'BrukerDetector',
'DexelaDetector',
'EmergentVisionDetector',
'EigerDetector',
'FirewireLinDetector',
'FirewireWinDetector',
'GreatEyesDetector',
'LightFieldDetector',
'Mar345Detector',
'MarCCDDetector',
'PSLDetector',
'PerkinElmerDetector',
'PICamDetector',
'PilatusDetector',
'PixiradDetector',
'PointGreyDetector',
'ProsilicaDetector',
'PvcamDetector',
'RoperDetector',
'SimDetector',
'URLDetector',
'UVCDetector',
'Xspress3Detector'
]
class DetectorBase(ADBase):
"""
The base class for the hardware-specific classes that follow.
Note that Plugin also inherits from ADBase.
This adds some AD-specific methods that are not shared by the plugins.
"""
_default_configuration_attrs = (ADBase._default_configuration_attrs +
('cam', ))
def generate_datum(self, key, timestamp, datum_kwargs=None):
"""
Notify plugins of acquisition being complete.
When a new acquisition is started, this method is called with a
key which is a label like 'light', 'dark', or 'gain8'.
It in turn calls ``generate_datum`` on all of the plugins that have
that method.
File plugins are identified by searching for a
:meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum`
method that must have the signature ::
def generate_datum(key: str, timestamp: float, datum_kwargs: dict):
...
Parameters
----------
key : str
The label for the datum that should be generated
timestamp : float
The time of the trigger
datum_kwargs : Dict[str, Any], optional
Any datum kwargs that should go to all children.
"""
if datum_kwargs is None:
datum_kwargs = {}
file_plugins = [s for s in self._signals.values() if
hasattr(s, 'generate_datum')]
for p in file_plugins:
if p.enable.get():
p.generate_datum(key, timestamp, datum_kwargs)
def dispatch(self, key, timestamp):
warnings.warn(
".dispatch is deprecated, use .generate_datum instead",
stacklevel=2
)
return self.generate_datum(key, timestamp, {})
dispatch.__doc__ = generate_datum.__doc__
def make_data_key(self):
source = 'PV:{}'.format(self.prefix)
# This shape is expected to match arr.shape for the array.
shape = (self.cam.num_images.get(),
self.cam.array_size.array_size_y.get(),
self.cam.array_size.array_size_x.get())
return dict(shape=shape, source=source, dtype='array',
external='FILESTORE:')
def collect_asset_docs(self):
file_plugins = [s for s in self._signals.values() if
hasattr(s, 'collect_asset_docs')]
for p in file_plugins:
yield from p.collect_asset_docs()
class AreaDetector(DetectorBase):
cam = C(cam.AreaDetectorCam, 'cam1:')
class SimDetector(DetectorBase):
_html_docs = ['simDetectorDoc.html']
cam = C(cam.SimDetectorCam, 'cam1:')
class AdscDetector(DetectorBase):
_html_docs = ['adscDoc.html']
cam = C(cam.AdscDetectorCam, 'cam1:')
class AndorDetector(DetectorBase):
_html_docs = ['andorDoc.html']
cam = C(cam.AndorDetectorCam, 'cam1:')
class Andor3Detector(DetectorBase):
_html_docs = ['andor3Doc.html']
cam = C(cam.Andor3DetectorCam, 'cam1:')
class BrukerDetector(DetectorBase):
_html_docs = ['BrukerDoc.html']
cam = C(cam.BrukerDetectorCam, 'cam1:')
class DexelaDetector(DetectorBase):
_html_docs = ['DexelaDoc.html']
cam = C(cam.DexelaDetectorCam, 'cam1:')
class EmergentVisionDetector(DetectorBase):
_html_docs = ['EVTDoc.html']
cam = C(cam.EmergentVisionDetectorCam, 'cam1:')
class EigerDetector(DetectorBase):
_html_docs = ['EigerDoc.html']
cam = C(cam.EigerDetectorCam, 'cam1:')
class FirewireLinDetector(DetectorBase):
_html_docs = ['FirewireWinDoc.html']
cam = C(cam.FirewireLinDetectorCam, 'cam1:')
class FirewireWinDetector(DetectorBase):
_html_docs = ['FirewireWinDoc.html']
cam = C(cam.FirewireWinDetectorCam, 'cam1:')
class GreatEyesDetector(DetectorBase):
_html_docs = [] # the documentation is not public
cam = C(cam.GreatEyesDetectorCam, 'cam1:')
class LightFieldDetector(DetectorBase):
_html_docs = ['LightFieldDoc.html']
cam = C(cam.LightFieldDetectorCam, 'cam1:')
class Mar345Detector(DetectorBase):
_html_docs = ['Mar345Doc.html']
cam = C(cam.Mar345DetectorCam, 'cam1:')
class MarCCDDetector(DetectorBase):
_html_docs = ['MarCCDDoc.html']
cam = C(cam.MarCCDDetectorCam, 'cam1:')
class PerkinElmerDetector(DetectorBase):
_html_docs = ['PerkinElmerDoc.html']
cam = C(cam.PerkinElmerDetectorCam, 'cam1:')
class PSLDetector(DetectorBase):
_html_docs = ['PSLDoc.html']
cam = C(cam.PSLDetectorCam, 'cam1:')
class PICamDetector(DetectorBase):
_html_docs = ['PICamDoc.html']
cam = C(cam.PICamDetectorCam, 'cam1:')
class PilatusDetector(DetectorBase):
_html_docs = ['pilatusDoc.html']
cam = C(cam.PilatusDetectorCam, 'cam1:')
class PixiradDetector(DetectorBase):
_html_docs = ['PixiradDoc.html']
cam = C(cam.PixiradDetectorCam, 'cam1:')
class PointGreyDetector(DetectorBase):
_html_docs = ['PointGreyDoc.html']
cam = C(cam.PointGreyDetectorCam, 'cam1:')
class ProsilicaDetector(DetectorBase):
_html_docs = ['prosilicaDoc.html']
cam = C(cam.ProsilicaDetectorCam, 'cam1:')
class PvcamDetector(DetectorBase):
_html_docs = ['pvcamDoc.html']
cam = C(cam.PvcamDetectorCam, 'cam1:')
class RoperDetector(DetectorBase):
_html_docs = ['RoperDoc.html']
cam = C(cam.RoperDetectorCam, 'cam1:')
class URLDetector(DetectorBase):
_html_docs = ['URLDoc.html']
cam = C(cam.URLDetectorCam, 'cam1:')
class UVCDetector(DetectorBase):
_html_docs = ['UVCDoc.html']
cam = C(cam.UVCDetectorCam, 'cam1:')
class Xspress3Detector(DetectorBase):
_html_docs = ['Xspress3Doc.html']
cam = C(cam.Xspress3DetectorCam, 'det1:')
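# A hypothetical site-specific detector follows the same pattern as the
# classes above (class name and cam choice are illustrative):
class MyBeamlineDetector(DetectorBase):
    _html_docs = []  # no public documentation for this sketch
    cam = C(cam.AreaDetectorCam, 'cam1:')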
| 27.264
| 81
| 0.639965
| 689
| 6,816
| 6.172714
| 0.330914
| 0.025394
| 0.044439
| 0.06466
| 0.063485
| 0.05549
| 0.05549
| 0.042323
| 0.021162
| 0.021162
| 0
| 0.009916
| 0.245452
| 6,816
| 249
| 82
| 27.373494
| 0.817033
| 0.165346
| 0
| 0.042254
| 0
| 0
| 0.184794
| 0.004001
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028169
| false
| 0
| 0.021127
| 0
| 0.640845
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
8a89fcb6aa9605bd61ebc69c816df71f6eb1ab81
| 673
|
py
|
Python
|
indico/modules/events/abstracts/compat.py
|
aiforrural/Digital-Events-Example
|
628aaa8727b259b9367ac0ae1c5ba8e9e95eca82
|
[
"MIT"
] | 1
|
2021-02-08T09:34:27.000Z
|
2021-02-08T09:34:27.000Z
|
indico/modules/events/abstracts/compat.py
|
pamirk/indico
|
c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768
|
[
"MIT"
] | null | null | null |
indico/modules/events/abstracts/compat.py
|
pamirk/indico
|
c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import redirect
from indico.modules.events.abstracts.models.abstracts import Abstract
from indico.web.flask.util import url_for
from indico.web.rh import RHSimple
@RHSimple.wrap_function
def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False):
abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404()
return redirect(url_for('abstracts.' + endpoint, abstract, management=management))
| 35.421053
| 86
| 0.786033
| 101
| 673
| 5.128713
| 0.594059
| 0.057915
| 0.050193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018803
| 0.130758
| 673
| 18
| 87
| 37.388889
| 0.866667
| 0.297177
| 0
| 0
| 0
| 0
| 0.021459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
8a8bbdd35a1d135f6e6a32befca7b762678940d4
| 327
|
py
|
Python
|
Python/Higher-Or-Lower/hol/__init__.py
|
AustinTSchaffer/DailyProgrammer
|
b16d9babb298ac5e879c514f9c4646b99c6860a8
|
[
"MIT"
] | 1
|
2020-07-28T17:07:35.000Z
|
2020-07-28T17:07:35.000Z
|
Python/Higher-Or-Lower/hol/__init__.py
|
AustinTSchaffer/DailyProgrammer
|
b16d9babb298ac5e879c514f9c4646b99c6860a8
|
[
"MIT"
] | 5
|
2021-04-06T18:25:29.000Z
|
2021-04-10T15:13:28.000Z
|
Python/Higher-Or-Lower/hol/__init__.py
|
AustinTSchaffer/DailyProgrammer
|
b16d9babb298ac5e879c514f9c4646b99c6860a8
|
[
"MIT"
] | null | null | null |
r"""
Contains classes and methods that can be used when simulating the game
Higher-or-Lower and performing statistical analysis on different games.
"""
from hol import (
cards,
constants,
)
from hol._hol import (
generate_all_games,
should_pick_higher,
is_a_winning_game,
generate_win_statistics,
)
| 17.210526
| 71
| 0.737003
| 45
| 327
| 5.133333
| 0.8
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201835
| 327
| 18
| 72
| 18.166667
| 0.885057
| 0.434251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a8bd51e1880ca1483e91fca0ab41237e4c4f869
| 4,896
|
py
|
Python
|
Lib/hTools2/dialogs/glyphs/slide.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 11
|
2015-01-06T15:43:56.000Z
|
2019-07-27T00:35:20.000Z
|
Lib/hTools2/dialogs/glyphs/slide.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 2
|
2017-05-17T10:11:46.000Z
|
2018-11-21T21:43:43.000Z
|
Lib/hTools2/dialogs/glyphs/slide.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 4
|
2015-01-10T13:58:50.000Z
|
2019-12-18T15:40:14.000Z
|
# [h] slide selected glyphs
from mojo.roboFont import CurrentFont, CurrentGlyph, version
from vanilla import *
from hTools2 import hDialog
from hTools2.modules.fontutils import get_full_name, get_glyphs
from hTools2.modules.messages import no_font_open, no_glyph_selected
class slideGlyphsDialog(hDialog):
'''A dialog to slide the selected glyphs vertically and/or horizontally.
.. image:: imgs/glyphs/slide.png
'''
_moveX = 0
_moveY = 0
_xMax = 1000
_xMin = -1000
_yMax = 500
_yMin = -500
font = None
font_name = '(no font selected)'
def __init__(self):
# window
self.title = "slide"
self.button_width = 70
self.column_1 = 20
self.column_2 = 240
self.width = self.column_1 + self.column_2 + self.button_width + self.padding_x*3
self.height = self.text_height*3 + self.padding_y*4
self.w = HUDFloatingWindow((self.width, self.height), self.title)
x = self.padding_x
y = self.padding_y
# current font name
self.w.box = Box(
(x, y, self.column_1 + self.column_2, self.text_height))
self.w.box.text = TextBox(
(5, 0, self.column_1 + self.column_2, self.text_height),
self.font_name,
sizeStyle=self.size_style)
x += (self.column_2 + self.column_1 + self.padding_x)
self.w.button_update_font = SquareButton(
(x, y, self.button_width, self.text_height),
"update",
callback=self.update_font_callback,
sizeStyle=self.size_style)
# x slider
x = self.padding_x
y += self.text_height + self.padding_y
self.w.x_label = TextBox(
(x, y + 5, self.column_1, self.text_height),
"x",
sizeStyle=self.size_style)
x += self.column_1
self.w.x_slider = Slider(
(x, y, self.column_2, self.text_height),
value=0,
maxValue=self._xMax,
minValue=self._xMin,
callback=self.slide_callback,
sizeStyle=self.size_style)
x += (self.column_2 + self.padding_x)
self.w.button_restore_x = SquareButton(
(x, y, self.button_width, self.text_height),
"reset x",
callback=self.restore_x_callback,
sizeStyle=self.size_style)
# y slider
x = self.padding_x
y += (self.text_height + self.padding_y)
self.w.y_label = TextBox(
(x, y + 5, self.column_1, self.text_height),
"y",
sizeStyle=self.size_style)
x += self.column_1
self.w.y_slider = Slider(
(x, y, self.column_2, self.text_height),
value=0,
maxValue=self._yMax,
minValue=self._yMin,
callback=self.slide_callback,
sizeStyle=self.size_style)
x += (self.column_2 + self.padding_x)
self.w.button_restore_y = SquareButton(
(x, y, self.button_width, self.text_height),
"reset y",
callback=self.restore_y_callback,
sizeStyle=self.size_style)
# open
self.w.open()
self.update_font()
# callbacks
def restore_x(self):
self._moveX = 0
self.w.x_slider.set(self._moveX)
def restore_y(self):
self._moveY = 0
self.w.y_slider.set(self._moveY)
def restore_x_callback(self, sender):
self.restore_x()
def restore_y_callback(self, sender):
self.restore_y()
def update_font(self):
self.font = CurrentFont()
if self.font is not None:
self.w.box.text.set(get_full_name(self.font))
self.set_defaults()
self.restore_x()
self.restore_y()
else:
print no_font_open
def set_defaults(self):
self._xMax = self.font.info.unitsPerEm
self._yMax = self.font.info.unitsPerEm / 2
self._xMin = -self._xMax
self._yMin = -self._yMax
def update_font_callback(self, sender):
self.update_font()
def slide_callback(self, sender):
xValue = self.w.x_slider.get()
yValue = self.w.y_slider.get()
x = self._moveX - xValue
y = self._moveY - yValue
self._moveX = xValue
self._moveY = yValue
glyph_names = get_glyphs(self.font)
if len(glyph_names) > 0:
for glyph_name in glyph_names:
# RF 2.0
if version[0] == '2':
self.font[glyph_name].moveBy((-x, -y))
# RF 1.8.X
else:
self.font[glyph_name].move((-x, -y))
else:
print no_glyph_selected
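# Worked example (hypothetical slider values): if the x slider moves from
# 0 to 50, slide_callback computes x = _moveX - xValue = -50 and moves each
# selected glyph by (-x, -y) = (50, 0), so glyphs track the slider by the
# incremental delta rather than its absolute position.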
| 31.184713
| 89
| 0.55576
| 611
| 4,896
| 4.217676
| 0.175123
| 0.069849
| 0.065192
| 0.046566
| 0.397749
| 0.35196
| 0.32402
| 0.313931
| 0.313931
| 0.282499
| 0
| 0.019565
| 0.34232
| 4,896
| 156
| 90
| 31.384615
| 0.780745
| 0.020221
| 0
| 0.291667
| 0
| 0
| 0.009854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.041667
| null | null | 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a92dd9cacd718af3ee73590efc1c1d73a3833aa
| 12,093
|
py
|
Python
|
beansdbadmin/core/client.py
|
ariesdevil/beansdbadmin
|
3165087ef57b7511ab84fbc50cf16eb8f54d83cd
|
[
"BSD-3-Clause"
] | 11
|
2018-08-28T09:16:02.000Z
|
2021-11-08T09:39:15.000Z
|
beansdbadmin/core/client.py
|
ariesdevil/beansdbadmin
|
3165087ef57b7511ab84fbc50cf16eb8f54d83cd
|
[
"BSD-3-Clause"
] | 2
|
2019-08-29T03:27:24.000Z
|
2020-07-24T02:45:39.000Z
|
beansdbadmin/core/client.py
|
ariesdevil/beansdbadmin
|
3165087ef57b7511ab84fbc50cf16eb8f54d83cd
|
[
"BSD-3-Clause"
] | 4
|
2019-05-10T12:10:31.000Z
|
2020-07-17T03:22:02.000Z
|
#!/usr/bin/python
# encoding: utf-8
'''a rich client
1. for one server (instead of multi like in libmc.Client)
2. encapsulate @, ?, gc ...
use it instead of libmc.Client
'''
import telnetlib
import logging
import libmc
import string
import urllib
import itertools
import warnings
from collections import defaultdict
from beansdbadmin.core.hint import parse_new_hint_body
from beansdbadmin.core.data import parse_records
from beansdbadmin.core.hash import get_khash64
def get_url_content(url):
return urllib.urlopen(url).read()
def check_bucket(bucket):
assert 0 <= bucket < 16
def dir_to_dict(dir_str):
d = dict()
if dir_str:
for line in [x for x in dir_str.split('\n') if x]:
key_or_bucket, _hash, ver_or_count = line.split(' ')
d[key_or_bucket] = int(_hash) & 0xffff, int(ver_or_count)
return d
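# Example: dir_to_dict("0/ 1234 42") returns {'0/': (1234, 42)} --
# the hash is masked to 16 bits and the count parsed as an integer.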
def get_bucket_keys_count(store, bucket, depth=1):
cmd = "@"
sub = bucket
if depth == 2:
cmd = "@%x" % (bucket/16)
sub = bucket % 16
result = store.get(cmd)
if result:
lines = result.split('\n')
for line in lines:
if len(line) == 0:
continue
d, _, c = line.split()
if d.endswith('/'):
bucket_ = int(d[0], 16)
if bucket_ == sub:
return int(c)
raise Exception('get %s from %s, reply = [%s], bucket %x not found' % (cmd, store, result, bucket))
def get_buckets_keys_count(store):
""" return dict: buckets -> count """
st = {}
try:
for line in (store.get('@') or '').split('\n'):
if line:
d, _, c = line.split(' ')
if not d.endswith('/'):
continue
st[int(d[0], 16)] = int(c)
return st
except IOError:
raise Exception("cannot get @ from %s" % (store))
def get_primary_buckets(store):
""" return possible primary buckets, might be wrong on temporary nodes,
result is list of buckets in integer
"""
ss = get_buckets_keys_count(store)
bucket_list = ss.items()
bucket_list = [x for x in bucket_list if x[1] > 0]
if not bucket_list:
return None
bucket_list.sort(lambda a, b: cmp(a[1], b[1]), reverse=True)
result = [bucket_list[0]]
for i in bucket_list[1:]:
if result[-1][1] / i[1] >= 2:
break
result.append(i)
return [x[0] for x in result]
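# Worked example (hypothetical counts): {0: 1000, 1: 980, 2: 3} sorts to
# [(0, 1000), (1, 980), (2, 3)]; 1000/980 < 2 keeps bucket 1, but
# 980/3 >= 2 stops the scan, so the primary buckets are [0, 1].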
def get_key_info_disk(store, key):
'''return ver, vhash, flag, vsz, ts, fid, pos'''
info = store.get('??' + key)
if info:
return [int(x) for x in info.split()]
def is_gc_running(ip, port):
s = get_gc_status(ip, port)
if s and s.find('running') >= 0:
return True
return False
def get_gc_status(ip, port):
t = telnetlib.Telnet(ip, port)
t.write('optimize_stat\r\n')
out = t.read_until('\n')
t.write('quit\r\n')
t.close()
return out.strip("\r\n")
def connect(server, **kwargs):
comp_threshold = kwargs.pop('comp_threshold', 0)
prefix = kwargs.pop('prefix', None)
if prefix is not None:
warnings.warn('"prefix" is deprecated. '
'use douban.wrapper.Prefix instead.')
c = libmc.Client([server],
do_split=0,
comp_threshold=comp_threshold,
prefix=prefix)
c.config(libmc.MC_CONNECT_TIMEOUT, 300) # 0.3s
c.config(libmc.MC_POLL_TIMEOUT, 3000) # 3s
c.config(libmc.MC_RETRY_TIMEOUT, 5) # 5s
return c
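# Usage sketch (hypothetical address):
#     store = MCStore('10.0.0.1:7900')
#     store.set('key', 'value')
# MCStore below wraps connect() and adds last-error checking around the
# raw libmc calls.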
class MCStore(object):
IGNORED_LIBMC_RET = frozenset([
libmc.MC_RETURN_OK,
libmc.MC_RETURN_INVALID_KEY_ERR
])
def __init__(self, addr):
self.addr = addr
self.host, port = addr.split(":")
self.port = int(port)
self.mc = connect(addr)
def __repr__(self):
return '<MCStore(addr=%s)>' % repr(self.addr)
def __str__(self):
return self.addr
def set(self, key, data, rev=0):
return bool(self.mc.set(key, data, rev))
def set_raw(self, key, data, rev=0, flag=0):
if rev < 0:
raise Exception(str(rev))
return self.mc.set_raw(key, data, rev, flag)
def set_multi(self, values, return_failure=False):
return self.mc.set_multi(values, return_failure=return_failure)
def _check_last_error(self):
last_err = self.mc.get_last_error()
if last_err not in self.IGNORED_LIBMC_RET:
raise IOError(last_err, self.mc.get_last_strerror())
def get(self, key):
try:
r = self.mc.get(key)
if r is None:
self._check_last_error()
return r
except ValueError:
self.mc.delete(key)
def get_raw(self, key):
r, flag = self.mc.get_raw(key)
if r is None:
self._check_last_error()
return r, flag
def get_multi(self, keys):
r = self.mc.get_multi(keys)
self._check_last_error()
return r
def delete(self, key):
return bool(self.mc.delete(key))
def delete_multi(self, keys, return_failure=False):
return self.mc.delete_multi(keys, return_failure=return_failure)
def exists(self, key):
return bool(self.mc.get('?' + key))
def incr(self, key, value):
return self.mc.incr(key, int(value))
class DBClient(MCStore):
def __init__(self, addr):
MCStore.__init__(self, addr)
self._is_old = None
def stats(self):
stats = self.mc.stats()
return stats.values()[0] if stats else None
def is_old(self):
if self._is_old is None:
ver = self.get_server_version()
self._is_old = (ver.strip().split(".")[0] == "0")
return self._is_old
def get_collision_summary(self, bucket):
check_bucket(bucket)
raw = self.get("@collision_%x" % bucket)
if raw is None:
return None
count, hcount, khash, data_size = raw.split()
return (int(count), int(hcount), int(khash, 16), int(data_size))
def get_collision(self, bucket):
check_bucket(bucket)
collisions = defaultdict(dict)
hint_data = self.get("@collision_all_%x" % bucket)
if hint_data is None:
return dict()
for key, meta, _ in parse_new_hint_body(hint_data):
khash_str, _, ver, vhash = meta
collisions[khash_str][key] = (vhash, ver)
return dict(collisions)
def get_records_by_khash_raw(self, khash):
if self.is_old():
return []
if not isinstance(khash, str):
khash = "%016x" % khash
return self.get("@@" + khash)
def get_records_by_khash(self, khash_str):
raw = self.get_records_by_khash_raw(khash_str)
if raw:
return parse_records(raw, False)
else:
return []
def start_gc(self, bucket='', start_fid=0, end_fid=None):
""" bucket must be in 0 or 00 string """
if bucket:
assert isinstance(bucket, basestring) and len(bucket) <= 2
t = telnetlib.Telnet(self.host, self.port)
tree = '@%s' % bucket
if end_fid is None:
gc_cmd = 'gc {} {}\n'.format(tree, start_fid)
else:
gc_cmd = 'gc {} {} {}\n'.format(tree, start_fid, end_fid)
t.write(gc_cmd)
out = t.read_until('\n').strip('\r\n')
assert out == 'OK'
t.write('quit\n')
t.close()
def start_gc_all_buckets(self, db_depth):
hex_digits = string.digits + 'abcdef'
buckets_iter = itertools.product(*[hex_digits for _ in range(db_depth)])
buckets = [''.join(i) for i in buckets_iter]
self.start_gc_buckets(buckets)
def start_gc_buckets(self, buckets):
for b in buckets:
self.start_gc(bucket=b)
while True:
status = self.get_gc_status()
if status.find('running') >= 0:
continue
elif status == 'success':
print "bucket %s gc done" % b
break
                elif status == 'fail':
                    raise Exception("optimize_stat = fail")
                else:
                    raise Exception(status)
def get_gc_status(self):
return get_gc_status(self.host, self.port)
def get_version(self, key):
meta = self.get("?" + key)
if meta:
return int(meta.split()[0])
def item_count(self):
s = self.stats()
if s is None:
return None
return int(s['total_items'])
def get_key_info_mem(self, key, khash64=None):
''' return (vhash, ver) or None'''
if khash64 is None:
khash64 = get_khash64(key)
khash32_str = "@%08x" % (khash64 >> 32)
_dir = self.get_dir(khash32_str)
if self.is_old():
return _dir.get(key, None)
else:
return _dir.get("%016x" % khash64, None)
def get_khash_info_mem(self, khash):
''' return [(key, (vhash, ver))], key is "" for v2.'''
khash32 = "@%08x" % (khash >> 32)
_dir = self.get_dir(khash32)
ret = []
if self.is_old():
for k, (vhash, ver) in _dir.iteritems():
if get_khash64(k) == khash:
ret.append((k, (vhash, ver)))
else:
for k, (vhash, ver) in _dir.iteritems():
if int(k, 16) == khash:
return [("", (int(vhash), ver))]
return ret
def get_server_version(self):
try:
st = self.stats()
if st:
return st["version"]
except IOError:
logging.error("fail to get version %s", self)
except KeyError:
logging.error("fail to get version %s %s", self, st)
def get_dir(self, path):
''' return dict
case1: map dir(0-f) to (hash, count),
like {'0/': (1471, 27784005), ... },
case2: map key(or khash) to (vhash, version),
like {'3000000377e9c2ad': (22212, 1), ... }'''
try:
content = self.get(path)
except IOError:
content = ''
return dir_to_dict(content)
def list_dir(self, d): # FIXME: d should not need prefix @?
'''list all KEY in the dir!
not use it if dir is large!'''
for path, (vhash, ver) in sorted(self.get_dir(d).items()):
if path.endswith('/') and len(path) == 2:
for v in self.list_dir(d + path[:-1]):
yield v
else:
yield path, int(vhash), int(ver)
def get_bucket_keys_count(self, bucket, depth=1):
return get_bucket_keys_count(self, bucket, depth)
def get_key_info_disk(self, key):
'''return ver, vhash, flag, vsz, ts, fid, pos'''
return get_key_info_disk(self, key)
def prepare(self, data):
return libmc.encode_value(data, self.mc.comp_threshold)
def close(self):
pass
def test_new(addr, bucket):
b = bucket
c = DBClient(addr)
print "stats:", c.stats()
print 'version:', c.get_server_version()
print "isold:", c.is_old()
print "dir root:", c.get_dir("@")
print "bucket key count:", c.get_bucket_keys_count(int(b))
print "item_count:", c.item_count()
print "primary_buckets", get_primary_buckets(c)
leaf = c.get_dir("@" + b + "000000")
print "a dir leaf:", leaf
khash_str = list(leaf)[0]
print "a khash_str", khash_str
r = c.get_records_by_khash(khash_str)[0]
k = r[0]
print "key, len(value), (flag, tstamp, ver):", k, r[1], r[3:]
print "key info mem:", c.get_key_info_mem(k)
print "key info disk(ver, vhash, flag, vsz, ts, fid, pos):", \
c.get_key_info_disk(k)
print "key version:", c.get_version(k)
print "collision_summary", c.get_collision_summary(int(b))
print "gc status:", c.get_gc_status()
if __name__ == '__main__':
test_new("rosa3a:7900", '3')
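# A minimal interactive sketch, assuming a reachable beansdb node at a
# hypothetical address:
#
#   client = DBClient('127.0.0.1:7900')
#   client.set('/test/key', 'value')
#   print client.get_version('/test/key')    # integer version of the key
#   print client.get_bucket_keys_count(3)    # key count under bucket 0x3
#   client.close()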
| 30.308271
| 103
| 0.561399
| 1,649
| 12,093
| 3.936325
| 0.164342
| 0.019411
| 0.009706
| 0.011092
| 0.173779
| 0.099676
| 0.060545
| 0.037899
| 0.02126
| 0.0114
| 0
| 0.018807
| 0.309683
| 12,093
| 398
| 104
| 30.384422
| 0.758745
| 0.00645
| 0
| 0.133117
| 0
| 0
| 0.063222
| 0.001881
| 0
| 0
| 0.000537
| 0.002513
| 0.00974
| 0
| null | null | 0.003247
| 0.035714
| null | null | 0.051948
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a975211bf46410d2e2a9a98de298bed52013baa
| 6,589
|
py
|
Python
|
lib/formatter/text.py
|
ylafon/redbot
|
87f4edcc8ccda35f556331abd1e76d5e9b79cdd0
|
[
"Unlicense"
] | null | null | null |
lib/formatter/text.py
|
ylafon/redbot
|
87f4edcc8ccda35f556331abd1e76d5e9b79cdd0
|
[
"Unlicense"
] | null | null | null |
lib/formatter/text.py
|
ylafon/redbot
|
87f4edcc8ccda35f556331abd1e76d5e9b79cdd0
|
[
"Unlicense"
] | 1
|
2021-06-01T12:08:29.000Z
|
2021-06-01T12:08:29.000Z
|
#!/usr/bin/env python
"""
HAR Formatter for REDbot.
"""
__author__ = "Jerome Renard <[email protected]>"
__copyright__ = """\
Copyright (c) 2008-2010 Mark Nottingham
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import operator
import nbhttp.error as nberr
import redbot.speak as rs
from redbot.formatter import Formatter
nl = u"\n"
# TODO: errors and status on stderr with CLI?
class BaseTextFormatter(Formatter):
"""
Base class for text formatters."""
media_type = "text/plain"
msg_categories = [
rs.c.GENERAL, rs.c.CONNECTION, rs.c.CONNEG,
rs.c.CACHING, rs.c.VALIDATION, rs.c.RANGE
]
link_order = [
('link', 'Head Links'),
('script', 'Script Links'),
('frame', 'Frame Links'),
('iframe', 'IFrame Links'),
('img', 'Image Links'),
]
error_template = "Error: %s\n"
def __init__(self, *args, **kw):
Formatter.__init__(self, *args, **kw)
def start_output(self):
pass
def feed(self, red, chunk):
pass
def status(self, msg):
pass
def finish_output(self):
"Fill in the template with RED's results."
if self.red.res_complete:
self.output(self.format_headers(self.red) + nl + nl)
self.output(self.format_recommendations(self.red) + nl)
else:
            if self.red.res_error is None:
pass
elif self.red.res_error['desc'] == nberr.ERR_CONNECT['desc']:
self.output(self.error_template % "Could not connect to the server (%s)" % \
self.red.res_error.get('detail', "unknown"))
elif self.red.res_error['desc'] == nberr.ERR_URL['desc']:
self.output(self.error_template % self.red.res_error.get(
'detail', "RED can't fetch that URL."))
elif self.red.res_error['desc'] == nberr.ERR_READ_TIMEOUT['desc']:
self.output(self.error_template % self.red.res_error['desc'])
elif self.red.res_error['desc'] == nberr.ERR_HTTP_VERSION['desc']:
self.output(self.error_template % "<code>%s</code> isn't HTTP." % \
self.red.res_error.get('detail', '')[:20])
else:
raise AssertionError, "Unidentified incomplete response error."
def format_headers(self, red):
out = [u"HTTP/%s %s %s" % (
red.res_version, red.res_status, red.res_phrase)]
return nl.join(out + [u"%s:%s" % h for h in red.res_hdrs])
def format_recommendations(self, red):
return "".join([self.format_recommendation(red, category) \
for category in self.msg_categories])
def format_recommendation(self, red, category):
messages = [msg for msg in red.messages if msg.category == category]
if not messages:
return ""
out = []
if [msg for msg in messages]:
out.append(u"* %s:" % category)
for m in messages:
out.append(
u" * %s" % (self.colorize(m.level, m.summary["en"] % m.vars))
)
smsgs = [msg for msg in getattr(m.subrequest, "messages", []) if msg.level in [rs.l.BAD]]
if smsgs:
out.append("")
for sm in smsgs:
out.append(
u" * %s" %
(self.colorize(sm.level, sm.summary["en"] % sm.vars))
)
out.append(nl)
out.append(nl)
return nl.join(out)
@staticmethod
def colorize(level, string):
# info
color_start = u"\033[0;32m"
color_end = u"\033[0;39m"
if level == "good":
color_start = u"\033[1;32m"
color_end = u"\033[0;39m"
if level == "bad":
color_start = u"\033[1;31m"
color_end = u"\033[0;39m"
if level == "warning":
color_start = u"\033[1;33m"
color_end = u"\033[0;39m"
if level == "uri":
color_start = u"\033[1;34m"
color_end = u"\033[0;39m"
return color_start + string + color_end
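# A quick sketch of what colorize() produces: a hypothetical call
# colorize("bad", u"HTTP/1.1 500") returns
# u"\033[1;31mHTTP/1.1 500\033[0;39m", i.e. the input wrapped in the
# ANSI bright-red and reset escape sequences selected above.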
class TextFormatter(BaseTextFormatter):
"""
Format a RED object as text.
"""
name = "txt"
media_type = "text/plain"
def __init__(self, *args, **kw):
BaseTextFormatter.__init__(self, *args, **kw)
def finish_output(self):
BaseTextFormatter.finish_output(self)
self.done()
class TextListFormatter(BaseTextFormatter):
"""
Format multiple RED responses as a textual list.
"""
name = "txt"
media_type = "text/plain"
can_multiple = True
def __init__(self, *args, **kw):
BaseTextFormatter.__init__(self, *args, **kw)
def finish_output(self):
"Fill in the template with RED's results."
BaseTextFormatter.finish_output(self)
sep = "=" * 78
for hdr_tag, heading in self.link_order:
droids = [d[0] for d in self.red.link_droids if d[1] == hdr_tag]
self.output("%s\n%s (%d)\n%s\n" % (
sep, heading, len(droids), sep
))
if droids:
droids.sort(key=operator.attrgetter('uri'))
for droid in droids:
self.output(self.format_uri(droid) + nl + nl)
self.output(self.format_headers(droid) + nl + nl)
self.output(self.format_recommendations(droid) + nl + nl)
self.done()
def format_uri(self, red):
return self.colorize("uri", red.uri)
| 33.277778
| 101
| 0.587039
| 850
| 6,589
| 4.437647
| 0.303529
| 0.033404
| 0.026511
| 0.03579
| 0.27492
| 0.239926
| 0.170467
| 0.140774
| 0.095705
| 0.081919
| 0
| 0.015914
| 0.294278
| 6,589
| 197
| 102
| 33.446701
| 0.795269
| 0.010472
| 0
| 0.234483
| 0
| 0
| 0.26713
| 0.003947
| 0
| 0
| 0
| 0.005076
| 0.006897
| 0
| null | null | 0.027586
| 0.027586
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a995f399ed25fbe111acb3f8ad5749b538eef0a
| 433
|
py
|
Python
|
python/re_user.py
|
seckcoder/lang-learn
|
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
|
[
"Unlicense"
] | 1
|
2017-10-14T04:23:45.000Z
|
2017-10-14T04:23:45.000Z
|
python/re_user.py
|
seckcoder/lang-learn
|
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
|
[
"Unlicense"
] | null | null | null |
python/re_user.py
|
seckcoder/lang-learn
|
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: [email protected]
import re
from urlparse import urlparse
def parse1():
p = re.compile(r"/(?P<uid>\d+)/(?P<mid>\w+)")
o = urlparse("http://weibo.com/2827699110/yz62AlEjF")
m = p.search(o.path)
print m.group('uid')
print m.group('mid')
def parse2():
    # NOTE: appears unfinished; the string below is never parsed or used
    exc_type_str = "<type 'exceptions.IndexError'>"
parse1()
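# A self-contained sketch of the same pattern: given the URL above,
# urlparse(...).path is '/2827699110/yz62AlEjF', so the named groups
# resolve as follows.
#
#   import re
#   m = re.search(r"/(?P<uid>\d+)/(?P<mid>\w+)", "/2827699110/yz62AlEjF")
#   assert m.group('uid') == '2827699110'
#   assert m.group('mid') == 'yz62AlEjF'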
| 22.789474
| 57
| 0.637413
| 65
| 433
| 4.215385
| 0.707692
| 0.043796
| 0.080292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055249
| 0.163972
| 433
| 18
| 58
| 24.055556
| 0.701657
| 0.251732
| 0
| 0
| 0
| 0
| 0.310345
| 0.15674
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.181818
| null | null | 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a9a247a499b63acd31b3bc3a6e73d3d156a0e43
| 1,903
|
py
|
Python
|
Assignment1/Part2/Bridge2.py
|
MormonJesus69420/Knowledge-Based-Systems-Project
|
8b1e330c64dd58743513f3e48efb6569457beb94
|
[
"WTFPL"
] | null | null | null |
Assignment1/Part2/Bridge2.py
|
MormonJesus69420/Knowledge-Based-Systems-Project
|
8b1e330c64dd58743513f3e48efb6569457beb94
|
[
"WTFPL"
] | null | null | null |
Assignment1/Part2/Bridge2.py
|
MormonJesus69420/Knowledge-Based-Systems-Project
|
8b1e330c64dd58743513f3e48efb6569457beb94
|
[
"WTFPL"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
from Car2 import Car
@dataclass
class Bridge:
"""Bridge class simulating the behaviour of bridge in simulation.
On can set specific length and capacity for the bridge to change the overall
behaviour of bridge in the simulation and see how it impacts the scores for
cars.
"""
capacity: int = field(default=5)
"""Set amount of cars that the bridge can accommodate before collapsing."""
length: int = field(default=10)
"""Length of bridge deciding how much time a car will use to cross it."""
cars: List[Car] = field(default_factory=list, repr=False, init=False)
"""List of all of the cars that are currently on the bridge."""
def has_collapsed(self) -> bool:
"""Simple method to check if bridge has collapsed.
Returns:
bool: True if bridge has collapsed, False otherwise.
"""
return len(self.cars) > self.capacity
def move_cars(self) -> List[Car]:
""" Moves cars across the bridge and returns cars that have crossed it.
Returns:
List[Car]: List of cars that have crossed the bridge this turn.
"""
finished_cars = list()
for c in self.cars:
c.distance_on_bridge += c.speed
if c.distance_on_bridge >= self.length:
c.distance_on_bridge = 0
finished_cars.append(c)
self.cars = [c for c in self.cars if c not in finished_cars]
return finished_cars
def collapse_bridge(self) -> List[Car]:
"""Returns a list of all cars on bridge and sets cars to empty list.
Returns:
List[Car]: List of cars that were on bridge when it collapsed.
"""
temp = self.cars
for c in temp:
c.distance_on_bridge = 0
self.cars = list()
return temp
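# A minimal usage sketch. The real Car comes from Car2; the stand-in
# below assumes only the two attributes move_cars() actually touches
# (speed and distance_on_bridge):
#
#   from dataclasses import dataclass
#
#   @dataclass
#   class StubCar:
#       speed: int = 5
#       distance_on_bridge: int = 0
#
#   bridge = Bridge(capacity=2, length=10)
#   bridge.cars = [StubCar(), StubCar(), StubCar()]
#   print(bridge.has_collapsed())          # True: 3 cars > capacity 2
#   print(len(bridge.collapse_bridge()))   # 3, and bridge.cars is now empty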
| 28.833333
| 80
| 0.62743
| 268
| 1,903
| 4.395522
| 0.339552
| 0.040747
| 0.037351
| 0.057725
| 0.101868
| 0.047538
| 0.047538
| 0
| 0
| 0
| 0
| 0.004481
| 0.296374
| 1,903
| 65
| 81
| 29.276923
| 0.87528
| 0.328954
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.12
| 0
| 0.52
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
8a9d019bec9e50c7c8d759ea60e658149d43ef2a
| 2,561
|
py
|
Python
|
audiomentations/core/utils.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 1
|
2021-02-03T19:12:04.000Z
|
2021-02-03T19:12:04.000Z
|
audiomentations/core/utils.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | null | null | null |
audiomentations/core/utils.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 1
|
2021-07-08T07:33:10.000Z
|
2021-07-08T07:33:10.000Z
|
import os
from pathlib import Path
import numpy as np
AUDIO_FILENAME_ENDINGS = (".aiff", ".flac", ".m4a", ".mp3", ".ogg", ".opus", ".wav")
def get_file_paths(
root_path, filename_endings=AUDIO_FILENAME_ENDINGS, traverse_subdirectories=True
):
"""Return a list of paths to all files with the given filename extensions in a directory.
Also traverses subdirectories by default.
"""
file_paths = []
for root, dirs, filenames in os.walk(root_path):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.lower().endswith(filename_endings):
file_paths.append(Path(file_path))
if not traverse_subdirectories:
# prevent descending into subfolders
break
return file_paths
def calculate_rms(samples):
"""Given a numpy array of audio samples, return its Root Mean Square (RMS)."""
return np.sqrt(np.mean(np.square(samples), axis=-1))
def calculate_desired_noise_rms(clean_rms, snr):
"""
Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise ratio (SNR),
calculate the desired RMS of a noise sound to be mixed in.
Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20
:param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0
:param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60
:return:
"""
a = float(snr) / 20
noise_rms = clean_rms / (10 ** a)
return noise_rms
def convert_decibels_to_amplitude_ratio(decibels):
return 10 ** (decibels / 20)
def is_waveform_multichannel(samples):
"""
Return bool that answers the question: Is the given ndarray a multichannel waveform or not?
:param samples: numpy ndarray
:return:
"""
return len(samples.shape) > 1
def is_spectrogram_multichannel(spectrogram):
"""
Return bool that answers the question: Is the given ndarray a multichannel spectrogram?
    :param spectrogram: numpy ndarray
:return:
"""
return len(spectrogram.shape) > 2 and spectrogram.shape[-1] > 1
def convert_float_samples_to_int16(y):
"""Convert floating-point numpy array of audio samples to int16."""
if not issubclass(y.dtype.type, np.floating):
raise ValueError("input samples not floating-point")
return (y * np.iinfo(np.int16).max).astype(np.int16)
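# A worked example of the SNR arithmetic above (illustrative values):
# with clean_rms = 0.5 and snr = 6 dB, a = 6 / 20 = 0.3 and
# noise_rms = 0.5 / 10**0.3 ~= 0.2506.
#
#   import numpy as np
#   samples = np.array([0.5, -0.5, 0.5, -0.5])
#   clean_rms = calculate_rms(samples)           # 0.5
#   calculate_desired_noise_rms(clean_rms, 6)    # ~0.2506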
| 31.617284
| 132
| 0.689184
| 353
| 2,561
| 4.878187
| 0.373938
| 0.034843
| 0.02439
| 0.029617
| 0.14518
| 0.117305
| 0.117305
| 0.072009
| 0.072009
| 0.072009
| 0
| 0.030243
| 0.212417
| 2,561
| 80
| 133
| 32.0125
| 0.8235
| 0.393596
| 0
| 0
| 0
| 0
| 0.043538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.205882
| false
| 0
| 0.088235
| 0.029412
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8aa50b5f8d204a63672c266b3319435ba3678601
| 2,686
|
py
|
Python
|
insight/migrations/0001_initial.py
|
leonhead/chess-insight
|
b893295719df21b4fee10d4e7b01639ded8b42b4
|
[
"MIT"
] | null | null | null |
insight/migrations/0001_initial.py
|
leonhead/chess-insight
|
b893295719df21b4fee10d4e7b01639ded8b42b4
|
[
"MIT"
] | null | null | null |
insight/migrations/0001_initial.py
|
leonhead/chess-insight
|
b893295719df21b4fee10d4e7b01639ded8b42b4
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1 on 2020-09-08 07:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='OpeningSystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='Opening',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('eco', models.CharField(max_length=3)),
('moves', models.TextField()),
('opening_system', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.openingsystem')),
],
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('elo_mean', models.IntegerField(default=0)),
('elo_diff', models.IntegerField(default=0)),
('result', models.CharField(max_length=40)),
('timecontrol', models.CharField(max_length=40)),
('timestamp', models.DateTimeField()),
('raw', models.TextField()),
('opening', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.opening')),
],
),
migrations.CreateModel(
name='Analyse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('turnover_move', models.IntegerField(default=0)),
('turnover_evaluation', models.IntegerField(default=0)),
('unbalance_material', models.IntegerField(default=0)),
('unbalance_officers', models.IntegerField(default=0)),
('unbalance_exchange', models.IntegerField(default=0)),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.game')),
],
),
]
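# Sketch: with the app label implied by the to='insight.*' references
# above, this migration would be applied and reverted with Django's
# standard commands:
#
#   python manage.py migrate insight 0001_initial
#   python manage.py migrate insight zero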
| 41.323077
| 127
| 0.562919
| 252
| 2,686
| 5.873016
| 0.281746
| 0.085135
| 0.118243
| 0.122973
| 0.593243
| 0.487162
| 0.487162
| 0.487162
| 0.487162
| 0.487162
| 0
| 0.016869
| 0.293745
| 2,686
| 64
| 128
| 41.96875
| 0.76331
| 0.016009
| 0
| 0.491228
| 1
| 0
| 0.105642
| 0.007952
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035088
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8aae1314a34df4a8c2038ff3f05e19541e560962
| 2,489
|
py
|
Python
|
tests/integration/test_cmk_describe.py
|
oglok/CPU-Manager-for-Kubernetes
|
503f37dcb20452699ce789b6628fa3ebeb9ffb54
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_cmk_describe.py
|
oglok/CPU-Manager-for-Kubernetes
|
503f37dcb20452699ce789b6628fa3ebeb9ffb54
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_cmk_describe.py
|
oglok/CPU-Manager-for-Kubernetes
|
503f37dcb20452699ce789b6628fa3ebeb9ffb54
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import helpers
from . import integration
def test_cmk_describe_ok():
args = ["describe", "--conf-dir={}".format(helpers.conf_dir("ok"))]
assert helpers.execute(integration.cmk(), args) == b"""{
"path": "/cmk/tests/data/config/ok",
"pools": {
"exclusive": {
"cpuLists": {
"4,12": {
"cpus": "4,12",
"tasks": [
2000
]
},
"5,13": {
"cpus": "5,13",
"tasks": [
2001
]
},
"6,14": {
"cpus": "6,14",
"tasks": [
2002
]
},
"7,15": {
"cpus": "7,15",
"tasks": [
2003
]
}
},
"exclusive": true,
"name": "exclusive"
},
"infra": {
"cpuLists": {
"0-2,8-10": {
"cpus": "0-2,8-10",
"tasks": [
3000,
3001,
3002
]
}
},
"exclusive": false,
"name": "infra"
},
"shared": {
"cpuLists": {
"3,11": {
"cpus": "3,11",
"tasks": [
1000,
1001,
1002,
1003
]
}
},
"exclusive": false,
"name": "shared"
}
}
}
"""
def test_cmk_describe_minimal():
args = ["describe",
"--conf-dir={}".format(helpers.conf_dir("minimal"))]
assert helpers.execute(integration.cmk(), args) == b"""{
"path": "/cmk/tests/data/config/minimal",
"pools": {
"exclusive": {
"cpuLists": {
"0": {
"cpus": "0",
"tasks": []
}
},
"exclusive": true,
"name": "exclusive"
},
"shared": {
"cpuLists": {
"0": {
"cpus": "0",
"tasks": []
}
},
"exclusive": false,
"name": "shared"
}
}
}
"""
| 21.273504
| 74
| 0.451185
| 242
| 2,489
| 4.607438
| 0.487603
| 0.053812
| 0.04843
| 0.0287
| 0.229596
| 0.229596
| 0.179372
| 0.179372
| 0.109417
| 0.109417
| 0
| 0.0625
| 0.382885
| 2,489
| 116
| 75
| 21.456897
| 0.663411
| 0.224588
| 0
| 0.367347
| 0
| 0
| 0.810021
| 0.031837
| 0
| 0
| 0
| 0
| 0.020408
| 1
| 0.020408
| false
| 0
| 0.020408
| 0
| 0.040816
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8aafa8be4338ac950ec6be097349874901cbc17e
| 3,807
|
py
|
Python
|
tests/test_client.py
|
mgobec/python-memcached
|
8ea5fe5fca3a4f0d1201ca9aa50f9701c1baab01
|
[
"Apache-2.0"
] | 1
|
2019-07-19T18:09:38.000Z
|
2019-07-19T18:09:38.000Z
|
tests/test_client.py
|
mgobec/python-memcached
|
8ea5fe5fca3a4f0d1201ca9aa50f9701c1baab01
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client.py
|
mgobec/python-memcached
|
8ea5fe5fca3a4f0d1201ca9aa50f9701c1baab01
|
[
"Apache-2.0"
] | null | null | null |
import collections
import unittest
import driver
from driver.protocol import *
_server = ('localhost', 11211)
_dead_retry = 30
_socket_timeout = 3
_max_receive_size = 4096
class MockConnection(object):
def __init__(self,
server=_server,
dead_retry=30,
socket_timeout=3):
self.server = server
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.closed = True
self.socket = None
self.send_buffer = collections.deque()
self.receive_buffer = collections.deque()
self.on_read = None
self.on_write = None
def open(self):
self.closed = False
self.socket = True
return True
def close(self):
self.closed = True
self.socket = None
def send(self, data):
if self.on_write is not None:
self.on_write()
self.send_buffer.append(data)
def read(self, size=_max_receive_size):
if self.on_read is not None:
self.on_read()
return self.receive_buffer.popleft()
class ClientTests(unittest.TestCase):
def setUp(self):
self.client = driver.Client(_server)
self.mock = MockConnection()
self.client._connection = self.mock
self.client.connect()
def test_initialize_and_connect(self):
self.assertFalse(self.mock.closed)
def test_disconnect(self):
self.client.disconnect()
self.assertTrue(self.mock.closed)
def test_set_value_without_response(self):
self.client.set('testkey', 'testvalue')
self.assertEqual(self.mock.send_buffer.pop(), b'set testkey 0 0 9 noreply\r\ntestvalue\r\n')
def test_set_value_with_stored_response(self):
self.mock.receive_buffer.append(StoreReply.STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertTrue(response)
def test_set_value_with_not_stored_response(self):
self.mock.receive_buffer.append(StoreReply.NOT_STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_exists_response(self):
self.mock.receive_buffer.append(StoreReply.EXISTS + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_error_response(self):
self.mock.receive_buffer.append(Errors.ERROR + Constants.END_LINE)
with self.assertRaises(driver.DriverUnknownException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_server_error_response(self):
self.mock.receive_buffer.append(Errors.SERVER_ERROR + b' Test server error' + Constants.END_LINE)
with self.assertRaises(driver.DriverServerException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_client_error_response(self):
self.mock.receive_buffer.append(Errors.CLIENT_ERROR + b' Test client error' + Constants.END_LINE)
with self.assertRaises(driver.DriverClientException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_exception(self):
error_message = "Test write exception"
self.mock.on_write = lambda: _raise_exception(error_message)
result = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(result)
def test_get_value_exception(self):
error_message = "Test read exception"
self.mock.on_read = lambda: _raise_exception(error_message)
result = self.client.get('testkey')
self.assertIsNone(result)
def _raise_exception(message):
raise Exception(message)
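# Sketch: the suite can be run with the standard unittest runner,
# assuming the 'driver' package under test is importable:
#
#   if __name__ == '__main__':
#       unittest.main()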
| 34.609091
| 105
| 0.677699
| 468
| 3,807
| 5.279915
| 0.196581
| 0.05261
| 0.032376
| 0.048563
| 0.541076
| 0.492513
| 0.413193
| 0.413193
| 0.301497
| 0.176042
| 0
| 0.008452
| 0.22301
| 3,807
| 109
| 106
| 34.926606
| 0.82691
| 0
| 0
| 0.137931
| 0
| 0
| 0.068558
| 0.006304
| 0
| 0
| 0
| 0
| 0.126437
| 1
| 0.206897
| false
| 0
| 0.045977
| 0
| 0.298851
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8ab94a7177eff40dfe2d54daa4adb7bbd8788e95
| 1,084
|
py
|
Python
|
elm_mnist/elm_mnist.py
|
ahara/-blog
|
926ae4808ede6efb1e64381a19a210235a97ac36
|
[
"MIT"
] | null | null | null |
elm_mnist/elm_mnist.py
|
ahara/-blog
|
926ae4808ede6efb1e64381a19a210235a97ac36
|
[
"MIT"
] | null | null | null |
elm_mnist/elm_mnist.py
|
ahara/-blog
|
926ae4808ede6efb1e64381a19a210235a97ac36
|
[
"MIT"
] | null | null | null |
import cPickle
import numpy as np
from elm import ELMClassifier
from sklearn import linear_model
def load_mnist(path='../Data/mnist.pkl'):
with open(path, 'rb') as f:
return cPickle.load(f)
def get_datasets(data):
_train_x, _train_y = data[0][0], np.array(data[0][1]).reshape(len(data[0][1]), 1)
_val_x, _val_y = data[1][0], np.array(data[1][1]).reshape(len(data[1][1]), 1)
_test_x, _test_y = data[2][0], np.array(data[2][1]).reshape(len(data[2][1]), 1)
return _train_x, _train_y, _val_x, _val_y, _test_x, _test_y
if __name__ == '__main__':
# Load data sets
train_x, train_y, val_x, val_y, test_x, test_y = get_datasets(load_mnist())
# Build ELM
cls = ELMClassifier(n_hidden=7000,
alpha=0.93,
activation_func='multiquadric',
regressor=linear_model.Ridge(),
random_state=21398023)
cls.fit(train_x, train_y)
# Evaluate model
    print 'Validation accuracy:', cls.score(val_x, val_y)
    print 'Test accuracy:', cls.score(test_x, test_y)
| 32.848485
| 85
| 0.621771
| 171
| 1,084
| 3.631579
| 0.350877
| 0.016103
| 0.070853
| 0.077295
| 0.096618
| 0.096618
| 0.096618
| 0.096618
| 0.096618
| 0.096618
| 0
| 0.043373
| 0.234317
| 1,084
| 32
| 86
| 33.875
| 0.704819
| 0.035978
| 0
| 0
| 0
| 0
| 0.064361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.181818
| null | null | 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8abc0d6dcbf21ec8770db13b5b8c148d9b2c8d8e
| 1,607
|
py
|
Python
|
migrations/versions/0084_add_job_stats.py
|
cds-snc/notifier-api
|
90b385ec49efbaee7e607516fc7d9f08991af813
|
[
"MIT"
] | 41
|
2019-11-28T16:58:41.000Z
|
2022-01-28T21:11:16.000Z
|
migrations/versions/0084_add_job_stats.py
|
cds-snc/notification-api
|
b1c1064f291eb860b494c3fa65ac256ad70bf47c
|
[
"MIT"
] | 1,083
|
2019-07-08T12:57:24.000Z
|
2022-03-08T18:53:40.000Z
|
migrations/versions/0084_add_job_stats.py
|
cds-snc/notifier-api
|
90b385ec49efbaee7e607516fc7d9f08991af813
|
[
"MIT"
] | 9
|
2020-01-24T19:56:43.000Z
|
2022-01-27T21:36:53.000Z
|
"""empty message
Revision ID: 0084_add_job_stats
Revises: 0083_add_perm_types_and_svc_perm
Create Date: 2017-05-12 13:16:14.147368
"""
# revision identifiers, used by Alembic.
revision = "0084_add_job_stats"
down_revision = "0083_add_perm_types_and_svc_perm"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table(
"job_statistics",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("job_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("emails_sent", sa.BigInteger(), nullable=False),
sa.Column("emails_delivered", sa.BigInteger(), nullable=False),
sa.Column("emails_failed", sa.BigInteger(), nullable=False),
sa.Column("sms_sent", sa.BigInteger(), nullable=False),
sa.Column("sms_delivered", sa.BigInteger(), nullable=False),
sa.Column("sms_failed", sa.BigInteger(), nullable=False),
sa.Column("letters_sent", sa.BigInteger(), nullable=False),
sa.Column("letters_failed", sa.BigInteger(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["job_id"],
["jobs.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_job_statistics_job_id"), "job_statistics", ["job_id"], unique=True)
def downgrade():
op.drop_index(op.f("ix_job_statistics_job_id"), table_name="job_statistics")
op.drop_table("job_statistics")
| 35.711111
| 96
| 0.683261
| 211
| 1,607
| 4.962085
| 0.312796
| 0.091691
| 0.143266
| 0.200573
| 0.579752
| 0.574021
| 0.524355
| 0.143266
| 0.08978
| 0.08978
| 0
| 0.026946
| 0.168637
| 1,607
| 44
| 97
| 36.522727
| 0.756737
| 0.104543
| 0
| 0
| 0
| 0
| 0.209644
| 0.055905
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.1
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8ac88b2d708e6c6e6407bbbd9d9661fb3c6143fd
| 495
|
py
|
Python
|
molecule/ubuntu/tests/test_grafana.py
|
fiaasco/grafana
|
6a5963e43033d88b5bb4760d47755da1069ec26b
|
[
"MIT"
] | null | null | null |
molecule/ubuntu/tests/test_grafana.py
|
fiaasco/grafana
|
6a5963e43033d88b5bb4760d47755da1069ec26b
|
[
"MIT"
] | null | null | null |
molecule/ubuntu/tests/test_grafana.py
|
fiaasco/grafana
|
6a5963e43033d88b5bb4760d47755da1069ec26b
|
[
"MIT"
] | null | null | null |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_package(host):
""" check if packages are installed
"""
assert host.package('grafana').is_installed
def test_service(host):
""" Testing whether the service is running and enabled
"""
assert host.service('grafana-server').is_enabled
assert host.service('grafana-server').is_running
| 24.75
| 63
| 0.739394
| 63
| 495
| 5.634921
| 0.539683
| 0.084507
| 0.11831
| 0.152113
| 0.219718
| 0.219718
| 0.219718
| 0
| 0
| 0
| 0
| 0
| 0.147475
| 495
| 19
| 64
| 26.052632
| 0.841232
| 0.175758
| 0
| 0
| 0
| 0
| 0.15641
| 0.058974
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.222222
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8ac941eb3b632a517433fbaf339a5dae04e7e556
| 6,534
|
py
|
Python
|
heatsink.py
|
sww1235/heatsink-calc
|
3f28ac33b629ab5a12ddea4964f6dbe7dbc3e759
|
[
"MIT"
] | 1
|
2020-11-20T07:09:00.000Z
|
2020-11-20T07:09:00.000Z
|
heatsink.py
|
sww1235/heatsink-calc
|
3f28ac33b629ab5a12ddea4964f6dbe7dbc3e759
|
[
"MIT"
] | null | null | null |
heatsink.py
|
sww1235/heatsink-calc
|
3f28ac33b629ab5a12ddea4964f6dbe7dbc3e759
|
[
"MIT"
] | null | null | null |
"""Class representations of heatsinks."""
import math
from scipy import constants as const
from materials import Aluminium_6063 as aluminium
class Heatsink:
"""
A Heatsink.
Extended by form factor subclasses
"""
def __init__(self, material, configuration):
"""Init material and configuration variables."""
self.material = material
self.configuration = configuration
class CylindricalAnnularFin(Heatsink):
"""Extend base heatsink class with a cylindrical annular fin heatsink."""
def __init__(self, material, finSpacing, finRadius,
finThickness, cylinderDiameter, numberOfFins,
ambAirTemp, maxJunctionTemp, maxSurfaceTemp):
"""
Init remainder of class variables.
        NOTE: all models are based on the finSpacing variable
NOTE: using the simplified model for calculation efficiency.
finSpacing : gap between adjacent fins
finRadius : radius of fin minus central support cylinder
(alternatively, fin depth)
finThickness : thickness of individual fin
cylinderDiameter: diameter of support cylinder
heatsinkLength : overall axial length of heatsink
overall diameter: outside diameter of heatsink including fins.
"""
self.finSpacing = finSpacing # in meters
self.finRadius = finRadius # in meters
self.finThickness = finThickness # in meters
self.cylinderDiameter = cylinderDiameter # in meters
        self.numberOfFins = numberOfFins
self.heatsinkLength = ((self.finThickness * self.numberOfFins)
+ ((self.numberOfFins - 1) * self.finSpacing))
self.overallDiameter = self.cylinderDiameter + (2 * finRadius)
self.ambAirTemp = ambAirTemp # degrees kelvin
self.maxJunctionTemp = maxJunctionTemp
self.maxSurfaceTemp = maxSurfaceTemp
"""
NOTE: in order to prevent ridiculously long variable names, all
Nusselt Numbers are abbreviated as follows:
nn = Nusselt Number
nn0 = Nusselt Number 0 (Diffusive Limit)
nnOut = Nusselt Number for outer surfaces
nnIn = Nusselt Number for inner surfaces
        nnInT = Nusselt Number for the thin boundary layer of inner surface
nnInFD = Nusselt Number for fully developed regime inner surface
"""
# thermal diffusivity of air at atmospheric pressure at 25C
alpha = 22.39 * 10**(-6) # (meters^2) / seconds
# Volumetric coefficient of thermal expansion
beta = aluminium.expansionCoefficient # 1/kelvin
        # TODO: use a measured value; for now assume the surface runs at
        # the maximum allowed surface temperature (kelvin)
        heatsinkSurfaceTemp = maxSurfaceTemp
# at atmospheric pressure at 25C
kinematicViscosity = 15.52 * 10**(-6) # meter^2/second
deltaT = heatsinkSurfaceTemp - ambAirTemp # kelvin
hLoD = self.heatsinkLength / self.overallDiameter
cDoD = self.cylinderDiameter / self.overallDiameter
oneChannelArea = (math.pi * (((self.overallDiameter**2
- self.cylinderDiameter**2) / 2)
+ (self.cylinderDiameter
* self.finSpacing)))
# area of circumscribed cylinder
areaCC = (math.pi * (((self.overallDiameter**2) / 2)
+ self.overallDiameter * self.heatsinkLength)) # meter^2
# inner surface area of heatsink
areaIn = (self.numberOfFins - 1) * oneChannelArea # meter^2
# outer surface area of heatsink
areaOut = (math.pi * (((self.overallDiameter**2) / 2)
+ (self.numberOfFins
* self.overallDiameter
* self.finThickness))) # meter^2
# overall area of heatsink
areaHS = areaIn + areaOut # meter^2
RayleighNbrFinSpacing = ((const.g
* beta
* deltaT
* self.finSpacing**4)
/ (kinematicViscosity
* alpha
* self.overallDiameter))
RayleighNbrOverallDiameter = ((const.g
* beta
* deltaT
* self.overallDiameter**3)
/ (kinematicViscosity * alpha))
if 0.1 <= hLoD <= 8:
self.nn0 = ((3.36 + (0.087 * hLoD))
* math.sqrt(areaCC)
* (self.finSpacing / areaHS)
)
if 0.1 <= (self.finThickness
* self.numberOfFins
/ self.overallDiameter) <= 8:
self.nnOut = ((0.499 - (0.026 * math.log(self.finThickness
* self.numberOfFins
/ self.overallDiameter)))
* math.pow(RayleighNbrFinSpacing, 0.25)
* (areaOut/areaHS)
)
        if (0.1 <= cDoD <= 8) and (2.9 * 10**4
                                   <= RayleighNbrOverallDiameter
                                   <= 2.3 * 10**5):
            nnInT = ((0.573 - (0.184 * cDoD) + (0.0388 * cDoD**2))
                     * math.pow(RayleighNbrFinSpacing, 0.25))
            nnInFD = (((0.0323
                        - (0.0517 * cDoD)
                        + (0.11 * cDoD**2))
                       * math.pow(RayleighNbrFinSpacing, 0.25))
                      + (0.0516 + (0.0154 * cDoD)
                         - (0.0433 * cDoD**2)
                         + (0.0792 * cDoD**3)) * RayleighNbrFinSpacing)
n = 1
self.nnIn = (math.pow(math.pow(nnInT, -n)
+ math.pow(nnInFD, -n), (-1/n)
)
* (areaIn/areaHS)
)
self.nn = (self.nnIn + self.nnOut + self.nn0)
        super(CylindricalAnnularFin, self).__init__(
            material, self.__class__.__name__)
"""
Nusselt number = (Qconv * b) / (Ahs deltaT k)
Qconv = heat flow rate by convection (Watts)
b = finSpacing (meters)
Ahs = Area of heatsink (meter^2)
deltaT = temperature difference between surface temp of
heatsink and ambient air temp.
k = thermal conductivity of material (Watts / (meter kelvin))
"""
| 44.148649
| 78
| 0.520814
| 575
| 6,534
| 5.888696
| 0.34087
| 0.067336
| 0.014176
| 0.028352
| 0.124336
| 0.069699
| 0.039575
| 0
| 0
| 0
| 0
| 0.035732
| 0.396082
| 6,534
| 147
| 79
| 44.44898
| 0.822352
| 0.061371
| 0
| 0.091954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006803
| 0
| 0
| null | null | 0
| 0.034483
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8acb71f44d08977a58d847a4d25a262b4cc3e603
| 35,471
|
py
|
Python
|
src/parser.py
|
harkiratbehl/PyGM
|
e0a4e0b865afb607dfa0525ca386bfbe77bb6508
|
[
"MIT"
] | 2
|
2019-02-13T11:30:08.000Z
|
2021-02-14T04:20:44.000Z
|
src/parser.py
|
harkiratbehl/PyGM
|
e0a4e0b865afb607dfa0525ca386bfbe77bb6508
|
[
"MIT"
] | null | null | null |
src/parser.py
|
harkiratbehl/PyGM
|
e0a4e0b865afb607dfa0525ca386bfbe77bb6508
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from code import TreeNode
from code import ThreeAddressCode
from lexer import tokens
from random import *
from symbol_table import SymbolTable
from symbol_table import SymbolTableNode
import logging
import ply.lex as lex
import ply.yacc as yacc
import sys
from codegen import convert_tac
from code import Code
from codegen import generate_assembly
three_addr_code = ThreeAddressCode()
assembly_code = Code()
parsed = []
symbol_table = SymbolTable()
var_list = []
generated = {'temp': [], 'scope': ['scope_0'], 'label': [], 'str_list': []}
def gen(s):
if s not in generated.keys():
generated[s] = []
temp = s + '_' + str(len(generated[s]))
generated[s] += [temp]
return temp
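# Behaviour of gen(), derived from the code above: each call returns a
# name suffixed with the count of names generated so far and records it.
#
#   gen('temp')    # -> 'temp_0'
#   gen('temp')    # -> 'temp_1'
#   gen('label')   # -> 'label_0'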
def print_error(err):
print "*** Error: " + err + "! ***"
sys.exit(1)
def check_variable(TreeNode):
# return 2 values. first is the name for the variable, second is 0 if variable not found
# TreeNode.print_node()
# symbol_table.print_symbol_table()
if TreeNode.isLvalue == 1:
if TreeNode.data not in generated['temp']:
name = symbol_table.search_identifier(TreeNode.data)
if name == False:
name = symbol_table.search_function(TreeNode.data)
if name == False:
print_error("Variable " + TreeNode.data + " is undefined")
return TreeNode.data
else:
return name
else:
newNode = SymbolTableNode(name, TreeNode.input_type)
symbol_table.add_var(newNode)
if TreeNode.children == []:
return name
else:
return name + '[' + TreeNode.children + ']'
else:
newNode = SymbolTableNode(TreeNode.data, TreeNode.input_type)
symbol_table.add_var(newNode)
return TreeNode.data
else:
if TreeNode.input_type != 'STRING':
return TreeNode.data
else:
TreeNode.print_node()
return TreeNode.data
precedence = (
('left','IDENTIFIER'),
('right','ASSIGN_OP'),
('left','COMMA'),
('left','LSQUARE'),
('left','RSQUARE'),
('left','LCURLY'),
('left','RCURLY'),
('left','DDD'),
('left','DOT'),
('left','SEMICOLON'),
('left','COLON'),
('left','SINGLE_QUOTES'),
('left','DOUBLE_QUOTES'),
('left','DECIMAL_LIT'),
('left','OCTAL_LIT'),
('left','HEX_LIT'),
('left','FLOAT_LIT'),
('left','STRING_LIT'),
('left','NEWLINE'),
('left','BREAK'),
('left','CONTINUE'),
('left','RETURN'),
('left','RROUND'),
('left','LROUND'),
('left', 'OR_OR'),
('left', 'AMP_AMP'),
('left', 'EQ_EQ', 'NOT_EQ','LT','LT_EQ','GT','GT_EQ'),
('left', 'PLUS', 'MINUS','OR','CARET'),
('left', 'STAR', 'DIVIDE','MODULO','AMP','AND_OR','LS','RS'),
)
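# The tuple above is consumed by ply.yacc: rows listed later bind more
# tightly, and 'left'/'right' set associativity. So STAR/DIVIDE/MODULO
# bind tighter than PLUS/MINUS, which bind tighter than the comparison
# operators, which bind tighter than AMP_AMP and OR_OR.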
def p_SourceFile(p):
'''SourceFile : PACKAGE IDENTIFIER SEMICOLON ImportDeclList TopLevelDeclList
'''
parsed.append(p.slice)
# TODO: Ignoring package name and Imports for now
p[0] = p[5]
var_list = symbol_table.make_var_list()
three_addr_code = convert_tac(p[0].TAC)
symbol_table.fill_next_use(three_addr_code)
assembly_code = generate_assembly(three_addr_code,var_list,symbol_table)
# p[0].TAC.print_code()
# three_addr_code.print_code()
assembly_code.print_code()
# symbol_table.print_symbol_table()
return
def p_ImportDeclList(p):
'''ImportDeclList : ImportDecl SEMICOLON ImportDeclList
| empty
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_TopLevelDeclList(p):
'''TopLevelDeclList : TopLevelDecl SEMICOLON TopLevelDeclList
| empty
'''
parsed.append(p.slice)
if len(p) == 4:
if p[3] != None:
p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
else:
p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]], p[1].TAC)
return
def p_TopLevelDecl(p):
'''TopLevelDecl : Declaration
| FunctionDecl
'''
parsed.append(p.slice)
p[0] = p[1]
return
def p_ImportDecl(p):
'''ImportDecl : IMPORT LROUND ImportSpecList RROUND
| IMPORT ImportSpec
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_ImportSpecList(p):
'''ImportSpecList : ImportSpec SEMICOLON ImportSpecList
| empty
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_ImportSpec(p):
'''ImportSpec : DOT string_lit
| IDENTIFIER string_lit
| empty string_lit
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_Block(p):
'''Block : LCURLY ScopeStart StatementList ScopeEnd RCURLY
'''
parsed.append(p.slice)
p[0] = p[3]
p[0].data = p[2].data
p[0].name = 'Block'
return
def p_ScopeStart(p):
'''ScopeStart : empty
'''
parsed.append(p.slice)
symbol_table.add_scope(gen('scope'))
p[0] = TreeNode('ScopeStart', symbol_table.current_scope, 'None')
return
def p_ScopeEnd(p):
'''ScopeEnd : empty
'''
parsed.append(p.slice)
symbol_table.end_scope()
return
def p_StatementList(p):
'''StatementList : Statement SEMICOLON StatementList
| empty
'''
parsed.append(p.slice)
if len(p) == 4:
p[0] = TreeNode('StatementList', 0, 'INT', 0, [p[1].data] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
else:
p[0] = TreeNode('StatementList', 0, 'INT')
return
def p_Statement(p):
'''Statement : Declaration
| SimpleStmt
| ReturnStmt
| Block
| IfStmt
| SwitchStmt
| ForStmt
| BreakStmt
| ContinueStmt
| GotoStmt
| PrintIntStmt
| PrintStrStmt
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Statement'
return
def p_PrintIntStmt(p):
'''PrintIntStmt : PRINTLN LROUND IDENTIFIER RROUND
| PRINTLN LROUND int_lit RROUND
'''
if hasattr(p[3], 'name') and p[3].name == 'int_lit':
p[0] = p[3]
# p[0].isLvalue = 0
else:
p[0] = TreeNode('IDENTIFIER', p[3], 'INT', 1, [])
p[0].TAC.add_line(['print_int', check_variable(p[0]), '', ''])
p[0].name = 'PrintIntStmt'
return
def p_PrintStrStmt(p):
'''PrintStrStmt : PRINTLN LROUND string_lit RROUND
'''
p[0] = p[3]
name = symbol_table.current_scope + '_' + gen('str_list')
parametersNode = SymbolTableNode(p[3].data, p[3].input_type)
newNode = SymbolTableNode(name, p[3].input_type, parameters = [parametersNode])
symbol_table.add_var(newNode)
p[0].TAC.add_line(['print_str', name, '', ''])
p[0].name = 'PrintStrStmt'
return
def p_Declaration(p):
'''Declaration : ConstDecl
| TypeDecl
| VarDecl
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Declaration'
return
def p_ConstDecl(p):
'''ConstDecl : CONST LROUND ConstSpecList RROUND
| CONST ConstSpec
'''
parsed.append(p.slice)
return
def p_ConstSpecList(p):
'''ConstSpecList : empty
| ConstSpecList ConstSpec SEMICOLON
'''
parsed.append(p.slice)
return
def p_ConstSpec(p):
'''ConstSpec : IDENTIFIER
| IdentifierList
| IDENTIFIER EQ Expression
| IdentifierList EQ ExpressionList
| IDENTIFIER Type EQ Expression
| IdentifierList Type EQ ExpressionList
'''
parsed.append(p.slice)
return
def p_IdentifierList(p):
'''IdentifierList : IDENTIFIER COMMA IdentifierBotList
'''
parsed.append(p.slice)
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
return
def p_IdentifierBotList(p):
'''IdentifierBotList : IDENTIFIER COMMA IdentifierBotList
| IDENTIFIER
'''
parsed.append(p.slice)
if len(p) == 2:
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node])
elif len(p) == 4:
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
return
def p_ExpressionList(p):
'''ExpressionList : Expression COMMA ExpressionBotList
'''
parsed.append(p.slice)
p[0] = TreeNode('ExpressionList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_ExpressionBotList(p):
'''ExpressionBotList : Expression COMMA ExpressionBotList
| Expression
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]], p[1].TAC)
elif len(p) == 4:
p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_TypeDecl(p):
'''TypeDecl : TYPE TypeSpecTopList
'''
parsed.append(p.slice)
return
def p_TypeSpecTopList(p):
'''TypeSpecTopList : TypeSpec
| LROUND TypeSpecList RROUND
'''
parsed.append(p.slice)
return
def p_TypeSpecList(p):
'''TypeSpecList : empty
| TypeSpecList TypeSpec SEMICOLON
'''
parsed.append(p.slice)
return
def p_TypeSpec(p):
'''TypeSpec : AliasDecl
| TypeDef
'''
parsed.append(p.slice)
return
def p_AliasDecl(p):
'''AliasDecl : IDENTIFIER EQ Type
'''
parsed.append(p.slice)
return
def p_TypeDef(p):
'''TypeDef : IDENTIFIER Type
'''
parsed.append(p.slice)
return
def p_Type(p):
'''Type : TypeLit
| StandardTypes
| LROUND Type RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'Type'
return
def p_StandardTypes(p):
'''StandardTypes : PREDEFINED_TYPES
'''
parsed.append(p.slice)
p[0] = TreeNode('StandardTypes', p[1], 'NONE')
return
def p_TypeLit(p):
'''TypeLit : ArrayType
| StructType
| FunctionType
| PointerType
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'TypeLit'
return
def p_PointerType(p):
'''PointerType : STAR Type
'''
parsed.append(p.slice)
return
def p_ArrayType(p):
'''ArrayType : LSQUARE ArrayLength RSQUARE Type
'''
parsed.append(p.slice)
p[0] = TreeNode('ArrayType', p[2].data, p[4].data)
return
def p_ArrayLength(p):
'''ArrayLength : Expression
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'ArrayLength'
return
def p_StructType(p):
'''StructType : STRUCT LCURLY FieldDeclList RCURLY
'''
parsed.append(p.slice)
return
def p_FieldDeclList(p):
'''FieldDeclList : empty
| FieldDeclList FieldDecl SEMICOLON
'''
parsed.append(p.slice)
return
def p_FieldDecl(p):
'''FieldDecl : IdentifierList Type TagTop
| IDENTIFIER Type TagTop
'''
parsed.append(p.slice)
return
def p_TagTop(p):
'''TagTop : empty
| Tag
'''
parsed.append(p.slice)
return
def p_Tag(p):
'''Tag : string_lit
'''
parsed.append(p.slice)
return
def p_FunctionType(p):
'''FunctionType : FUNC Signature
'''
parsed.append(p.slice)
return
def p_Signature(p):
'''Signature : Parameters
| Parameters Result
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Signature'
s = 'scope_' + str(len(generated['scope']))
symbol_table.new_scope(s)
for child in p[1].children:
symbol_table.add_identifier(child, s)
newNode = SymbolTableNode(s + '_' + child.data, child.input_type)
symbol_table.add_var(newNode, s)
# symbol_table.print_symbol_table()
if len(p) == 2:
p[0].input_type = TreeNode('Result', 0, 'None')
else:
p[0].input_type = p[2]
return
def p_Result(p):
'''Result : Parameters
| Type
'''
parsed.append(p.slice)
if p[1].name == 'Type':
p[0] = TreeNode('Result', 1, 'None', 0, [p[1]])
else:
p[0] = p[1]
p[0].name = 'Result'
return
def p_Parameters(p):
'''Parameters : LROUND RROUND
| LROUND ParameterList RROUND
'''
parsed.append(p.slice)
if len(p) == 3:
p[0] = TreeNode('Parameters', 0, 'None')
else:
p[0] = p[2]
p[0].name = 'Parameters'
return
def p_ParameterList(p):
'''ParameterList : ParameterDecl
| ParameterList COMMA ParameterDecl
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
p[0].name = 'ParameterList'
elif len(p) == 4:
p[0] = TreeNode('ParameterList', p[1].data + p[3].data, 'None', 0, p[1].children + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_ParameterDecl(p):
'''ParameterDecl : IdentifierList Type
| IDENTIFIER Type
| Type
'''
parsed.append(p.slice)
p[0] = TreeNode('ParameterDecl', 0, 'None')
if len(p) == 3:
if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
for node in p[1].children:
p[0].data += 1
node.input_type = p[2].data
p[0].children += [node]
else:
node = TreeNode('IDENTIFIER', p[1], p[2].data, 1)
p[0].data += 1
p[0].children += [node]
else:
p[0].data += 1
p[0].children += [p[1]]
return
def p_VarDecl(p):
'''VarDecl : VAR VarSpecTopList
'''
parsed.append(p.slice)
p[0] = p[2]
p[0].name = 'VarDecl'
return
def p_VarSpecTopList(p):
'''VarSpecTopList : VarSpec
| LROUND VarSpecList RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'VarSpecTopList'
return
def p_VarSpecList(p):
'''VarSpecList : empty
| VarSpecList VarSpec SEMICOLON
'''
return
def p_VarSpec(p):
'''VarSpec : IDENTIFIER Type
| IDENTIFIER EQ Expression
| IDENTIFIER Type EQ Expression
| IdentifierList Type
| IdentifierList EQ ExpressionList
| IdentifierList Type EQ ExpressionList
'''
# Insert into symbol table
p[0] = TreeNode('VarSpec', 0, 'NONE')
if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
zero_val = TreeNode('decimal_lit', 0, 'INT')
# l1 = len(p[1].children)
# if len(p) == 3:
# expr_list = TreeNode('Expr_List', 0, 'NONE', 0, [zero_val] * l1)
# elif len(p) == 4:
# expr_list = p[3]
# elif len(p) == 5:
# expr_list = p[4]
# l2 = len(expr_list.children)
# p[0].TAC.append_TAC(expr_list.TAC)
# p[0].TAC.append_TAC(p[1].TAC)
# if l1 == l2:
# for i in range(l1):
# p[0].TAC.add_line(['=', p[1].children[i], expr_list.children[i].data, ''])
# else:
# print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
else:
p[1] = TreeNode('IDENTIFIER',p[1],'INT',1)
if p[2].input_type != 'NONE':
# array case
# p[2].print_node()
if symbol_table.add_identifier(p[1], size = p[2].data) == False:
print_error("Unable to add to SymbolTable")
return
name = symbol_table.search_identifier(p[1].data)
newNode = SymbolTableNode(name, p[1].input_type,size = p[2].data)
symbol_table.add_var(newNode)
p[0] = TreeNode('VarSpec',p[1].data,'INT')
# expr = TreeNode('Expr', 0, 'NONE')
# if len(p) == 4:
# expr = p[3]
# p[0].TAC.append_TAC(p[3].TAC)
# p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
# elif len(p) == 5:
# expr = p[4]
# p[0].TAC.append_TAC(p[4].TAC)
# p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
return
def p_FunctionDecl(p):
'''FunctionDecl : FUNC FunctionName Signature
| FUNC FunctionName Signature FunctionBody
'''
parsed.append(p.slice)
# symbol_table.print_symbol_table()
p[0] = TreeNode('FunctionDecl', 0, 'INT')
# print symbol_table.current_scope
# p[4].TAC.print_code()
symbol_table.add_function(p[2].data, p[3].input_type, p[3].children)
if len(p) == 5:
noOfParams = 0
for f in symbol_table.symbol_table[symbol_table.current_scope]['functions']:
if f.name == p[2].data:
noOfParams = len(f.parameters)
p[0].TAC.add_line(['func', check_variable(p[2]), str(noOfParams), ''])
for child in reversed(p[3].children):
p[0].TAC.add_line(['getparam', p[4].data + '_' + child.data, '', ''])
p[0].TAC.add_line(['stack_push', '', '', ''])
p[0].TAC.append_TAC(p[4].TAC)
return
def p_FunctionName(p):
'''FunctionName : IDENTIFIER
'''
parsed.append(p.slice)
p[0] = TreeNode('FunctionName', p[1], 'INT', 1)
return
def p_FunctionBody(p):
'''FunctionBody : Block
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'FunctionBody'
return
def p_SimpleStmt(p):
'''SimpleStmt : Expression
| Assignment
| ShortVarDecl
| IncDecStmt
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'SimpleStmt'
return
def p_IncDecStmt(p):
'''IncDecStmt : Expression PLUS_PLUS
| Expression MINUS_MINUS
'''
parsed.append(p.slice)
one_val = TreeNode('IncDecStmt', '1', 'INT')
p[0] = p[1]
if p[1].isLvalue == 1:
if p[2] == '++':
p[0].TAC.add_line(['+', check_variable(p[1]), check_variable(p[1]), one_val.data])
else:
p[0].TAC.add_line(['-', check_variable(p[1]), check_variable(p[1]), one_val.data])
else:
print_error("Lvalue required")
p[0].name = 'IncDecStmt'
return
def p_ShortVarDecl(p):
'''ShortVarDecl : ExpressionList ASSIGN_OP ExpressionList
| Expression ASSIGN_OP Expression
'''
parsed.append(p.slice)
# TODO: Add in symbol table
p[0] = TreeNode('ShortVarDecl', 0, 'INT')
if p[1].name == 'ExpressionList':
l1 = len(p[1].children)
l2 = len(p[3].children)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
if l1 == l2:
for i in range(l1):
if p[1].children[i].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.add_identifier(p[1].children[i]) == False:
print_error("Unable to add to SymbolTable")
return
p[0].TAC.add_line([p[2], check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
else:
print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
elif p[1].name == 'Expression':
if p[1].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.add_identifier(p[1]) == False:
print_error("Unable to add to SymbolTable")
return
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line([p[2], check_variable(p[1]), check_variable(p[3]), ''])
return
def p_Assignment(p):
'''Assignment : ExpressionList assign_op ExpressionList
| Expression assign_op Expression
'''
parsed.append(p.slice)
p[0] = TreeNode('Assignment', 0, 'INT')
if p[1].name == 'ExpressionList':
l1 = len(p[1].children)
l2 = len(p[3].children)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
if l1 == l2:
for i in range(l1):
if p[1].children[i].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.search_identifier(p[1].children[i].data) == False and p[1].children[i].data not in generated['temp']:
print_error("Variable " + p[1].children[i].data + " is undefined")
return
if p[3].children[i].isLvalue == 1 and symbol_table.search_identifier(p[3].children[i].data) == False and p[3].children[i].data not in generated['temp']:
print_error("Variable " + p[3].children[i].data + " is undefined")
return
p[0].TAC.add_line([p[2].data, check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
else:
print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
elif p[1].name == 'Expression':
if p[1].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.search_identifier(p[1].data) == False and p[1].data not in generated['temp']:
print_error("Variable " + p[1].data + " is undefined")
return
if p[3].isLvalue == 1 and symbol_table.search_identifier(p[3].data) == False and p[3].data not in generated['temp']:
print_error("Variable " + p[3].data + " is undefined")
return
# print symbol_table.current_scope
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line([p[2].data, check_variable(p[1]), check_variable(p[3]), ''])
return
def p_assign_op(p):
'''assign_op : EQ
| PLUS_EQ
| MINUS_EQ
| OR_EQ
| CARET_EQ
| STAR_EQ
| DIVIDE_EQ
| MODULO_EQ
| LS_EQ
| RS_EQ
| AMP_EQ
| AND_OR_EQ
'''
parsed.append(p.slice)
p[0] = TreeNode('assign_op', p[1], 'OPERATOR')
return
def p_IfStmt(p):
'''IfStmt : IF Expression Block
| IF Expression Block ELSE elseTail
'''
parsed.append(p.slice)
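# The if-only form uses one label (skip the block when the condition is 0);
# if/else uses two (l1 targets the else branch, l2 marks the end).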
if len(p) == 4:
l1 = gen('label')
p[0] = TreeNode('IfStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['label', l1, '', ''])
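# e.g. `if x < y { z = 1 }` lowers, roughly (ignoring scope-prefixed names), to:
#   ['<', t0, x, y], ['ifgotoeq', t0, '0', l1], ['=', z, '1', ''], ['label', l1, '', '']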
if len(p) == 6:
l1 = gen('label')
l2 = gen('label')
p[0] = TreeNode('IfStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['goto', l2, '', ''])
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[5].TAC)
p[0].TAC.add_line(['label', l2, '', ''])
return
def p_elseTail(p):
'''elseTail : IfStmt
| Block
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'elseTail'
return
def p_SwitchStmt(p):
'''SwitchStmt : ExprSwitchStmt
'''
parsed.append(p.slice)
p[0] = TreeNode('SwitchStmt', 0, 'INT', 0, [], p[1].TAC)
return
def p_ExprSwitchStmt(p):
'''ExprSwitchStmt : SWITCH SimpleStmt SEMICOLON LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH SimpleStmt SEMICOLON Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
'''
parsed.append(p.slice)
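# len(p) == 8 is the 'SWITCH Expression { ... }' form: copy the scrutinee into a temp,
# test it against each collected case constant with ifgotoeq, fall through to a default
# goto, then emit the case bodies with a goto-to-end at every leader (implicit break).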
if len(p) == 8:
l1 = gen('label')
l2 = gen('label')
p[0] = TreeNode('ExprSwitchStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
t1 = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.add_line(['=', check_variable(t1) , check_variable(p[2]), ''])
p[0].TAC.append_TAC(p[5].data)
for i in range(len(p[5].children)):
p[0].TAC.add_line(['ifgotoeq', check_variable(t1), p[5].children[i][0], p[5].children[i][1]])
p[0].TAC.add_line(['goto', l2, '', ''])
for i in range(p[5].TAC.length()):
if i in p[5].TAC.leaders[1:]:
p[0].TAC.add_line(['goto', l2, '', ''])
p[0].TAC.add_line(p[5].TAC.code[i])
p[0].TAC.add_line(['label', l2, '', ''])
return
def p_ExprCaseClauseList(p):
'''ExprCaseClauseList : empty
| ExprCaseClauseList ExprCaseClause
'''
parsed.append(p.slice)
TAC1 = ThreeAddressCode()
TAC2 = ThreeAddressCode()
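# p[0].TAC accumulates the case bodies (leaders mark where each body starts);
# p[0].data accumulates the guard-expression TACs so the switch head can run them first.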
if len(p) == 3:
TAC1 = p[1].data
TAC2 = p[2].data
p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT', 0, p[1].children + p[2].children, p[1].TAC)
p[0].TAC.add_leader(p[0].TAC.length())
p[0].TAC.append_TAC(p[2].TAC)
p[0].data.append_TAC(TAC2)
else:
p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT')
return
def p_ExprCaseClause(p):
'''ExprCaseClause : ExprSwitchCase COLON StatementList
'''
parsed.append(p.slice)
l1 = gen('label')
p[0] = TreeNode('ExprCaseClause', 0, 'INT')
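# children carries one [case-constant, body-label] pair for ExprSwitchStmt to dispatch on;
# data carries the guard expression's TAC.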
# p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line(['label', l1, '', ''])
# p[0].TAC.add_line(['ifgotoneq', p[1].children, p[1].children, l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].children = [[p[1].data,l1]]
p[0].data = p[1].TAC
return
def p_ExprSwitchCase(p):
'''ExprSwitchCase : CASE ExpressionList
| DEFAULT
| CASE Expression
'''
parsed.append(p.slice)
p[0] = TreeNode('ExprSwitchCase', 0, 'INT')
if len(p) == 3:
p[0].data = p[2].data
p[0].TAC = p[2].TAC
return
def p_ForStmt(p):
'''ForStmt : FOR Expression Block
| FOR Block
'''
parsed.append(p.slice)
p[0] = TreeNode('ForStmt', 0, 'INT')
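# Conditional loop: label l1; evaluate the condition; exit to l2 when it is 0; body; goto l1.
# The condition-less form is an infinite loop with no exit label.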
if len(p) == 4:
l1 = gen('label')
l2 = gen('label')
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq',check_variable(p[2]), '0', l2])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['goto', l1, '', ''])
p[0].TAC.add_line(['label', l2, '', ''])
if len(p) == 3:
l1 = gen('label')
# l2 = gen('label')
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['goto', l1, '', ''])
# p[0].TAC.add_line([l2])
return
def p_ReturnStmt(p):
'''ReturnStmt : RETURN
| RETURN Expression
| RETURN ExpressionList
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = TreeNode('ReturnStmt', 0, 'None')
p[0].TAC.add_line(['return', '', '', ''])
if len(p) == 3:
if p[2].name == 'Expression':
p[0] = p[2]
p[0].name = 'ReturnStmt'
p[0].TAC.add_line(['return', check_variable(p[2]), '', ''])
return
def p_BreakStmt(p):
'''BreakStmt : BREAK IDENTIFIER
'''
parsed.append(p.slice)
return
def p_ContinueStmt(p):
'''ContinueStmt : CONTINUE IDENTIFIER
'''
parsed.append(p.slice)
return
def p_GotoStmt(p):
'''GotoStmt : GOTO IDENTIFIER
'''
parsed.append(p.slice)
return
def p_Expression(p):
'''Expression : UnaryExpr
| Expression OR_OR Expression
| Expression AMP_AMP Expression
| Expression EQ_EQ Expression
| Expression NOT_EQ Expression
| Expression LT Expression
| Expression LT_EQ Expression
| Expression GT Expression
| Expression GT_EQ Expression
| Expression PLUS Expression
| Expression MINUS Expression
| Expression OR Expression
| Expression CARET Expression
| Expression STAR Expression
| Expression DIVIDE Expression
| Expression MODULO Expression
| Expression LS Expression
| Expression RS Expression
| Expression AMP Expression
| Expression AND_OR Expression
'''
parsed.append(p.slice)
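# Binary operators allocate a fresh temporary: concatenate both operands' TAC,
# then emit t = left <op> right.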
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1, [], p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line([p[2],check_variable(p[0]), check_variable(p[1]), check_variable(p[3])])
p[0].name = 'Expression'
return
def p_UnaryExpr(p):
'''UnaryExpr : PrimaryExpr
| unary_op UnaryExpr
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.add_line([check_variable(p[1]), check_variable(p[0]), check_variable(p[2]), ''])
p[0].name = 'UnaryExpr'
return
def p_unary_op(p):
'''unary_op : PLUS
| MINUS
| NOT
| CARET
| STAR
| AMP
| LT_MINUS
'''
parsed.append(p.slice)
p[0] = TreeNode('unary_op', p[1], 'OPERATOR')
return
def p_PrimaryExpr(p):
'''PrimaryExpr : Operand
| IDENTIFIER
| PrimaryExpr Selector
| PrimaryExpr Index
| PrimaryExpr Arguments
'''
parsed.append(p.slice)
if len(p) == 2:
if p.slice[1].type == 'IDENTIFIER':
p[0] = TreeNode('IDENTIFIER', p[1], 'INT', 1)
elif p[1].name == 'Operand':
p[0] = p[1]
elif len(p) == 3:
if p[2].name == 'Index':
p[0] = TreeNode('IDENTIFIER', p[1].data, 'INT', 1, p[2].data)
elif p[2].name == 'Arguments':
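# Call lowering: emit 'putparam' per argument, check the argument count against the
# declaration, then 'call' and fetch the result into a fresh temp via 'return_value'.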
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.append_TAC(p[2].TAC)
# p[1].print_node()
func = check_variable(p[1]).split("_")
scope, funcName = "_".join(func[:2]), "_".join(func[2:])
temp = 0
for f in symbol_table.symbol_table[scope]['functions']:
if f.name == funcName:
temp = len(f.parameters)
# p[2].print_node()
for child in p[2].children:
p[0].TAC.add_line(['putparam', check_variable(child), '', ''])
if temp != p[2].data:
print_error('Function ' + funcName + ' requires ' + str(temp) + ' parameters but ' + str(p[2].data) + ' supplied')
p[0].TAC.add_line(['call', check_variable(p[1]), str(p[2].data), ''])
p[0].TAC.add_line(['return_value', check_variable(p[0]), '', ''])
p[0].name = 'PrimaryExpr'
return
def p_Operand(p):
'''Operand : Literal
| LROUND Expression RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'Operand'
return
def p_Literal(p):
'''Literal : BasicLit
| FunctionLit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Literal'
return
def p_BasicLit(p):
'''BasicLit : int_lit
| float_lit
| string_lit
| rune_lit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'BasicLit'
return
def p_int_lit(p):
'''int_lit : decimal_lit
| octal_lit
| hex_lit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'int_lit'
return
def p_decimal_lit(p):
'''decimal_lit : DECIMAL_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('decimal_lit', p[1], 'INT')
return
def p_octal_lit(p):
'''octal_lit : OCTAL_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('octal_lit', p[1], 'OCT')
return
def p_hex_lit(p):
'''hex_lit : HEX_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('hex_lit', p[1], 'HEX')
return
def p_float_lit(p):
'''float_lit : FLOAT_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('float_lit', p[1], 'FLOAT')
return
def p_FunctionLit(p):
'''FunctionLit : FUNC Signature FunctionBody
'''
parsed.append(p.slice)
# Anonymous Function
# Not implemented yet
return
def p_Selector(p):
'''Selector : DOT IDENTIFIER
'''
parsed.append(p.slice)
return
def p_Index(p):
'''Index : LSQUARE Expression RSQUARE
'''
parsed.append(p.slice)
p[0] = p[2]
p[0].name = 'Index'
return
def p_Arguments(p):
'''Arguments : LROUND RROUND
| LROUND ExpressionList RROUND
| LROUND Expression RROUND
| LROUND Type RROUND
| LROUND Type COMMA ExpressionList RROUND
| LROUND Type COMMA Expression RROUND
'''
# print p.slice
parsed.append(p.slice)
if len(p) == 3:
p[0] = TreeNode('Arguments', 0, 'None')
if len(p) == 4:
if p[2].name == 'Expression':
p[0] = TreeNode('Arguments', 1, 'None', 0, [p[2]], p[2].TAC)
if p[2].name == 'ExpressionList':
p[0] = p[2]
p[0].name = 'Arguments'
p[0].data = len(p[2].children)
return
def p_string_lit(p):
'''string_lit : STRING_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('string_lit', p[1], 'STRING')
return
def p_rune_lit(p):
'''rune_lit : RUNE_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('rune_lit', p[1], 'RUNE')
return
def p_empty(p):
'empty :'
pass
def p_error(p):
print(p)
if p is None:
print(str(sys.argv[1]) + " :: You missed something at the end")
else:
print(str(sys.argv[1]) + " :: Syntax error at line " + str(p.lineno))
# Standard Logger
logging.basicConfig(
level = logging.DEBUG,
filename = "parselog.txt",
filemode = "w",
format = "%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()
yacc.yacc(debug=True, debuglog=log)
input_file = sys.argv[1]
import os
if not os.path.isfile(input_file):
print('Input file ' + input_file + ' does not exist')
sys.exit(1)
with open(input_file, 'r') as f:
input_code = f.read()
if not input_code.endswith('\n'):
input_code += '\n'
yacc.parse(input_code, debug=log, tracking=True)
| 29.050778
| 172
| 0.5361
| 4,440
| 35,471
| 4.178153
| 0.072072
| 0.02264
| 0.046898
| 0.080535
| 0.482076
| 0.438251
| 0.37529
| 0.314107
| 0.258854
| 0.240149
| 0
| 0.026433
| 0.305686
| 35,471
| 1,220
| 173
| 29.07459
| 0.726815
| 0.048744
| 0
| 0.511502
| 0
| 0
| 0.103066
| 0.001517
| 0
| 0
| 0
| 0.004918
| 0
| 0
| null | null | 0.001353
| 0.024357
| null | null | 0.035183
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8ad27d34811f9ef90b1af846c18b262998179e76
| 1,523
|
py
|
Python
|
tests/generation_test.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
tests/generation_test.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
tests/generation_test.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
import unittest
from worldengine.plates import Step, center_land, world_gen
from worldengine.world import World
from tests.draw_test import TestBase
class TestGeneration(TestBase):
def setUp(self):
super(TestGeneration, self).setUp()
def test_world_gen_does_not_explode_badly(self):
# FIXME remove me when proper tests are in place
# Very basic smoke test that just verifies nothing explodes badly
world_gen("Dummy", 32, 16, 1, step=Step.get_by_name("full"))
@staticmethod
def _mean_elevation_at_borders(world):
borders_total_elevation = 0.0
for y in range(world.height):
borders_total_elevation += world.elevation_at((0, y))
borders_total_elevation += world.elevation_at((world.width - 1, y))
for x in range(1, world.width - 1):
borders_total_elevation += world.elevation_at((x, 0))
borders_total_elevation += world.elevation_at((x, world.height - 1))
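# 2*w + 2*h - 4 distinct border cells (the four corners are counted exactly once above)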
n_cells_on_border = world.width * 2 + world.height * 2 - 4
return borders_total_elevation / n_cells_on_border
def test_center_land(self):
w = World.from_pickle_file("%s/plates_279.world" % self.tests_data_dir)
# We want to have less land than before at the borders
el_before = TestGeneration._mean_elevation_at_borders(w)
center_land(w)
el_after = TestGeneration._mean_elevation_at_borders(w)
self.assertTrue(el_after <= el_before)
if __name__ == '__main__':
unittest.main()
| 35.418605
| 80
| 0.690085
| 211
| 1,523
| 4.668246
| 0.407583
| 0.078173
| 0.127919
| 0.105584
| 0.227411
| 0.227411
| 0.077157
| 0
| 0
| 0
| 0
| 0.016034
| 0.22193
| 1,523
| 42
| 81
| 36.261905
| 0.81519
| 0.101773
| 0
| 0
| 0
| 0
| 0.026393
| 0
| 0
| 0
| 0
| 0.02381
| 0.035714
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
76d2dd0a16c26b25219d0d5220bf5e490de12769
| 1,627
|
py
|
Python
|
run.py
|
Bioconductor/bioc_git_transition
|
9ca29f9e8058b755163e12bf9324ec1063d0182d
|
[
"MIT"
] | 16
|
2017-03-15T18:00:35.000Z
|
2018-07-30T14:44:53.000Z
|
run.py
|
Bioconductor/bioc_git_transition
|
9ca29f9e8058b755163e12bf9324ec1063d0182d
|
[
"MIT"
] | 40
|
2017-03-29T20:04:25.000Z
|
2019-10-21T16:56:15.000Z
|
run.py
|
Bioconductor/bioc_git_transition
|
9ca29f9e8058b755163e12bf9324ec1063d0182d
|
[
"MIT"
] | 4
|
2017-05-08T11:39:07.000Z
|
2017-08-17T14:18:03.000Z
|
"""Bioconductor run git transition code.
This module assembles the classes for the SVN --> Git transition
so that it can be run in a sequential manner.
It runs the following aspects of the Bioconductor transition.
Note: Update the SVN dump
1. Run Bioconductor Software package transition
2. Run Bioconductor Experiment Data package transition
3. Run Workflow package transition
4. Run Manifest file transition
5. Run Rapid update of master (trunk) and RELEASE_3_5 branches on
software packages
Manual tasks which need to be done:
1. Copy over bare repos to repositories/packages
2. Copy manifest bare git repo to repositories/admin
"""
import src.run_transition as rt
import src.svn_dump_update as sdu
import logging
import time
logging.basicConfig(filename='transition.log',
format='%(levelname)s %(asctime)s %(message)s',
level=logging.DEBUG)
def svn_dump_update(config_file):
sdu.svn_root_update(config_file)
sdu.svn_experiment_root_update(config_file)
return
def run(config_file):
rt.run_software_transition(config_file, new_svn_dump=True)
rt.run_experiment_data_transition(config_file, new_svn_dump=True)
rt.run_workflow_transition(config_file, new_svn_dump=True)
rt.run_manifest_transition(config_file, new_svn_dump=True)
return
if __name__ == '__main__':
start_time = time.time()
config_file = "./settings.ini"
svn_dump_update(config_file)
run(config_file)
# TODO: Run updates after dump update
svn_dump_update(config_file)
rt.run_updates(config_file)
logging.info("--- %s seconds ---" % (time.time() - start_time))
| 30.12963
| 69
| 0.754149
| 239
| 1,627
| 4.895397
| 0.393305
| 0.111111
| 0.068376
| 0.078632
| 0.211966
| 0.12906
| 0.12906
| 0.1
| 0.1
| 0
| 0
| 0.006647
| 0.167793
| 1,627
| 53
| 70
| 30.698113
| 0.857459
| 0.406884
| 0
| 0.16
| 0
| 0
| 0.09499
| 0
| 0
| 0
| 0
| 0.018868
| 0
| 1
| 0.08
| false
| 0
| 0.16
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
76d437c1b037e1c3fe1a171bd9eb231c53d36fc1
| 645
|
py
|
Python
|
projectparallelprogrammeren/codesimulatie.py
|
fury106/ProjectParallelProgrammeren
|
fd3c198edaca5bcb19d8e665561e8cd14824e894
|
[
"MIT"
] | null | null | null |
projectparallelprogrammeren/codesimulatie.py
|
fury106/ProjectParallelProgrammeren
|
fd3c198edaca5bcb19d8e665561e8cd14824e894
|
[
"MIT"
] | null | null | null |
projectparallelprogrammeren/codesimulatie.py
|
fury106/ProjectParallelProgrammeren
|
fd3c198edaca5bcb19d8e665561e8cd14824e894
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Module projectparallelprogrammeren.codesimulatie
=================================================================
This module simulates everything.
"""
import projectparallelprogrammeren
def simulatie():
"""
This function runs all versions in turn so that their timing can be compared.
"""
from importlib import import_module
for i in range(4):
# import all versions of the simulation and run them one after another
version = f"montecarlo_v{i}"
montecarlo = import_module(version)
montecarlo.simulatie(100, 50)  # these values serve only as a test
if __name__ == "__main__":
simulatie()
#eof
| 23.035714
| 84
| 0.674419
| 70
| 645
| 6.057143
| 0.728571
| 0.051887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.142636
| 645
| 27
| 85
| 23.888889
| 0.754069
| 0.55814
| 0
| 0
| 0
| 0
| 0.087121
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.333333
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
76d4b9d4643322713c59c30a22d968f034c3d591
| 2,361
|
py
|
Python
|
test/test_aes.py
|
haruhi-dl/haruhi-dl
|
0526e2add4c263209cad55347efa9a2dfe6c3fa6
|
[
"Unlicense"
] | 32
|
2021-01-18T03:52:17.000Z
|
2022-02-17T20:43:39.000Z
|
test/test_aes.py
|
haruhi-dl/haruhi-dl
|
0526e2add4c263209cad55347efa9a2dfe6c3fa6
|
[
"Unlicense"
] | 12
|
2021-02-06T08:12:08.000Z
|
2021-12-11T23:17:41.000Z
|
test/test_aes.py
|
haruhi-dl/haruhi-dl
|
0526e2add4c263209cad55347efa9a2dfe6c3fa6
|
[
"Unlicense"
] | 6
|
2021-01-29T16:46:31.000Z
|
2022-01-20T18:40:03.000Z
|
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from haruhi_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text
from haruhi_dl.utils import bytes_to_intlist, intlist_to_bytes
import base64
# the encrypted data can be generated with 'devscripts/generate_aes_testdata.py'
class TestAES(unittest.TestCase):
def setUp(self):
self.key = self.iv = [0x20, 0x15] + 14 * [0]
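# 16-byte key and IV: 0x20, 0x15 followed by fourteen zero bytes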
self.secret_msg = b'Secret message goes here'
def test_encrypt(self):
msg = b'message'
key = list(range(16))
encrypted = aes_encrypt(bytes_to_intlist(msg), key)
decrypted = intlist_to_bytes(aes_decrypt(encrypted, key))
self.assertEqual(decrypted, msg)
def test_cbc_decrypt(self):
data = bytes_to_intlist(
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd"
)
decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv))
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
def test_cbc_encrypt(self):
data = bytes_to_intlist(self.secret_msg)
encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv))
self.assertEqual(
encrypted,
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd")
def test_decrypt_text(self):
password = intlist_to_bytes(self.key).decode('utf-8')
encrypted = base64.b64encode(
intlist_to_bytes(self.iv[:8])
+ b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
).decode('utf-8')
decrypted = (aes_decrypt_text(encrypted, password, 16))
self.assertEqual(decrypted, self.secret_msg)
password = intlist_to_bytes(self.key).decode('utf-8')
encrypted = base64.b64encode(
intlist_to_bytes(self.iv[:8])
+ b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
).decode('utf-8')
decrypted = (aes_decrypt_text(encrypted, password, 32))
self.assertEqual(decrypted, self.secret_msg)
if __name__ == '__main__':
unittest.main()
| 36.890625
| 102
| 0.671326
| 339
| 2,361
| 4.448378
| 0.368732
| 0.047745
| 0.074271
| 0.047745
| 0.451592
| 0.351459
| 0.302387
| 0.259947
| 0.259947
| 0.193634
| 0
| 0.063158
| 0.195256
| 2,361
| 63
| 103
| 37.47619
| 0.730526
| 0.051249
| 0
| 0.212766
| 0
| 0.085106
| 0.171658
| 0.143496
| 0
| 0
| 0.003576
| 0
| 0.106383
| 1
| 0.106383
| false
| 0.085106
| 0.148936
| 0
| 0.276596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
76dba06432c777d52082f512eea09a2187e28998
| 201
|
py
|
Python
|
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py
|
s2t2/tweet-analyzer-py
|
0a398fc47101a2d602d8c4116c970f1076a58f27
|
[
"MIT"
] | 5
|
2020-04-02T12:03:57.000Z
|
2020-10-18T19:29:15.000Z
|
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py
|
s2t2/tweet-analyzer-py
|
0a398fc47101a2d602d8c4116c970f1076a58f27
|
[
"MIT"
] | 22
|
2020-03-31T02:00:34.000Z
|
2021-06-30T17:59:01.000Z
|
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py
|
s2t2/tweet-analyzer-py
|
0a398fc47101a2d602d8c4116c970f1076a58f27
|
[
"MIT"
] | 3
|
2020-04-04T16:08:08.000Z
|
2020-10-20T01:32:46.000Z
|
from app.bq_service import BigQueryService
if __name__ == "__main__":
bq_service = BigQueryService()
bq_service.migrate_daily_bot_probabilities_table()
print("MIGRATION SUCCESSFUL!")
| 16.75
| 54
| 0.756219
| 22
| 201
| 6.227273
| 0.772727
| 0.19708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159204
| 201
| 11
| 55
| 18.272727
| 0.810651
| 0
| 0
| 0
| 0
| 0
| 0.145729
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
76dc3dcc93cf6f1c271c8e612a3e064f4f02ee56
| 3,258
|
py
|
Python
|
tests/bugs/core_6266_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_6266_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_6266_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
#coding:utf-8
#
# id: bugs.core_6266
# title: Deleting records from MON$ATTACHMENTS using ORDER BY clause doesn't close the corresponding attachments
# description:
# Old title: Don't close attach while deleting record from MON$ATTACHMENTS using ORDER BY clause.
# Confirmed bug on 3.0.6.33271.
# Checked on 3.0.6.33272 (SS/CS) - works fine.
# 22.04.2020. Checked separately on 4.0.0.1931 SS/CS: all OK. FB 4.0 can also be tested since this build.
#
# tracker_id: CORE-6266
# min_versions: ['3.0.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# import os
# import sys
# import time
# import fdb
#
# ATT_CNT=5
# ATT_DELAY=1
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_conn.close()
#
# con_list={}
# for i in range(0, ATT_CNT):
# if i > 0:
# time.sleep( ATT_DELAY )
#
# c = fdb.connect(dsn = dsn)
# a = c.attachment_id
# con_list[ i ] = (a, c)
# # print('created attachment ', (a,c) )
#
# con_admin = con_list[0][1]
#
# #print(con_admin.firebird_version)
#
# # this removes ALL connections --> should NOT be used for reproducing ticket issue:
# #con_admin.execute_immediate('delete from mon$attachments where mon$attachment_id != current_connection order by mon$timestamp')
#
# # this removes ALL connections --> should NOT be used for reproducing ticket issue:
# #con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection order by mon$timestamp')
#
# # This DOES NOT remove all attachments (only 'last' in order of timestamp), but
# # DELETE statement must NOT contain phrase 'mon$attachment_id != current_connection':
# con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 order by mon$timestamp')
#
# con_admin.commit()
#
# cur_admin = con_admin.cursor()
# cur_admin.execute('select mon$attachment_id,mon$user from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection' )
# i=0
# for r in cur_admin:
# print( '### ACHTUNG ### STILL ALIVE ATTACHMENT DETECTED: ', r[0], r[1].strip(), '###' )
# i += 1
# print('Number of attachments that remains alive: ',i)
#
# cur_admin.close()
#
# #print('Final cleanup before quit from Python.')
#
# for k,v in sorted( con_list.items() ):
# #print('attempt to close attachment ', v[0] )
# try:
# v[1].close()
# #print('done.')
# except Exception as e:
# pass
# #print('Got exception:', sys.exc_info()[0])
# #print(e[0])
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Number of attachments that remains alive: 0
"""
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| 31.028571
| 170
| 0.645795
| 465
| 3,258
| 4.380645
| 0.389247
| 0.027491
| 0.053019
| 0.045164
| 0.350515
| 0.334806
| 0.300442
| 0.265096
| 0.265096
| 0.265096
| 0
| 0.032514
| 0.225905
| 3,258
| 104
| 171
| 31.326923
| 0.775178
| 0.832413
| 0
| 0
| 0
| 0
| 0.175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|