Dataset schema (one row per source file):
hexsha: string (length 40)
size: int64 (3 to 1.03M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 972)
max_stars_repo_name: string (length 6 to 130)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 3 to 972)
max_issues_repo_name: string (length 6 to 130)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 116k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 3 to 972)
max_forks_repo_name: string (length 6 to 130)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 3 to 1.03M)
avg_line_length: float64 (1.13 to 941k)
max_line_length: int64 (2 to 941k)
alphanum_fraction: float64 (0 to 1)
17082a1780c9b1d5cefa05b73988108b35ec786d | 7,699 | py | Python | test/test_simplemusic.py | eric-brechemier/pyknon | 14dfe74c95a271c5bb98841ef3b0dbfa10d55128 | ["MIT"] | 184 | 2015-02-04T15:19:32.000Z | 2022-03-13T06:39:12.000Z | test/test_simplemusic.py | eric-brechemier/pyknon | 14dfe74c95a271c5bb98841ef3b0dbfa10d55128 | ["MIT"] | 15 | 2015-03-28T01:12:15.000Z | 2021-04-26T19:25:39.000Z | test/test_simplemusic.py | eric-brechemier/pyknon | 14dfe74c95a271c5bb98841ef3b0dbfa10d55128 | ["MIT"] | 38 | 2015-03-09T20:38:27.000Z | 2022-03-07T12:42:25.000Z |
from __future__ import division
from fractions import Fraction as F
try:
    import unittest2 as unittest
except ImportError:
    import unittest
import pyknon.simplemusic as music
class TestSimplemusic(unittest.TestCase):
def test_mod12(self):
self.assertEqual(music.mod12(0), 0)
self.assertEqual(music.mod12(1), 1)
self.assertEqual(music.mod12(13), 1)
self.assertEqual(music.mod12(14), 2)
self.assertEqual(music.mod12(-1), 11)
def test_interval(self):
self.assertEqual(music.interval(3, 4), 11)
self.assertEqual(music.interval(4, 3), 1)
self.assertEqual(music.interval(0, 12), 0)
self.assertEqual(music.interval(1, 10), 3)
self.assertEqual(music.interval(10, 1), 9)
def test_interval_class(self):
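        # Reading consistent with the assertions below (comment added for
        # clarity): interval_class folds an interval onto the range 0..6,
        # i.e. min(d, 12 - d) with d = (a - b) mod 12.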
self.assertEqual(music.interval_class(1, 9), 4)
self.assertEqual(music.interval_class(9, 1), 4)
self.assertEqual(music.interval_class(11, 1), 2)
self.assertEqual(music.interval_class(1, 11), 2)
self.assertEqual(music.interval_class(1, -1), 2)
self.assertEqual(music.interval_class(3, 2), 1)
def test_intervals(self):
self.assertEqual(music.intervals([1, 2, 3]), [1, 1])
self.assertEqual(music.intervals([0, 4, 7]), [4, 3])
self.assertEqual(music.intervals([0, 11, 3]), [1, 4])
def test_all_intervals(self):
self.assertEqual(music.all_intervals([0, 1, 4]), [1, 3, 4])
self.assertEqual(music.all_intervals([4, 1, 0]), [1, 3, 4])
def test_transposition(self):
n1 = [3, 7, 11, 10]
n2 = [6, 10, 2, 1]
self.assertEqual(music.transposition(n1, 3), n2)
def test_is_related_by_transposition(self):
self.assertTrue(music.is_related_by_transposition([0, 4, 7], [1, 5, 8]))
self.assertTrue(music.is_related_by_transposition([0, 7, 4], [5, 8, 1]))
self.assertTrue(music.is_related_by_transposition([4, 0, 7], [5, 1, 8]))
self.assertFalse(music.is_related_by_transposition([4, 0, 7], [0, 3, 7]))
def test_inversion(self):
n1 = [0, 4, 7]
n2 = [0, 8, 5]
n3 = music.inversion(n1, 0)
self.assertEqual(n3, n2)
self.assertEqual(music.inversion(n3), n1)
def test_transposition_startswith(self):
n1 = [3, 7, 11, 10]
n2 = [4, 8, 0, 11]
self.assertEqual(music.transposition_startswith(n1, 4), n2)
def test_inversion_startswith(self):
n1 = [3, 7, 11, 10]
n2 = [3, 11, 7, 8]
self.assertEqual(music.inversion_startswith(n1, 3), n2)
self.assertEqual(music.inversion_startswith([11, 10, 7], 1), [1, 2, 5])
def test_inversion_first_note(self):
self.assertEqual(music.inversion_first_note([3, 7, 9]), [3, 11, 9])
def test_rotate(self):
n1 = [0, 1, 3, 7]
self.assertEqual(music.rotate(n1, 0), n1)
self.assertEqual(music.rotate(n1, 1), [1, 3, 7, 0])
self.assertEqual(music.rotate(n1, 2), [3, 7, 0, 1])
self.assertEqual(music.rotate(n1, 3), [7, 0, 1, 3])
self.assertEqual(music.rotate(n1, 4), [0, 1, 3, 7])
def test_rotate_set(self):
all_rotations = [[1,2,3], [2,3,1], [3,1,2]]
self.assertEqual(music.rotate_set([1,2,3]), all_rotations)
def test_retrograde(self):
self.assertEqual(music.retrograde([0, 4, 7, 10]), [10, 7, 4, 0])
def test_note_name(self):
self.assertEqual(music.note_name(0), "C")
self.assertEqual(music.note_name(12), "C")
self.assertEqual(music.note_name(1), "C#")
self.assertEqual(music.note_name(3), "D#")
def test_notes_names(self):
notes = [0, 4, 8, 10, 14]
self.assertEqual(music.notes_names(notes), ['C', 'E', 'G#', 'A#', 'D'])
def test_accidentals(self):
self.assertEqual(music.accidentals("C##"), 2)
self.assertEqual(music.accidentals("D##"), 2)
self.assertEqual(music.accidentals("Ebb"), -2)
self.assertEqual(music.accidentals("Ab"), -1)
def test_name_to_number(self):
self.assertEqual(music.name_to_number("D###"), 5)
self.assertEqual(music.name_to_number("D"), 2)
self.assertEqual(music.name_to_number("A"), 9)
self.assertEqual(music.name_to_number("Eb"), 3)
self.assertEqual(music.name_to_number("Cbbb"), 9)
def test_name_to_diatonic(self):
self.assertEqual(music.name_to_diatonic("C"), 0)
self.assertEqual(music.name_to_diatonic("D###"), 1)
self.assertEqual(music.name_to_diatonic("Bb"), 6)
def test_note_duration(self):
self.assertEqual(music.note_duration(1/4, 1/4, 60), 1.0)
self.assertEqual(music.note_duration(1/2, 1/4, 60), 2.0)
def test_dotted_duration(self):
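        # A note with n dots lasts base * (2 - 1/2**n), hence 1/4 with one dot
        # is 3/8 and with two dots 7/16 (matches the cases below).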
self.assertEqual(music.dotted_duration(F(1/4), 0), F(1/4))
self.assertEqual(music.dotted_duration(F(1/4), 1), F(3/8))
self.assertEqual(music.dotted_duration(F(1/4), 2), F(7/16))
def test_durations(self):
self.assertEqual(music.durations([1/2, 1/4, 1/8], 1/4, 60), [2.0, 1.0, 0.5])
self.assertEqual(music.durations([1/2, 1/4, 1/8], 1/4, 120), [1.0, 0.5, 0.25])
class TestIntervalName(unittest.TestCase):
def test_interval_name_unison(self):
self.assertEqual(music.interval_name("C", "C"), "Perfect Unison")
self.assertEqual(music.interval_name("C", "C#"), "Augmented Unison")
def test_interval_name_second(self):
self.assertEqual(music.interval_name("D", "E"), "Major Second")
self.assertEqual(music.interval_name("D", "Eb"), "Minor Second")
self.assertEqual(music.interval_name("E", "F"), "Minor Second")
self.assertEqual(music.interval_name("E", "F#"), "Major Second")
self.assertEqual(music.interval_name("Eb", "F#"), "Augmented Second")
self.assertEqual(music.interval_name("E", "Fb"), "Diminished Second")
def test_interval_name_third(self):
self.assertEqual(music.interval_name("D", "F"), "Minor Third")
self.assertEqual(music.interval_name("D", "F#"), "Major Third")
self.assertEqual(music.interval_name("D", "Fb"), "Diminished Third")
self.assertEqual(music.interval_name("C", "E"), "Major Third")
self.assertEqual(music.interval_name("C", "Eb"), "Minor Third")
self.assertEqual(music.interval_name("Db", "F#"), "Augmented Third")
def test_interval_name_fourth(self):
self.assertEqual(music.interval_name("C", "F"), "Perfect Fourth")
self.assertEqual(music.interval_name("C", "F#"), "Augmented Fourth")
self.assertEqual(music.interval_name("C", "Fb"), "Diminished Fourth")
self.assertEqual(music.interval_name("F", "B"), "Augmented Fourth")
def test_interval_name_fifth(self):
self.assertEqual(music.interval_name("D", "A"), "Perfect Fifth")
self.assertEqual(music.interval_name("C", "Gb"), "Diminished Fifth")
self.assertEqual(music.interval_name("B", "F"), "Diminished Fifth")
self.assertEqual(music.interval_name("Bb", "F#"), "Augmented Fifth")
def test_interval_name_sixth(self):
self.assertEqual(music.interval_name("D", "B"), "Major Sixth")
self.assertEqual(music.interval_name("E", "C"), "Minor Sixth")
self.assertEqual(music.interval_name("D", "B#"), "Augmented Sixth")
self.assertEqual(music.interval_name("E", "Cb"), "Diminished Sixth")
def test_interval_name_seventh(self):
self.assertEqual(music.interval_name("C", "B"), "Major Seventh")
self.assertEqual(music.interval_name("E", "D#"), "Major Seventh")
self.assertEqual(music.interval_name("D", "C"), "Minor Seventh")
self.assertEqual(music.interval_name("D", "Cb"), "Diminished Seventh")
| 43.497175 | 86 | 0.634628 |
a4652252a2f5d4e76421d8b7285e113521aa3ff4 | 6,444 | py | Python | app/models/user.py | abhisuri97/h4i-demo-flask | 7c143553c12e7be5d2546ea41848bb0a39eb0cf4 | ["MIT"] | null | null | null | app/models/user.py | abhisuri97/h4i-demo-flask | 7c143553c12e7be5d2546ea41848bb0a39eb0cf4 | ["MIT"] | null | null | null | app/models/user.py | abhisuri97/h4i-demo-flask | 7c143553c12e7be5d2546ea41848bb0a39eb0cf4 | ["MIT"] | null | null | null |
from flask import current_app
from flask_login import AnonymousUserMixin, UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature, SignatureExpired
from werkzeug.security import check_password_hash, generate_password_hash
from .. import db, login_manager
class Permission:
GENERAL = 0x01
ADMINISTER = 0xff
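    # Permission values are bit flags: ADMINISTER (0xff) also satisfies a check
    # for GENERAL (0x01), since 0xff & 0x01 == 0x01 (see User.can() below).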
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
index = db.Column(db.String(64))
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.GENERAL, 'main', True),
            'Administrator': (
                Permission.ADMINISTER,  # grants all permissions
                'admin',
                False
            )
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.index = roles[r][1]
role.default = roles[r][2]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role \'%s\'>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
confirmed = db.Column(db.Boolean, default=False)
first_name = db.Column(db.String(64), index=True)
last_name = db.Column(db.String(64), index=True)
email = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
reviews = db.relationship('Review', back_populates='author')
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['ADMIN_EMAIL']:
self.role = Role.query.filter_by(
permissions=Permission.ADMINISTER).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_admin(self):
return self.can(Permission.ADMINISTER)
@property
def password(self):
raise AttributeError('`password` is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=604800):
"""Generate a confirmation token to email a new user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
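    # Typical flow (sketch, added for clarity): the token returned above is
    # emailed to the user and later passed back to confirm_account() for
    # verification against this user's id.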
def generate_email_change_token(self, new_email, expiration=3600):
"""Generate an email change token to email an existing user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def generate_password_reset_token(self, expiration=3600):
"""
Generate a password reset change token to email to an existing user.
"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def confirm_account(self, token):
"""Verify that the provided token is for this user's id."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
db.session.commit()
return True
def change_email(self, token):
"""Verify the new email for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
db.session.add(self)
db.session.commit()
return True
def reset_password(self, token, new_password):
"""Verify the new password for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
db.session.commit()
return True
@staticmethod
def generate_fake(count=100, **kwargs):
"""Generate a number of fake users for testing."""
from sqlalchemy.exc import IntegrityError
from random import seed, choice
from faker import Faker
fake = Faker()
roles = Role.query.all()
seed()
for i in range(count):
u = User(
first_name=fake.first_name(),
last_name=fake.last_name(),
email=fake.email(),
password='password',
confirmed=True,
role=choice(roles),
**kwargs)
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __repr__(self):
return '<User \'%s\'>' % self.full_name()
class AnonymousUser(AnonymousUserMixin):
def can(self, _):
return False
def is_admin(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
| 32.545455 | 76 | 0.609559 |
1995bf5779b779d80abf800906a19691a047e89f | 29,103 | py | Python | src/embedding/objects/train_obj.py | mykiscool/DeepCamera | e77cdbf45ab09895f315aa299bd6ac87b3bb6d66 | ["MIT"] | 914 | 2019-03-07T14:57:45.000Z | 2022-03-31T14:54:15.000Z | src/embedding/objects/train_obj.py | mykiscool/DeepCamera | e77cdbf45ab09895f315aa299bd6ac87b3bb6d66 | ["MIT"] | 45 | 2019-03-11T09:53:37.000Z | 2022-03-30T21:59:37.000Z | src/embedding/objects/train_obj.py | mykiscool/DeepCamera | e77cdbf45ab09895f315aa299bd6ac87b3bb6d66 | ["MIT"] | 148 | 2019-03-08T00:40:28.000Z | 2022-03-30T09:22:18.000Z |
from __future__ import absolute_import, division, print_function
import hashlib
import os
import requests
import os.path
import random
import re
import struct
import sys
import tarfile
from datetime import datetime
from time import time
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import graph_util, tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
# path = os.path.join((os.path.abspath(os.path.pardir)))
# sys.path.append(path)
# from utilslib.save2gst import sendMessage2Group
def sendMessage2Group(uuid, group_id, text):
if (len(uuid) < 1) or (len(group_id) < 1) or (len(text) < 1):
return
API_SERVER_ADDRESS = os.getenv('API_SERVER_ADDRESS','workaihost.tiegushi.com')
API_SERVER_PORT = os.getenv('API_SERVER_PORT','80')
gst_api_url = 'http://'+API_SERVER_ADDRESS+':'+API_SERVER_PORT+'/restapi/workai-send2group'
#gst_api_url = 'http://192.168.1.73:3000/restapi/workai-send2group'
payload = {'uuid': uuid,
'group_id': group_id,
'type': 'text',
'text': text,
'ts': int(time())*1000
}
try:
requests.post(gst_api_url, data=payload, timeout=4)
except Exception as e:
print(e)
# image_dir = os.path.join("dataset")
# bottleneck_dir = "bottlenecks"
BASEPATH = os.path.abspath(os.getenv('RUNTIME_BASEDIR',os.path.dirname(__file__)))
# image_dir = os.path.join(os.getenv('RUNTIME_BASEDIR',os.path.abspath(os.path.dirname(__file__))), 'dataset')
# bottleneck_dir = os.path.join(os.getenv('RUNTIME_BASEDIR',os.path.abspath(os.path.dirname(__file__))), 'bottlenecks')
output_graph = "bottlenecks_graph.pb"
output_labels = "output_labels.txt"
summaries_dir = "/tmp/output_labels.txt"
how_many_training_steps = 300
learning_rate = 0.01
testing_percentage = 10
validation_percentage = 10
eval_step_interval = 10
train_batch_size = 100
test_batch_size = -1
validation_batch_size = 100
print_misclassified_test_images = False
model_dir = os.path.join('utilspb')
final_tensor_name = "final_result"
flip_left_right = False
random_crop = 0
random_scale = 0
random_brightness = 0
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
image_counter = 0
def create_image_lists(image_dir, testing_percentage, validation_percentage):
print('start create_image_lists')
print(image_dir)
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
# return None
result = {}
sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
print(sub_dirs)
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
print('No files found')
continue
if len(file_list) < 20:
print('WARNING: Folder has less than 20 images, which may cause issues.')
continue
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
print('WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
#label_name = re.sub(r'[^A-Za-z0-9_]+', ' ', dir_name.lower())
label_name = dir_name
training_images = []
testing_images = []
validation_images = []
for i, file_name in enumerate(file_list):
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put an image in, the data set creator has a way of
# grouping photos that are close variations of each other. For example
# this is used in the plant disease data set to group multiple pictures of
# the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
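      # Note (added for clarity): the first file in a folder is placed in all
      # three sets and the second goes to validation, so even small folders get
      # at least one example per split; the remaining files use the hash below.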
if i == 0:
testing_images.append(base_name)
validation_images.append(base_name)
training_images.append(base_name)
elif i == 1:
validation_images.append(base_name)
else:
hash_name_hashed = hashlib.sha1(
compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
def get_image_path(image_lists, label_name, index, image_dir, category):
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category):
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '.txt'
def ensure_dir_exists(dir_name):
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def write_list_of_floats_to_file(list_of_floats, file_path):
s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)
with open(file_path, 'wb') as f:
f.write(s)
def read_list_of_floats_from_file(file_path):
with open(file_path, 'rb') as f:
s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())
return list(s)
bottleneck_path_2_bottleneck_values = {}
# time_taken is in seconds
#hours, rest = divmod(time_taken,3600)
#minutes, seconds = divmod(rest, 60)
def get_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir):
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(
image_lists, label_name, index, bottleneck_dir, category)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
  except ValueError:
print("Invalid float found, recreating bottleneck")
did_hit_error = True
if did_hit_error:
return None
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir):
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
bottleneck = get_bottleneck(sess, image_lists, label_name, index,
image_dir, category, bottleneck_dir)
if bottleneck:
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
print(str(how_many_bottlenecks) +
' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir):
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
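      # The oversized random index is reduced modulo the number of images in
      # this category inside get_image_path()/get_bottleneck(), so any value in
      # this range maps to a valid image (comment added for clarity).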
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir)
if bottleneck:
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir)
if bottleneck:
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def variable_summaries(var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name):
with tf.name_scope('input'):
bottleneck_input = tf.placeholder(tf.float32, shape=[
None, BOTTLENECK_TENSOR_SIZE], name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
layer_weights = tf.Variable(tf.truncated_normal(
[BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(
tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram('activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
class TrainFromBottlenecks:
def __init__(self):
self.sess=None
self.start_time1 = None
self.start_time2 = None
def trainingTesting(self):
# BASE_FOLDER = os.path.join(os.getenv('RUNTIME_BASEDIR',os.path.abspath(os.path.dirname(__file__))), 'dataset')
# BOTTLENECKS_FOLDER = os.path.join(os.getenv('RUNTIME_BASEDIR',os.path.abspath(os.path.dirname(__file__))), 'bottlenecks')
BASE_FOLDER = image_dir_path
BOTTLENECKS_FOLDER = bottleneck_dir_path
image_dir = BASE_FOLDER
bottleneck_dir = BOTTLENECKS_FOLDER
self.start_time1 = int(time())
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(image_dir, testing_percentage,
validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
print('No valid folders of images found at ' + image_dir)
if class_count == 1:
print('Only one valid folder of images found at ' + image_dir +
' - multiple classes are needed for classification.')
if class_count < 2:
return "valid image not enough, multiple classes are needed for classification"
sess = tf.Session()
with sess.as_default():
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(summaries_dir):
tf.gfile.DeleteRecursively(summaries_dir)
tf.gfile.MakeDirs(summaries_dir)
# Set up the pre-trained graph.
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(len(image_lists.keys()),
final_tensor_name)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
print("TrainFromBottlenecks __init__")
# Run the training for as many cycles as requested on the command line.
for i in range(how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every time
# with distortions applied, or from the cache stored on disk.
train_bottlenecks, train_ground_truth, _ = get_random_cached_bottlenecks(
sess, image_lists, train_batch_size, 'training',
bottleneck_dir, image_dir)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run([merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == how_many_training_steps)
if (i % eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
train_accuracy * 100))
print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
cross_entropy_value))
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, validation_batch_size, 'validation',
bottleneck_dir, image_dir))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
self.start_time2 = int(time())
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(sess, image_lists, test_batch_size,
'testing', bottleneck_dir,
image_dir))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print('Final test accuracy = %.1f%% (N=%d)' % (
test_accuracy * 100, len(test_bottlenecks)))
ret_log='Final test accuracy = %.1f%% (N=%d)' % (test_accuracy * 100, len(test_bottlenecks))
if print_misclassified_test_images:
print('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
print('%70s %s' %
                (test_filename, list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as constants.
output_graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph.as_graph_def(), [final_tensor_name])
with gfile.FastGFile(output_graph_path, 'wb') as f:
f.write(output_graph_def.SerializeToString())
with gfile.FastGFile(output_labels_path, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
sess.close()
      return ('Training finished, Total time=%d Final test accuracy = %.1f%% (N=%d)' % (
(self.start_time2-self.start_time1), test_accuracy * 100, len(test_bottlenecks)))
def train_from_bottlenecks(image_dir, bottleneck_dir):
device_id = sys.argv[1]
group_id = sys.argv[2]
start_time1 = int(time())
sess = tf.Session()
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(summaries_dir):
tf.gfile.DeleteRecursively(summaries_dir)
tf.gfile.MakeDirs(summaries_dir)
# Set up the pre-trained graph.
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(image_dir, testing_percentage,
validation_percentage)
print(image_lists)
class_count = len(image_lists.keys())
if class_count == 0:
print('No valid folders of images found at ' + image_dir)
if class_count == 1:
print('Only one valid folder of images found at ' + image_dir +
' - multiple classes are needed for classification.')
if class_count < 2:
return "valid image not enough, multiple classes are needed for classification"
sendMessage2Group(device_id, group_id, "Training now ...")
start_time = None
image_counter = 0
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(len(image_lists.keys()),
final_tensor_name)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every time
# with distortions applied, or from the cache stored on disk.
train_bottlenecks, train_ground_truth, _ = get_random_cached_bottlenecks(
sess, image_lists, train_batch_size, 'training',
bottleneck_dir, image_dir)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run([merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == how_many_training_steps)
if (i % eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
train_accuracy * 100))
print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
cross_entropy_value))
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, validation_batch_size, 'validation',
bottleneck_dir, image_dir))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
start_time2 = int(time())
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(sess, image_lists, test_batch_size,
'testing', bottleneck_dir,
image_dir))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print('Final test accuracy = %.1f%% (N=%d)' % (
test_accuracy * 100, len(test_bottlenecks)))
if print_misclassified_test_images:
print('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
print('%70s %s' %
              (test_filename, list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as constants.
output_graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph.as_graph_def(), [final_tensor_name])
with gfile.FastGFile(output_graph_path, 'wb') as f:
f.write(output_graph_def.SerializeToString())
with gfile.FastGFile(output_labels_path, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
  log = ('Training finished, Total time=%d Final test accuracy = %.1f%% (N=%d)' % (
(start_time2 - start_time1), test_accuracy * 100, len(test_bottlenecks)))
sendMessage2Group(device_id, group_id, log)
if __name__ == '__main__':
groupid = sys.argv[2]
group_path = os.path.join(BASEPATH, groupid)
image_dir_path = os.path.join(group_path, 'dataset')
bottleneck_dir_path = os.path.join(group_path, "bottlenecks")
output_graph_path = os.path.join(group_path, output_graph)
output_labels_path = os.path.join(group_path, output_labels)
if not os.path.exists(group_path):
os.mkdir(group_path)
os.mkdir(image_dir_path)
os.mkdir(bottleneck_dir_path)
train_from_bottlenecks(image_dir=image_dir_path, bottleneck_dir=bottleneck_dir_path)
| 47.24513 | 131 | 0.620692 |
9e830431587c9e0ff79f1a22f27fad3e98ff399e | 4,508 | py | Python | python/JSONManifestHandler.py | Kcjohnson/SCGP | e757b3b750ce8ccf15085cb4bc60f2dfd4d9a285 | ["MIT"] | 16 | 2020-11-09T14:23:45.000Z | 2022-03-22T07:10:15.000Z | python/JSONManifestHandler.py | Kcjohnson/SCGP | e757b3b750ce8ccf15085cb4bc60f2dfd4d9a285 | ["MIT"] | 3 | 2020-01-16T06:07:35.000Z | 2020-06-01T15:51:32.000Z | python/JSONManifestHandler.py | Kcjohnson/SCGP | e757b3b750ce8ccf15085cb4bc60f2dfd4d9a285 | ["MIT"] | 7 | 2020-12-14T02:51:42.000Z | 2022-02-18T07:15:55.000Z |
"""
JSON file
Manifest handler
"""
from python.ManifestHandler import ManifestHandler
import json
import psycopg2
import psycopg2.extras
class JSONManifestHandler(ManifestHandler):
def __init__(self, cases_json, samples_json, aliquots_json, files_json, readgroups_json, pairs_json):
self.cases = json.load(open(cases_json))
self.samples = json.load(open(samples_json))
self.aliquots = json.load(open(aliquots_json))
self.files = json.load(open(files_json))
self.readgroups = json.load(open(readgroups_json))
self.pairs = json.load(open(pairs_json))
## CASES -> DICT
self.cases_dict = build_dict(self.cases, "case_id")
## SAMPLES -> DICT
self.samples_dict = build_dict(self.samples, "sample_id")
## ALIQUOTS -> DICT
self.aliquots_dict = build_dict(self.aliquots, "aliquot_id")
## FILES -> DICT
self.files_dict = build_dict(self.files, "file_uuid")
## Pair IDs are unique, PAIRS -> DICT
self.pairs_dict = build_dict(self.pairs, "pair_id")
## Aliquot IDs and BAM files map 1:1
self.ALIQUOT_TO_BAM_PATH = {}
for file in self.files:
if file["file_format"] == "BAM":
self.ALIQUOT_TO_BAM_PATH[ file["aliquot_id"] ] = file["file_path"]
## Case to aliquots
## Dict of aliquots per case
self.CASE_TO_ALIQUOT = {}
        for aliquot in self.aliquots:
aliquot["case_id"] = self.samples_dict[ aliquot["sample_id"] ]["case_id"]
if aliquot["case_id"] not in self.CASE_TO_ALIQUOT:
self.CASE_TO_ALIQUOT[ aliquot["case_id"] ] = [ aliquot["aliquot_id"] ]
elif aliquot["aliquot_id"] not in self.CASE_TO_ALIQUOT[ aliquot["case_id"] ]:
self.CASE_TO_ALIQUOT[ aliquot["case_id"] ].append(aliquot["aliquot_id"])
## Aliquots and RGIDs map 1:many
self.ALIQUOT_TO_RGID = {}
self.ALIQUOT_TO_LEGACY_RGID = {}
for readgroup in self.readgroups:
if readgroup["aliquot_id"] not in self.ALIQUOT_TO_RGID:
self.ALIQUOT_TO_RGID[ readgroup["aliquot_id"] ] = [ readgroup["readgroup_id"] ]
else:
self.ALIQUOT_TO_RGID[ readgroup["aliquot_id"] ].append(readgroup["readgroup_id"])
if "legacy_readgroup_id" not in readgroup or len(readgroup["legacy_readgroup_id"]) == 0:
continue
if readgroup["aliquot_id"] not in self.ALIQUOT_TO_LEGACY_RGID:
self.ALIQUOT_TO_LEGACY_RGID[ readgroup["aliquot_id"] ] = [ readgroup["legacy_readgroup_id"] ]
else:
self.ALIQUOT_TO_LEGACY_RGID[ readgroup["aliquot_id"] ].append(readgroup["legacy_readgroup_id"])
## Readgroup information and
## Aliquots and RGIDs map 1:many
## RGIDs are unique within an aliquot
## Aliquot IDs and fastQ files map 1:many
        ## Because FQ files are also separated by readgroup, create dictionary of FQ files here as well
self.ALIQUOT_TO_READGROUP = {}
self.ALIQUOT_TO_FQ_PATH = {}
for readgroup in self.readgroups:
if readgroup["aliquot_id"] not in self.ALIQUOT_TO_READGROUP:
self.ALIQUOT_TO_READGROUP[ readgroup["aliquot_id"] ] = { readgroup["readgroup_id"] : readgroup }
else:
self.ALIQUOT_TO_READGROUP[ readgroup["aliquot_id"] ][ readgroup["readgroup_id"] ] = readgroup
self.ALIQUOT_TO_READGROUP[ readgroup["aliquot_id"] ][ readgroup["readgroup_id"] ]["file_path"] = self.files_dict[ self.ALIQUOT_TO_READGROUP[ readgroup["aliquot_id"] ][ readgroup["readgroup_id"] ]["file_uuid"] ]["file_path"]
self.ALIQUOT_TO_READGROUP[ readgroup["aliquot_id"] ][ readgroup["readgroup_id"] ]["file_format"] = self.files_dict[ self.ALIQUOT_TO_READGROUP[ readgroup["aliquot_id"] ][ readgroup["readgroup_id"] ]["file_uuid"] ]["file_format"]
if self.ALIQUOT_TO_READGROUP[ readgroup["aliquot_id"] ][ readgroup["readgroup_id"] ]["file_format"] == "FQ":
if readgroup["aliquot_id"] not in self.ALIQUOT_TO_FQ_PATH:
self.ALIQUOT_TO_FQ_PATH[ readgroup["aliquot_id"] ] = {}
self.ALIQUOT_TO_FQ_PATH[ readgroup["aliquot_id"] ][ readgroup["readgroup_id"] ] = self.ALIQUOT_TO_READGROUP[ readgroup["aliquot_id"] ][ readgroup["readgroup_id"] ]["file_path"].split(",")
## IMPLEMENTATION PENDING
## END ##
| 50.651685 | 239 | 0.636868 |
741946c2d88c2dcd13d050a94db8ae629b61618d | 1,484 | py | Python | photos/models.py | gumato/gallery | 32a2d7b895cbba990ba5c9e5fa3ed9405dac4d09 | ["Unlicense"] | null | null | null | photos/models.py | gumato/gallery | 32a2d7b895cbba990ba5c9e5fa3ed9405dac4d09 | ["Unlicense"] | null | null | null | photos/models.py | gumato/gallery | 32a2d7b895cbba990ba5c9e5fa3ed9405dac4d09 | ["Unlicense"] | null | null | null |
from django.db import models
# Create your models here.
class Location(models.Model):
location=models.CharField(max_length=30)
def save_locations(self):
self.save()
def delete_locations(self):
self.delete()
def __str__(self):
return self.location
class Category(models.Model):
name = models.CharField(max_length =100)
def save_category(self):
self.save()
def delete_category(self):
self.delete()
def __str__(self):
return self.name
class Image(models.Model):
image = models.ImageField(upload_to='photo/')
image_name = models.CharField(max_length =20)
image_description = models.TextField(max_length=80)
    location = models.ForeignKey(Location, on_delete=models.CASCADE)
category = models.ManyToManyField(Category)
pub_date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['pub_date']
def save_image(self):
self.save()
def delete_image(self):
self.delete()
@classmethod
def get_all(cls):
images = cls.objects.all()
return images
@classmethod
def filter_category(cls,query):
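        # e.g. Image.filter_category('travel') returns images whose related
        # Category has name 'travel' ('travel' is just an illustrative value).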
images = cls.objects.filter(category__name=query)
return images
@classmethod
def search_by_category(cls, search_term):
        images = cls.objects.filter(category__name__icontains=search_term)
return images
def __str__(self):
return self.image_name
| 20.901408 | 80 | 0.652291 |
8e31b2dac8489336cd1e06cecbc5aadc0d2594bb | 1,235 | py | Python | task_board/tasks/serializers.py | AlexanderKaluzhny/task_board | 286eeb52403d695d6a61ec42bea7fa32e5aca228 | ["MIT"] | 3 | 2017-04-22T11:22:39.000Z | 2020-01-22T12:22:56.000Z | task_board/tasks/serializers.py | AlexanderKaluzhny/task_board | 286eeb52403d695d6a61ec42bea7fa32e5aca228 | ["MIT"] | 12 | 2017-12-14T07:56:33.000Z | 2022-03-12T00:03:56.000Z | task_board/tasks/serializers.py | AlexanderKaluzhny/task_board | 286eeb52403d695d6a61ec42bea7fa32e5aca228 | ["MIT"] | 1 | 2022-01-29T16:44:50.000Z | 2022-01-29T16:44:50.000Z |
from rest_framework import serializers
from task_board.tasks.models import Task, TaskStatuses
class TaskSerializer(serializers.ModelSerializer):
created_by_username = serializers.StringRelatedField(source='created_by')
accomplished_by_username = serializers.StringRelatedField(source='accomplished_by')
status_readable = serializers.ReadOnlyField(source='get_status_display')
class Meta:
model = Task
fields = ['id', 'name', 'description', 'status',
'created_by', 'created_by_username',
'accomplished_by', 'accomplished_by_username',
'status_readable']
def __init__(self, *args, **kwargs):
# specify the user who accomplished the task if the status 'done' is set
if 'data' in kwargs and 'context' in kwargs and 'request' in kwargs['context']:
request = kwargs['context']['request']
data = kwargs['data']
if 'status' in data and str(data['status']) == str(TaskStatuses.DONE):
data = kwargs['data'].copy()
data.update({'accomplished_by': request.user.pk})
kwargs['data'] = data
super(TaskSerializer, self).__init__(*args, **kwargs)
| 41.166667 | 87 | 0.645344 |
bafd2d1ff44b0b3eee2aac01505cde7e13dfd07d | 25,763 | py | Python | pmb.py | BTEST4HE/GT3MBLPMBTools | 09408d2a79971073f9d2a47f1d64e0ddcfa088cf | ["MIT"] | null | null | null | pmb.py | BTEST4HE/GT3MBLPMBTools | 09408d2a79971073f9d2a47f1d64e0ddcfa088cf | ["MIT"] | null | null | null | pmb.py | BTEST4HE/GT3MBLPMBTools | 09408d2a79971073f9d2a47f1d64e0ddcfa088cf | ["MIT"] | null | null | null |
import helper
pmb_dict = {
'pmb10': 0x20,
'pmb20': 0x39,
'pmb21': 0x7D,
'pmb40': 0x7E,
'pmb40s': 0x5E
}
"""
pmb_dict
0x01 : chr_size = 0x20
0x02 : chr_size = 0x40
0x04 : FUNC data availability
0x08 : Availability of recursive data for SUB
0x10 : Presence of variable "text_unk_3" in TEXT data
0x20 : euc_jp when ON, sjis when OFF
0x40 : PMBBIN with or without some padding
"""
pmb_dict_list = list(pmb_dict.keys())
pmb_control_flags_list = [[['pmb10', 'pmb21'], [0x10000, 0]],
[['pmb10', 'pmb40'], [0x10000, 0]],
[['pmb10', 'pmb40s'], [0x10000, 0]],
[['pmb20', 'pmb21'], [0x10000, 0]],
[['pmb20', 'pmb40'], [0x10000, 0]],
[['pmb20', 'pmb40s'], [0x10000, 0]],
[['pmb10', 'pmb20'], [0x8, 0]],
[['pmb10', 'pmb21'], [0x8, 0]],
[['pmb10', 'pmb40'], [0x8, 0]],
[['pmb10', 'pmb40s'], [0x8, 0]]
]
def importPmb(source, pmb_flags):
if (pmb_flags & 0x1) != 0:
chr_size = 0x20
elif (pmb_flags & 0x2) != 0:
chr_size = 0x40
else:
chr_size = 0x10
# FileHeader
r_m_size = helper.unpackOneFormat("L", source, 0x00, 0)
r_offset = helper.unpackOneFormat("L", source, 0x04, 0)
r_img_size = helper.unpackOneFormat("L", source, 0x08, 0)
r_img_offset = helper.unpackOneFormat("L", source, 0x0C, 0)
pmb_tree = ["ROOT", [k for k, v in pmb_dict.items() if v == pmb_flags][0], [], []]
pointer = 0x10
# SUB
helper.checkPointer(r_offset, pointer)
(m_offset_list, pointer) = helper.getOffsetAddress(source, r_m_size, pointer)
(pmb_tree[2], pointer) = importPmbMAIN(source, pmb_tree[2], chr_size, pmb_flags, m_offset_list, pointer)
# IMG
helper.checkPointer(r_img_offset, pointer)
img_bin = None
if r_img_size > 0:
img_bin = importPmbPBIN(source, pmb_flags, r_img_size, pointer)
return pmb_tree, img_bin
def importPmbMAIN(source, pmb_tree, chr_size, pmb_flags, offset_list, pointer):
enc = 'euc_jp'
for i in range(len(offset_list)):
m_pointer = 0
        m_label = helper.getName(source, offset_list[i], enc, 0)  # label
        m_pointer += chr_size
        m_s_size = helper.unpackOneFormat("L", source, offset_list[i] + m_pointer, 0)  # number of following addresses
        m_pointer += 4
        m_s_offset = helper.unpackOneFormat("L", source, offset_list[i] + m_pointer, 0)  # following address
        m_pointer += 4
        m_layer_size = helper.unpackOneFormat("L", source, offset_list[i] + m_pointer, 0)  # number of following addresses
        m_pointer += 4
        m_layer_offset = helper.unpackOneFormat("L", source, offset_list[i] + m_pointer, 0)  # following address
m_pointer += 4
pmb_tree.append([['MAIN', m_label], [], []])
pointer += m_pointer
helper.checkPointer(m_s_offset, pointer)
(s_offset_list, pointer) = helper.getOffsetAddress(source, m_s_size, pointer)
(pmb_tree[i][1], pointer) = importPmbSUB(source, pmb_tree[i][1], chr_size, pmb_flags, s_offset_list, pointer)
helper.checkPointer(m_layer_offset, pointer)
(layer_offset_list, pointer) = helper.getOffsetAddress(source, m_layer_size, pointer)
(pmb_tree[i][2], pointer) = importPmbLAYER(source, pmb_tree[i][2], chr_size, pmb_flags, layer_offset_list,
pointer)
return pmb_tree, pointer
def importPmbSUB(source, pmb_tree, chr_size, pmb_flags, offset_list, pointer):
enc = 'euc_jp'
for i in range(len(offset_list)):
s_pointer = 0
s_label_1 = helper.getName(source, offset_list[i], enc, 0)
s_pointer += chr_size
s_flags = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
s_unk_1 = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
s_unk_2 = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
s_unk_3 = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
s_unk_4 = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
s_unk_5 = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
if (pmb_flags & 0x4) != 0:
s_func_offset = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
else:
s_func_offset = 0
if (pmb_flags & 0x8) != 0:
ss_offset = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
else:
ss_offset = 0
s_image_offset = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
s_box_offset = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
s_text_offset = helper.unpackOneFormat("L", source, offset_list[i] + s_pointer, 0)
s_pointer += 4
pmb_tree.append([['SUB', s_label_1, s_flags, s_unk_1, s_unk_2, s_unk_3, s_unk_4, s_unk_5],
[], [], [], [], []])
pointer += s_pointer
if (s_flags & 0x30000) == 0x10000 and (pmb_flags & 0x4) != 0:
helper.checkPointer(s_func_offset, pointer)
(pmb_tree[i][1], pointer) = importPmbFUNC(source, pmb_flags, pointer)
if (s_flags & 8) != 0 and (pmb_flags & 0x8) != 0:
helper.checkPointer(ss_offset, pointer)
(pmb_tree[i][2], pointer) = importPmbSUB(source, pmb_tree[i][2], chr_size, pmb_flags, [pointer], pointer)
if (s_flags & 1) != 0:
helper.checkPointer(s_image_offset, pointer)
(pmb_tree[i][3], pointer) = importPmbIMAGE(source, pointer)
if (s_flags & 2) != 0:
helper.checkPointer(s_box_offset, pointer)
(pmb_tree[i][4], pointer) = importPmbBOX(source, pointer)
if (s_flags & 4) != 0:
helper.checkPointer(s_text_offset, pointer)
(pmb_tree[i][5], pointer) = importPmbTEXT(source, pmb_flags, pointer)
return pmb_tree, pointer
def importPmbFUNC(source, pmb_flags, pointer):
enc = checkEncodingFromPmbFlags(pmb_flags)
func_label_offset = helper.unpackOneFormat("L", source, pointer, 0)
func_label = helper.getName(source, func_label_offset, enc, 0)
pmb_tree = ['FUNC', func_label]
pointer += 0x4 + helper.getLen(func_label, enc)
return pmb_tree, pointer
def importPmbIMAGE(source, pointer):
i_image_id = helper.unpackOneFormat("L", source, pointer, 0)
i_unk_2 = helper.unpackOneFormat("L", source, pointer + 0x4, 0)
i_unk_3 = helper.unpackOneFormat("L", source, pointer + 0x8, 0)
i_unk_4 = helper.unpackOneFormat("L", source, pointer + 0xC, 0)
i_unk_5 = helper.unpackOneFormat("L", source, pointer + 0x10, 0)
i_unk_6 = helper.unpackOneFormat("L", source, pointer + 0x14, 0)
i_unk_7 = helper.unpackOneFormat("L", source, pointer + 0x18, 0)
i_unk_8 = helper.unpackOneFormat("L", source, pointer + 0x1C, 0)
i_unk_9 = helper.unpackOneFormat("L", source, pointer + 0x20, 0)
pmb_tree = ['IMAGE', i_image_id, i_unk_2, i_unk_3, i_unk_4, i_unk_5,
i_unk_6, i_unk_7, i_unk_8, i_unk_9]
pointer += 0x24
return pmb_tree, pointer
def importPmbBOX(source, pointer):
box_unk_1 = helper.unpackOneFormat("L", source, pointer, 0)
box_unk_2 = helper.unpackOneFormat("L", source, pointer + 0x4, 0)
box_unk_3 = helper.unpackOneFormat("L", source, pointer + 0x8, 0)
box_unk_4 = helper.unpackOneFormat("L", source, pointer + 0xC, 0)
pmb_tree = ['BOX', box_unk_1, box_unk_2, box_unk_3, box_unk_4]
pointer += 0x10
return pmb_tree, pointer
def importPmbTEXT(source, pmb_flags, pointer):
enc = checkEncodingFromPmbFlags(pmb_flags)
text_pointer = 0
text_unk_1 = helper.unpackOneFormat("B", source, pointer, 0)
text_pointer += 2
text_unk_2 = helper.unpackOneFormat("B", source, pointer + text_pointer, 0)
text_pointer += 2
if (pmb_flags & 0x10) != 0:
text_unk_3 = helper.unpackOneFormat("L", source, pointer + text_pointer, 0)
text_pointer += 4
else:
text_unk_3 = 0
text_label1_offset = helper.unpackOneFormat("L", source, pointer + text_pointer, 0)
text_pointer += 4
text_label2_offset = helper.unpackOneFormat("L", source, pointer + text_pointer, 0)
text_pointer += 4
text_label1 = helper.getName(source, text_label1_offset, enc, 0)
text_label2 = helper.getName(source, text_label2_offset, enc, 0)
pmb_tree = ['TEXT1', text_unk_1, text_unk_2, text_unk_3, text_label1, text_label2]
pointer += text_pointer + helper.getLen(text_label1, enc) + helper.getLen(text_label2, enc)
return pmb_tree, pointer
def importPmbLAYER(source, pmb_tree, chr_size, pmb_flags, offset_list, pointer):
enc = 'euc_jp'
for i in range(len(offset_list)):
layer_pointer = 0
layer_label = helper.getName(source, offset_list[i], enc, 0)
layer_pointer += chr_size
layer_s1_size = helper.unpackOneFormat("L", source, offset_list[i] + layer_pointer, 0)
layer_pointer += 4
layer_s1_offset = helper.unpackOneFormat("L", source, offset_list[i] + layer_pointer, 0)
layer_pointer += 4
layer_s2_size = helper.unpackOneFormat("L", source, offset_list[i] + layer_pointer, 0)
layer_pointer += 4
layer_s2_offset = helper.unpackOneFormat("L", source, offset_list[i] + layer_pointer, 0)
layer_pointer += 4
pmb_tree.append([['LAYER', layer_label], [], []])
pointer += layer_pointer
helper.checkPointer(layer_s1_offset, pointer)
(s1_offset_list, pointer) = helper.getOffsetAddress(source, layer_s1_size, pointer)
(pmb_tree[i][1], pointer) = importPmbSUB(source, pmb_tree[i][1], chr_size, pmb_flags, s1_offset_list, pointer)
helper.checkPointer(layer_s2_offset, pointer)
(s2_offset_list, pointer) = helper.getOffsetAddress(source, layer_s2_size, pointer)
(pmb_tree[i][2], pointer) = importPmbSUB(source, pmb_tree[i][2], chr_size, pmb_flags, s2_offset_list, pointer)
return pmb_tree, pointer
def importPmbPBIN(pmb, pmb_flags, img_count, pointer):
b_i_h = helper.setZeroPaddingForLabel("PBIN", 4, "euc_jp")
b_i_l = b''
b_i_m = b''
b_i_h += (0).to_bytes(4, byteorder="little")
b_i_h += img_count.to_bytes(4, byteorder="little")
b_i_h += (0x10).to_bytes(4, byteorder="little")
# Defining IMG list and offset
img_size_list = []
img_offset_list = []
base_ofs = pointer - 0x10
for i in range(img_count):
img_size = helper.unpackOneFormat("L", pmb, pointer, 0)
img_offset = helper.unpackOneFormat("L", pmb, pointer + 0x4, 0)
# Creating a list of IMG
img_size_list.append(img_size)
img_offset_list.append(img_offset)
pointer += 0x8
# Creating an offset list for PBIN files
img_out_offset_list = [n - base_ofs for n in img_offset_list]
# Create binary data for PBIN file
padding = 0 # count of padding data at the end of gz file.
for i in range(img_count):
# Set padding
img_header = pmb[img_offset_list[i]:img_offset_list[i] + 4]
# gz or img
if img_header[:2] == b'\x1f\x8b':
if pointer % 4 != 0:
padding += 4 - (pointer % 4)
pointer += 4 - (pointer % 4)
elif img_header == b'Tex1':
if pointer % 0x10 != 0:
padding += 0x10 - (pointer % 0x10)
pointer += 0x10 - (pointer % 0x10)
else:
raise ValueError("error!")
# Create binary data for IMG list
b_i_l += img_size_list[i].to_bytes(4, byteorder="little")
b_i_l += (img_out_offset_list[i] - padding).to_bytes(4, byteorder="little")
# Acquisition of img file (if statement is processed when i+1 is out of range)
if i + 1 != img_count:
img = pmb[img_offset_list[i]:img_offset_list[i + 1]]
else:
img = pmb[img_offset_list[i]:]
# gz or img
if img[:2] == b'\x1f\x8b':
check_padding = helper.unpackOneFormat("L", img[-4:], 0, 0)
while check_padding != img_size_list[i]:
img = img[:-1]
check_padding = helper.unpackOneFormat("L", img[-4:], 0, 0)
padding += 1
pointer += 1
elif img[:4] == b'Tex1':
pass
else:
raise ValueError("error!")
pointer += len(img)
if i + 1 == img_count and pointer % 4 != 0 and (pmb_flags & 0x40) != 0:
check_padding = 4 - (pointer % 4)
pointer -= check_padding
img = img[:pointer]
# Add img to binary
b_i_m += img
# Add all binary elements.
b_i = b_i_h + b_i_l + b_i_m
return b_i
def setPmbGetAddress(pmb, offset, size, pointer):
l_address = []
for i in range(size):
l_address.append(helper.unpackOneFormat("L", pmb, offset, 0))
offset += 0x04
pointer += 0x04
return l_address, pointer
def checkEncodingFromPmbFlags(pmb_flags):
if (pmb_flags & 0x20) != 0:
return "euc_jp"
else:
return "shift_jis"
def exportPmb(pmb_tree, img_bin, pmb_flags):
if (pmb_flags & 0x1) != 0:
chr_size = 0x20
elif (pmb_flags & 0x2) != 0:
chr_size = 0x40
else:
chr_size = 0x10
m_size = len(pmb_tree[2])
b_r = m_size.to_bytes(4, byteorder='little')
pointer = 0x10
b_r += pointer.to_bytes(4, byteorder='little')
b_m = b""
b_m_offset = b""
m_offset_list = []
pointer += 0x4 * m_size
for i in range(m_size):
m_offset_list.append(pointer.to_bytes(4, byteorder='little'))
(b, pointer) = exportPmbMAIN(pmb_tree[2][i], chr_size, pmb_flags, pointer)
b_m += b
img_size = 0
img_offset = 0
if img_bin is not None:
img_pointer = 0x8
img_size = helper.unpackOneFormat("L", img_bin, img_pointer, 0)
img_pointer += 0x4
img_offset = helper.unpackOneFormat("L", img_bin, img_pointer, 0)
img_pointer += (img_offset - 0x10) + 0x4
b_r += img_size.to_bytes(4, byteorder='little')
b_r += pointer.to_bytes(4, byteorder='little')
b_img = b""
if img_bin is not None:
b_img = exportPmbPBIN(img_bin, pmb_flags, img_size, img_offset, pointer)
for i in range(m_size):
b_m_offset += m_offset_list[i]
b_r += b_m_offset + b_m + b_img
return b_r
def exportPmbMAIN(pmb_tree, chr_size, pmb_flags, pointer):
enc = 'euc_jp'
b_m = b""
b_m += helper.setZeroPaddingForLabel(pmb_tree[0][1], chr_size, enc) # MAIN_label
s_size = len(pmb_tree[1])
b_m += s_size.to_bytes(4, byteorder='little') # MAIN_SUB_size
pointer += chr_size + (0x4 * 4)
b_m += pointer.to_bytes(4, byteorder='little') # MAIN_SUB_offset
b_s = b""
b_s_offset = b""
s_offset_list = []
pointer += 0x4 * s_size
for i in range(s_size):
s_offset_list.append(pointer.to_bytes(4, byteorder='little'))
(b, pointer) = exportPmbSUB(pmb_tree[1][i], chr_size, pmb_flags, pointer)
b_s += b
for i in range(s_size):
b_s_offset += s_offset_list[i]
layer_size = len(pmb_tree[2])
b_m += layer_size.to_bytes(4, byteorder='little') # MAIN_LAYER_size
b_m += pointer.to_bytes(4, byteorder='little') # MAIN_LAYER_offset
b_layer = b""
b_layer_offset = b""
layer_offset_list = []
pointer += 0x4 * layer_size
for i in range(layer_size):
layer_offset_list.append(pointer.to_bytes(4, byteorder='little'))
(b, pointer) = exportPmbLAYER(pmb_tree[2][i], chr_size, pmb_flags, pointer)
b_layer += b
for i in range(layer_size):
b_layer_offset += layer_offset_list[i]
b_m += b_s_offset + b_s + b_layer_offset + b_layer
return b_m, pointer
def exportPmbSUB(pmb_tree, chr_size, pmb_flags, pointer):
enc = 'euc_jp'
b_s = b""
b_s_func = b""
b_ss = b""
b_s_image = b""
b_s_box = b""
b_s_text = b""
b_s += helper.setZeroPaddingForLabel(pmb_tree[0][1], chr_size, enc)
s_flags = pmb_tree[0][2]
b_s += s_flags.to_bytes(4, byteorder='little')
b_s += pmb_tree[0][3].to_bytes(4, byteorder='little')
b_s += pmb_tree[0][4].to_bytes(4, byteorder='little')
b_s += pmb_tree[0][5].to_bytes(4, byteorder='little')
b_s += pmb_tree[0][6].to_bytes(4, byteorder='little')
b_s += pmb_tree[0][7].to_bytes(4, byteorder='little')
if (pmb_flags & 0x8) != 0:
pointer += 0x4
if (pmb_flags & 0x4) != 0:
pointer += 0x4
pointer += chr_size + 0x24
if (pmb_flags & 0x4) != 0:
if (s_flags & 0x30000) == 0x10000:
b_s += pointer.to_bytes(4, byteorder='little')
(b_s_func, pointer) = exportPmbFUNC(pmb_tree[1], pmb_flags, pointer)
else:
b_s += (0).to_bytes(4, byteorder='little')
if (pmb_flags & 0x8) != 0:
if (s_flags & 8) != 0:
b_s += pointer.to_bytes(4, byteorder='little')
(b_ss, pointer) = exportPmbSUB(pmb_tree[2][0], chr_size, pmb_flags, pointer)
else:
b_s += (0).to_bytes(4, byteorder='little')
if (s_flags & 1) != 0:
b_s += pointer.to_bytes(4, byteorder='little')
(b_s_image, pointer) = exportPmbIMAGE(pmb_tree[3], pointer)
else:
b_s += (0).to_bytes(4, byteorder='little')
if (s_flags & 2) != 0:
b_s += pointer.to_bytes(4, byteorder='little')
(b_s_box, pointer) = exportPmbBOX(pmb_tree[4], pointer)
else:
b_s += (0).to_bytes(4, byteorder='little')
if (s_flags & 4) != 0:
b_s += pointer.to_bytes(4, byteorder='little')
(b_s_text, pointer) = exportPmbTEXT(pmb_tree[5], pmb_flags, pointer)
else:
b_s += (0).to_bytes(4, byteorder='little')
b_s += b_s_func + b_ss + b_s_image + b_s_box + b_s_text
return b_s, pointer
def exportPmbFUNC(pmb_tree, pmb_flags, pointer):
enc = checkEncodingFromPmbFlags(pmb_flags)
pointer += 0x4
b_func = pointer.to_bytes(4, byteorder='little') # FUNC_label_offset
func_label = pmb_tree[1] # FUNC_label
b_func += helper.setZeroPaddingForLabel(func_label, helper.getLen(func_label, enc), enc)
pointer += helper.getLen(func_label, enc)
return b_func, pointer
def exportPmbIMAGE(pmb_tree, pointer):
b_image = pmb_tree[1].to_bytes(4, byteorder='little')
b_image += pmb_tree[2].to_bytes(4, byteorder='little')
b_image += pmb_tree[3].to_bytes(4, byteorder='little')
b_image += pmb_tree[4].to_bytes(4, byteorder='little')
b_image += pmb_tree[5].to_bytes(4, byteorder='little')
b_image += pmb_tree[6].to_bytes(4, byteorder='little')
b_image += pmb_tree[7].to_bytes(4, byteorder='little')
b_image += pmb_tree[8].to_bytes(4, byteorder='little')
b_image += pmb_tree[9].to_bytes(4, byteorder='little')
pointer += 0x24
return b_image, pointer
def exportPmbBOX(pmb_tree, pointer):
b_box = pmb_tree[1].to_bytes(4, byteorder='little')
b_box += pmb_tree[2].to_bytes(4, byteorder='little')
b_box += pmb_tree[3].to_bytes(4, byteorder='little')
b_box += pmb_tree[4].to_bytes(4, byteorder='little')
pointer += 0x10
return b_box, pointer
def exportPmbTEXT(pmb_tree, pmb_flags, pointer):
enc = checkEncodingFromPmbFlags(pmb_flags)
b_text = pmb_tree[1].to_bytes(2, byteorder='little')
b_text += pmb_tree[2].to_bytes(2, byteorder='little')
if (pmb_flags & 0x10) != 0:
b_text += pmb_tree[3].to_bytes(4, byteorder='little')
pointer += 0x4
pointer += 0xC
text_label1 = pmb_tree[4]
text_label2 = pmb_tree[5]
b_text += pointer.to_bytes(4, byteorder='little')
pointer += helper.getLen(text_label1, enc)
b_text += pointer.to_bytes(4, byteorder='little')
b_text += helper.setZeroPaddingForLabel(text_label1, helper.getLen(text_label1, enc), enc)
b_text += helper.setZeroPaddingForLabel(text_label2, helper.getLen(text_label2, enc), enc)
pointer += helper.getLen(text_label2, enc)
return b_text, pointer
def exportPmbLAYER(pmb_tree, chr_size, pmb_flags, pointer):
enc = 'euc_jp'
b_layer = b""
b_layer += helper.setZeroPaddingForLabel(pmb_tree[0][1], chr_size, enc) # LAYER_label
s1_size = len(pmb_tree[1])
b_layer += s1_size.to_bytes(4, byteorder='little') # LAYER_SUB1_size
pointer += chr_size + 0x10
b_layer += pointer.to_bytes(4, byteorder='little') # LAYER_SUB1_offset
b_s1 = b""
b_s1_offset = b""
s1_offset_list = []
pointer += 0x4 * s1_size
for i in range(s1_size):
s1_offset_list.append(pointer.to_bytes(4, byteorder='little'))
(b, pointer) = exportPmbSUB(pmb_tree[1][i], chr_size, pmb_flags, pointer)
b_s1 += b
for i in range(s1_size):
b_s1_offset += s1_offset_list[i]
s2_size = len(pmb_tree[2])
b_layer += s2_size.to_bytes(4, byteorder='little') # LAYER_SUB2_size
b_layer += pointer.to_bytes(4, byteorder='little') # LAYER_SUB2_offset
b_s2 = b""
b_s2_offset = b""
s2_offset_list = []
pointer += 0x4 * s2_size
for i in range(s2_size):
s2_offset_list.append(pointer.to_bytes(4, byteorder='little'))
(b, pointer) = exportPmbSUB(pmb_tree[2][i], chr_size, pmb_flags, pointer)
b_s2 += b
for i in range(s2_size):
b_s2_offset += s2_offset_list[i]
b_layer += b_s1_offset + b_s1 + b_s2_offset + b_s2
return b_layer, pointer
def exportPmbPBIN(pbin, pmb_flags, number_of_img, pbin_offset, pointer):
pbin_pointer = pbin_offset
# Init binary data
b_i_l = b''
b_i_m = b''
img_size_list = []
img_offset_list = []
img_out_offset_list = []
    for i in range(number_of_img):
img_size = helper.unpackOneFormat("L", pbin, pbin_pointer, 0)
img_offset = helper.unpackOneFormat("L", pbin, pbin_pointer + 0x4, 0)
img_size_list.append(img_size)
img_offset_list.append(img_offset)
img_out_offset_list.append(img_offset + pointer - pbin_offset)
pbin_pointer += 0x8
padding = 0
pointer += pbin_pointer - pbin_offset
first_img_header = pbin[img_offset_list[0]:img_offset_list[0] + 4]
if first_img_header[:2] == b'\x1f\x8b':
(b_i_m, padding, pointer) = setPBINPadding(b_i_m, padding, pointer, 4)
elif first_img_header == b'Tex1':
(b_i_m, padding, pointer) = setPBINPadding(b_i_m, padding, pointer, 0x10)
else:
        raise ValueError("Unexpected image header: expected gzip (1f 8b) or Tex1")
    for i in range(number_of_img):
# Create binary data for IMG list
b_i_l += img_size_list[i].to_bytes(4, byteorder="little")
b_i_l += (img_out_offset_list[i] + padding).to_bytes(4, byteorder="little")
        # Extract the image data (the first branch handles the last image, where i + 1 is out of range)
        if i + 1 == number_of_img:
img = pbin[img_offset_list[i]:]
else:
img = pbin[img_offset_list[i]:img_offset_list[i + 1]]
pointer += len(img)
# gz or img
        if i + 1 != number_of_img:
next_img_header = pbin[img_offset_list[i + 1]:img_offset_list[i + 1] + 4]
if next_img_header[:2] == b'\x1f\x8b':
(img, padding, pointer) = setPBINPadding(img, padding, pointer, 4)
elif next_img_header == b'Tex1':
(img, padding, pointer) = setPBINPadding(img, padding, pointer, 0x10)
else:
                raise ValueError("Unexpected image header: expected gzip (1f 8b) or Tex1")
elif (pmb_flags & 0x40) != 0:
(img, padding, pointer) = setPBINPadding(img, padding, pointer, 4)
# Add img to binary
b_i_m += img
# Add all binary elements.
b_img = b_i_l + b_i_m
return b_img
def setPBINPadding(img, padding, pointer, num):
if pointer % num != 0:
p = num - (pointer % num)
img += b'\x50' * p
padding += p
pointer += p
return img, padding, pointer
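# Worked example of the alignment above: with pointer == 0x1C and num == 0x10,
# p == 0x10 - (0x1C % 0x10) == 4, so four b'\x50' bytes are appended, padding grows
# by 4, and pointer advances to 0x20.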
def exportPmbControlFlags(pmb_tree, output_pmb_flags):
input_name = pmb_tree[1]
output_name = [k for k, v in pmb_dict.items() if v == output_pmb_flags][0]
for i in range(len(pmb_control_flags_list)):
if pmb_control_flags_list[i][0] == [input_name, output_name]:
pmb_tree = exportPmbControlFlagsMAIN(pmb_tree, pmb_control_flags_list[i][1])
return pmb_tree
def exportPmbControlFlagsMAIN(pmb_tree, pcf_list):
for i in range(len(pmb_tree[2])):
pmb_tree[2][i][1] = exportPmbControlFlagsSUB(pmb_tree[2][i][1], pcf_list)
for j in range(len(pmb_tree[2][i][2])):
pmb_tree[2][i][2][j][1] = exportPmbControlFlagsSUB(pmb_tree[2][i][2][j][1], pcf_list)
pmb_tree[2][i][2][j][2] = exportPmbControlFlagsSUB(pmb_tree[2][i][2][j][2], pcf_list)
return pmb_tree
def exportPmbControlFlagsSUB(pmb_tree, pcf_list):
for i in range(len(pmb_tree)):
pmb_control_flags = pmb_tree[i][0][2]
if pmb_control_flags & pcf_list[0] == pcf_list[0] and pmb_control_flags & pcf_list[1] == 0:
pmb_tree[i][0][2] = pmb_control_flags - pcf_list[0] + pcf_list[1]
if len(pmb_tree[i][2]) != 0:
pmb_tree[i][2] = exportPmbControlFlagsSUB(pmb_tree[i][2], pcf_list)
return pmb_tree
def checkPmb(path, pmb_flags):
pmb_source = path.read_bytes()
# Import PMB
(pmb_tree, pbin) = importPmb(pmb_source, pmb_flags)
# Export PMB
pmb_out = exportPmb(pmb_tree, pbin, pmb_flags)
# Check for correct data
if pmb_out == pmb_source:
return True
else:
return False
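# Usage sketch (the path and flag value below are hypothetical): checkPmb round-trips a
# file through importPmb/exportPmb and reports whether the rebuilt bytes match the source.
#
#   from pathlib import Path
#   ok = checkPmb(Path("sample.pmb"), pmb_flags=0x24)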
| 38.625187 | 118 | 0.624229 |
41e03ed1bcf8f78e42b1200faf650dff19991ec9
| 18,525 |
py
|
Python
|
bioslds/nsm.py
|
ttesileanu/bio-time-series
|
da14482422b56c2e750a0044866788f4a87dde12
|
[
"MIT"
] | null | null | null |
bioslds/nsm.py
|
ttesileanu/bio-time-series
|
da14482422b56c2e750a0044866788f4a87dde12
|
[
"MIT"
] | null | null | null |
bioslds/nsm.py
|
ttesileanu/bio-time-series
|
da14482422b56c2e750a0044866788f4a87dde12
|
[
"MIT"
] | 1 |
2022-03-07T22:22:24.000Z
|
2022-03-07T22:22:24.000Z
|
""" Define a biologically-plausible online learner using a similarity matching
objective.
"""
import numpy as np
from bioslds.monitor import AttributeMonitor
from types import SimpleNamespace
from typing import Optional, Sequence, Union, Callable
from numba import njit
class NonRecurrent(object):
""" Solve argmin ||X^T X - Y^T Y||^2_F, online, potentially with a
non-negativity constraint.
This uses the non-recurrent algorithm from Minden, Pehlevan, Chklovskii (2018).
Attributes
----------
n_components : int
Number of components in the output.
rate : float
Learning rate or learning rate schedule.
tau : float
Ratio between learning rates for feed-forward connection strengths and lateral
connection strengths, respectively.
scalings : np.ndarray
Amounts by which the principal components are scaled at the optimum,
corresponding to the `Lambda` matrix in Minden, Pehlevan, Chklovskii.
non_negative : bool
Whether the outputs are forced to be non-negative.
whiten : bool
Whether the outputs should be whitened.
n_features : int
Number of features in the training data.
output_ : np.ndarray
Current output values.
weights_ : np.ndarray
Current input-output connection strengths, shape
`(n_components, n_features_)`. In the long term, this should converge to
`Y @ X.T`, where `X` and `Y` are input and output matrices with one
sample per column.
lateral_ : np.ndarray
Current lateral connection strengths, shape
`(n_components, n_components)`. In the long term, this should converge to
`isc @ Y @ Y.T @ isc` where `isc = np.inv(pc_scalings)`.
n_samples_ : int
Number of samples seen.
"""
def __init__(
self,
n_features: Optional[int] = None,
n_components: Optional[int] = None,
weights: Optional[Sequence] = None,
lateral: Optional[Sequence] = None,
tau: float = 0.5,
rate: Union[float, Sequence, Callable[[float], float]] = 0.001,
scalings: Optional[Sequence] = None,
non_negative: bool = False,
whiten: bool = False,
rng: Optional[Union[int, np.random.Generator]] = None,
):
""" Initialize the circuit.
Parameters
----------
n_features
Number of dimensions in the input. If not provided, this is inferred from
the shape of `weights`.
n_components
Number of dimensions in the output. This must be provided unless one of
`weights` or `lateral` is given.
weights
Initial input-output connection strengths, should have shape `(n_components,
n_features)`. Set to all 1 by default, unless `rng` is provided.
lateral
Initial lateral connection strengths, should have shape `(n_components,
n_components)`. It will be automatically symmetrized and made
positive-definite, if necessary. These connection strengths are set to the
identity matrix if not explicitly provided, unless `rng` is used.
tau
Ratio between learning rates for feed-forward connection strengths and
lateral connection strengths, respectively.
rate
Learning rate or learning schedule for feed-forward weights. If this is a
sequence, the `i`th element is used as the learning rate at the `i`th step.
The last element is used for any steps beyond the length of the sequence. If
this is a callable, the learning rate is obtained by calling it with the
current step number as an argument.
scalings
Amounts by which the principal components are scaled in the optimum
solution. This corresponds to the diagonal elements of the `Lambda` matrix
in Minden, Pehlevan, Chklovskii (2018), and is useful for breaking the
degeneracy in the solution set. By default it is set to all 1, which implies
*no* degeneracy breaking.
non_negative
Set to true to force the outputs to be non-negative.
whiten
Set to true to impose a whitening constraint on the output (as in the PSW
problem from Minden, Pehlevan, Chklovskii (2018)).
rng
Random number generator or seed used to initialize the weights. If not
provided, the initial values from above are used.
"""
# copy over the trivial parameters
self.tau = tau
self.non_negative = non_negative
self.whiten = whiten
if callable(rate) or not hasattr(rate, "__len__"):
self.rate = rate
else:
self.rate = np.array(rate)
self._learning_rate_vector = None
# infer input and output dimensions
if weights is not None:
self.n_components, self.n_features = np.shape(weights)
else:
if n_features is None:
raise ValueError(
"Need either weights or n_features to specify the "
"dimension of the inputs."
)
self.n_features = n_features
if lateral is not None:
self.n_components = len(lateral)
elif n_components is not None:
self.n_components = n_components
else:
raise ValueError(
"Need either weights, lateral, or n_components to specify the "
"dimension of the outputs."
)
# check that all dimensions match
if (
n_features is not None
and weights is not None
and n_features != np.shape(weights)[1]
):
raise ValueError("Weights shape does not match n_features.")
# I don't know why but the type checker thinks weights is None here...
# noinspection PyTypeChecker
if (
n_components is not None
and weights is not None
and n_components != np.shape(weights)[0]
):
raise ValueError("Weights shape does not match n_components.")
# noinspection PyTypeChecker
if lateral is not None and weights is not None and len(lateral) != len(weights):
raise ValueError("Weights and lateral shapes do not match.")
if lateral is not None and len(lateral) != np.shape(lateral)[1]:
raise ValueError("Lateral must be a square matrix.")
# handle some defaults
if scalings is not None:
self.scalings = np.array(scalings)
else:
self.scalings = np.ones(self.n_components)
# construct the state variables
# initial value of output doesn't matter: gets replaced with every `transform`
self.output_ = np.zeros(self.n_components)
# use rng to initialize weights, if provided
if rng is not None:
rng = np.random.default_rng(rng)
# initialize connection weights
if weights is not None:
self.weights_ = np.array(weights, dtype=float)
elif rng is not None:
self.weights_ = rng.normal(
size=(self.n_components, self.n_features)
) / np.sqrt(self.n_features)
else:
self.weights_ = np.ones((self.n_components, self.n_features))
if lateral is not None:
# make sure m0 is symmetric and positive definite
lateral = np.asarray(lateral, dtype=float)
lateral = 0.5 * (lateral + lateral.T)
evals, evecs = np.linalg.eigh(lateral)
# XXX should make clipping threshold configurable
clipping_threshold = 1e-6
if np.any(evals <= clipping_threshold):
# clip eigenvalues that are smaller than a small, positive value
evals = np.clip(evals, clipping_threshold, None)
lateral = evecs @ np.diag(evals) @ evecs.T
self.lateral_ = np.array(lateral)
else:
self.lateral_ = np.eye(self.n_components)
# initialize step counter
self.n_samples_ = 0
self._mode = "numba"
# noinspection PyUnusedLocal
def transform(
self,
X: Sequence,
y: None = None,
progress: Optional[Callable] = None,
monitor: Optional[AttributeMonitor] = None,
chunk_hint: int = 1000,
) -> np.ndarray:
""" Feed data to the circuit, updating the output and the weights.
Note that in this implementation, the non-negativity of the outputs is enforced
before the synaptic plasticity updates take place.
Parameters
----------
X
Dataset to feed into the circuit. Shape `(n_samples, n_features)`.
y
Unused.
progress
Progress function that can be used either as a wrapper or manually, like
`tqdm.tqdm`. Specifically, this needs to support wrapping an iterable, so
you can write, e.g., `for x in progress(X): ...`; and it needs to support
calling with a `total` argument, an `update`, and a `close` method, e.g.:
pbar = progress(total=100)
for i in range(100):
pbar.update(1) # note that arg is step (=1), not i!
pbar.close()
monitor
An object for monitoring the evolution of the parameters during learning
(e.g., an instance of `AttributeMonitor`). Parameter values are stored and
calculated before their updates.
chunk_hint
A hint about how to chunk the learning. This may or may not be used. If it
is, the progress function will only be called once per chunk.
        Returns the output history, an array of shape `(n_samples, n_components)`.
"""
# figure out per-step rates
n = len(X)
if callable(self.rate):
self._learning_rate_vector = np.array(
[self.rate(self.n_samples_ + _) for _ in range(n)]
)
elif hasattr(self.rate, "__len__"):
n0 = self.n_samples_
n1 = n0 + n
if n1 <= len(self.rate):
self._learning_rate_vector = self.rate[n0:n1]
else:
if n0 < len(self.rate):
self._learning_rate_vector = self.rate[n0:]
n_extra = n1 - len(self.rate)
else:
self._learning_rate_vector = []
n_extra = n1 - n0
last_rate = self.rate[-1]
self._learning_rate_vector = np.hstack(
(self._learning_rate_vector, n_extra * [last_rate])
)
else:
self._learning_rate_vector = np.repeat(self.rate, n)
if monitor is not None:
monitor.setup(n)
fct_mapping = {
"naive": self._transform_naive,
"numba": self._transform_numba,
}
fct = fct_mapping[self._mode]
# noinspection PyArgumentList
res = fct(X, progress=progress, monitor=monitor, chunk_hint=chunk_hint)
return res
# noinspection PyUnusedLocal
def _transform_naive(
self, X: Sequence, progress, monitor, chunk_hint: int
) -> np.ndarray:
it = X if progress is None else progress(X)
out_history = np.zeros((len(X), self.n_components))
for i, x in enumerate(it):
out_history[i] = self.output_
if monitor is not None:
monitor.record(self)
self._feed(x, self._learning_rate_vector[i])
return out_history
def _transform_numba(
self,
X: Sequence,
progress: Optional[Callable],
monitor: Optional[AttributeMonitor],
chunk_hint: int,
) -> np.ndarray:
if chunk_hint < 1:
chunk_hint = 1
# handle progress function
n = len(X)
if progress is not None:
pbar = progress(total=n)
else:
pbar = None
# set up monitor, if any
if monitor is not None:
monitor.setup(n)
X = np.asarray(X, dtype=float)
out_history = np.zeros((len(X), self.n_components))
for chunk_start in range(0, n, chunk_hint):
crt_range = slice(chunk_start, chunk_start + chunk_hint)
crt_X = X[crt_range]
crt_n = len(crt_X)
crt_weights = np.zeros((crt_n, self.n_components, self.n_features))
crt_lateral = np.zeros((crt_n, self.n_components, self.n_components))
# crt_output = np.zeros((crt_n, self.n_components))
crt_history = SimpleNamespace(
weights_=crt_weights,
lateral_=crt_lateral,
output_=out_history[crt_range],
)
self._transform_numba_chunk(
crt_X, crt_range=crt_range, crt_history=crt_history
)
if pbar is not None:
pbar.update(crt_n)
if monitor is not None:
monitor.record_batch(crt_history)
if pbar is not None:
pbar.close()
return out_history
def _transform_numba_chunk(
self, X: np.ndarray, crt_range: slice, crt_history: SimpleNamespace
):
_perform_transform(
X,
self._learning_rate_vector[crt_range],
self.tau,
self.scalings,
self.non_negative,
self.whiten,
self.weights_,
self.lateral_,
self.output_,
crt_history.weights_,
crt_history.lateral_,
crt_history.output_,
)
self.n_samples_ += len(X)
def _feed(self, x: Sequence, learning_rate: float):
""" Feed a single data sample into the circuit.
Parameters
----------
x
Sample to feed.
learning_rate
Learning rate to use.
"""
# following the first steps from Algorithms 1 and 2 in Minden, Pehlevan,
# Chklovskii (2018).
diag_m = np.diag(self.lateral_)
inv_diag_m = 1 / diag_m
m_off = self.lateral_ - np.diag(diag_m)
# the matrix multiplication by the diagonal M_d^{-1} matrix is
# equivalent to an element-wise multiplication using broadcast
y_tilde = inv_diag_m * (self.weights_ @ x)
# now correcting for the off-diagonal terms
self.output_ = y_tilde - inv_diag_m * (m_off @ y_tilde)
if self.non_negative:
self.output_ = np.clip(self.output_, 0, None)
if learning_rate != 0:
self.weights_ += learning_rate * (np.outer(self.output_, x) - self.weights_)
if not self.whiten:
scaled_m = self.scalings * self.lateral_ * self.scalings[:, None]
self.lateral_ += (learning_rate / self.tau) * (
np.outer(self.output_, self.output_) - scaled_m
)
else:
self.lateral_ += (learning_rate / self.tau) * (
np.outer(self.output_, self.output_) - np.diag(self.scalings ** 2)
)
self.n_samples_ += 1
def clone(self):
""" Make a clone of the current instance. """
clone = NonRecurrent(
n_features=self.n_features,
n_components=self.n_components,
weights=self.weights_,
lateral=self.lateral_,
tau=self.tau,
rate=self.rate,
scalings=self.scalings,
non_negative=self.non_negative,
whiten=self.whiten,
)
clone.n_samples_ = self.n_samples_
clone.output_ = np.copy(self.output_)
return clone
def __repr__(self):
return (
"NonRecurrent(n_features={}, n_components={}, non_negative={}, "
"whiten={}, rate={}, tau={}, scalings={}, output_={},\n"
"weights_={},\nlateral_={})".format(
self.n_features,
self.n_components,
self.non_negative,
self.whiten,
self.rate,
self.tau,
self.scalings,
self.output_,
self.weights_,
self.lateral_,
)
)
def __str__(self):
return (
"NonRecurrent(n_features={}, n_components={}, non_negative={}, "
"whiten={}, rate={}, tau={})".format(
self.n_features,
self.n_components,
self.non_negative,
self.whiten,
self.rate,
self.tau,
)
)
_available_modes = ["naive", "numba"]
@njit
def _perform_transform(
x: np.ndarray,
rate: np.ndarray,
tau: float,
scalings: np.ndarray,
non_negative: bool,
whiten: bool,
weights: np.ndarray,
lateral: np.ndarray,
output: np.ndarray,
history_weights: np.ndarray,
history_lateral: np.ndarray,
history_output: np.ndarray,
):
n = len(x)
scalings_T = np.atleast_2d(scalings).T
scalings_2 = np.diag(scalings ** 2)
for i in range(n):
crt_x = x[i]
crt_rate = rate[i]
history_weights[i] = weights
history_lateral[i] = lateral
history_output[i] = output
# following the first steps from Algorithms 1 and 2 in Minden, Pehlevan,
# Chklovskii (2018).
diag_m = np.diag(lateral)
inv_diag_m = 1 / diag_m
# m_off = np.copy(lateral)
# np.fill_diagonal(m_off, 0)
m_off = lateral - np.diag(diag_m)
# the matrix multiplication by the diagonal M_d^{-1} matrix is equivalent to an
# element-wise multiplication using broadcast
y_tilde = inv_diag_m * (weights @ crt_x)
# now correcting for the off-diagonal terms
output[:] = y_tilde - inv_diag_m * (m_off @ y_tilde)
if non_negative:
output[output < 0] = 0
if crt_rate != 0:
weights += crt_rate * (np.outer(output, crt_x) - weights)
if not whiten:
scaled_m = scalings * lateral * scalings_T
lateral += (crt_rate / tau) * (np.outer(output, output) - scaled_m)
else:
lateral += (crt_rate / tau) * (np.outer(output, output) - scalings_2)
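# Minimal usage sketch for NonRecurrent (illustrative only; the random data, shapes and
# learning rate below are assumptions, not part of this module):
#
#   rng = np.random.default_rng(0)
#   X = rng.normal(size=(1000, 10))
#   nsm = NonRecurrent(n_features=10, n_components=3, rate=1e-3, rng=0)
#   Y = nsm.transform(X)  # output history, shape (1000, 3)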
| 35.831721 | 88 | 0.576248 |
56f331d448f77525796c2f5e269b4248e979f5c6
| 14,158 |
py
|
Python
|
src/lama/modules/roberta_connector.py
|
Chandrahasd/OKGIT
|
16f4dbbfca1020809d3bae0445ee564fa8af9193
|
[
"MIT"
] | 4 |
2021-08-10T12:43:57.000Z
|
2022-03-29T04:09:23.000Z
|
src/lama/modules/roberta_connector.py
|
Chandrahasd/OKGIT
|
16f4dbbfca1020809d3bae0445ee564fa8af9193
|
[
"MIT"
] | 1 |
2021-07-14T02:20:20.000Z
|
2021-08-04T23:41:22.000Z
|
src/lama/modules/roberta_connector.py
|
Chandrahasd/OKGIT
|
16f4dbbfca1020809d3bae0445ee564fa8af9193
|
[
"MIT"
] | 1 |
2021-09-11T10:05:26.000Z
|
2021-09-11T10:05:26.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from fairseq.models.roberta import RobertaModel
from fairseq import utils
import torch
from pytorch_pretrained_bert import BertTokenizer
from lama.modules.base_connector import *
from lama.modules.bert_connector import CustomBaseTokenizer
class RobertaVocab(object):
def __init__(self, roberta):
self.roberta = roberta
def __getitem__(self, arg):
value = ""
try:
predicted_token_bpe = self.roberta.task.source_dictionary.string([arg])
if (
predicted_token_bpe.strip() == ROBERTA_MASK
or predicted_token_bpe.strip() == ROBERTA_START_SENTENCE
):
value = predicted_token_bpe.strip()
else:
value = self.roberta.bpe.decode(str(predicted_token_bpe)).strip()
except Exception as e:
print(arg)
print(predicted_token_bpe)
print(value)
print("Exception {} for input {}".format(e, arg))
return value
class Roberta(Base_Connector):
def __init__(self, args):
super().__init__()
roberta_model_dir = args.roberta_model_dir
roberta_model_name = args.roberta_model_name
roberta_vocab_name = args.roberta_vocab_name
self.dict_file = "{}/{}".format(roberta_model_dir, roberta_vocab_name)
self.model = RobertaModel.from_pretrained(
roberta_model_dir, checkpoint_file=roberta_model_name
)
self.bpe = self.model.bpe
self.task = self.model.task
self._build_vocab()
self._init_inverse_vocab()
self.max_sentence_length = args.max_sentence_length
# CD: Add custom tokenizer to avoid splitting the ['MASK'] token
# self.tokenizer = BertTokenizer.from_pretrained(dict_file)
# custom_basic_tokenizer = CustomBaseTokenizer(do_lower_case = do_lower_case)
# self.tokenizer.basic_tokenizer = custom_basic_tokenizer
def _cuda(self):
self.model.cuda()
def _build_vocab(self):
self.vocab = []
for key in range(ROBERTA_VOCAB_SIZE):
predicted_token_bpe = self.task.source_dictionary.string([key])
try:
value = self.bpe.decode(predicted_token_bpe)
if value[0] == " ": # if the token starts with a whitespace
value = value.strip()
else:
# this is subword information
value = "_{}_".format(value)
if value in self.vocab:
# print("WARNING: token '{}' is already in the vocab".format(value))
value = "{}_{}".format(value, key)
self.vocab.append(value)
except Exception as e:
self.vocab.append(predicted_token_bpe.strip())
def get_id(self, input_string):
# Roberta predicts ' London' and not 'London'
string = " " + str(input_string).strip()
text_spans_bpe = self.bpe.encode(string.rstrip())
tokens = self.task.source_dictionary.encode_line(
text_spans_bpe, append_eos=False
)
return tokens.long()
def get_batch_generation(self, sentences_list, logger=None, try_cuda=True):
if not sentences_list:
return None
if try_cuda:
self.try_cuda()
tensor_list = []
masked_indices_list = []
max_len = 0
output_tokens_list = []
for masked_inputs_list in sentences_list:
tokens_list = []
for idx, masked_input in enumerate(masked_inputs_list):
                # substitute [MASK] with <mask>
masked_input = masked_input.replace(MASK, ROBERTA_MASK)
text_spans = masked_input.split(ROBERTA_MASK)
text_spans_bpe = (
(" {0} ".format(ROBERTA_MASK))
.join(
[
self.bpe.encode(text_span.rstrip())
for text_span in text_spans
]
)
.strip()
)
prefix = ""
if idx == 0:
prefix = ROBERTA_START_SENTENCE
tokens_list.append(
self.task.source_dictionary.encode_line(
prefix + " " + text_spans_bpe, append_eos=True
)
)
tokens = torch.cat(tokens_list)[: self.max_sentence_length]
output_tokens_list.append(tokens.long().cpu().numpy())
if len(tokens) > max_len:
max_len = len(tokens)
tensor_list.append(tokens)
masked_index = (tokens == self.task.mask_idx).nonzero().numpy()
for x in masked_index:
masked_indices_list.append([x[0]])
pad_id = self.task.source_dictionary.pad()
tokens_list = []
for tokens in tensor_list:
            pad_length = max_len - len(tokens)
            if pad_length > 0:
                pad_tensor = torch.full([pad_length], pad_id, dtype=torch.int)
tokens = torch.cat((tokens, pad_tensor))
tokens_list.append(tokens)
batch_tokens = torch.stack(tokens_list)
with torch.no_grad():
# with utils.eval(self.model.model):
self.model.eval()
self.model.model.eval()
log_probs, extra = self.model.model(
batch_tokens.long().to(device=self._model_device),
features_only=False,
return_all_hiddens=False,
)
return log_probs.cpu(), output_tokens_list, masked_indices_list
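    # Usage sketch (the sentence is hypothetical; MASK comes from base_connector, and
    # sentences_list is a list of lists of strings):
    #
    #   log_probs, token_ids, masked_indices = model.get_batch_generation(
    #       [["The cat sits on the " + MASK + " ."]])
    #
    # log_probs is the LM-head output of shape (batch, seq_len, vocab), and
    # masked_indices gives the position of each <mask> token per sample.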
def __get_input_tensors(self, sentences):
if len(sentences) > 2:
print(sentences)
raise ValueError("BERT accepts maximum two sentences in input for each data point")
first_tokenized_sentence = self.tokenizer.tokenize(sentences[0])
first_segment_id = np.zeros(len(first_tokenized_sentence), dtype=int).tolist()
# add [SEP] token at the end
first_tokenized_sentence.append(BERT_SEP)
first_segment_id.append(0)
        if len(sentences) > 1:
            second_tokenized_sentence = self.tokenizer.tokenize(sentences[1])
            second_segment_id = np.full(len(second_tokenized_sentence), 1, dtype=int).tolist()
            # add [SEP] token at the end
            second_tokenized_sentence.append(BERT_SEP)
            second_segment_id.append(1)
            tokenized_text = first_tokenized_sentence + second_tokenized_sentence
segments_ids = first_segment_id + second_segment_id
else:
tokenized_text = first_tokenized_sentence
segments_ids = first_segment_id
# add [CLS] token at the beginning
tokenized_text.insert(0,BERT_CLS)
segments_ids.insert(0,0)
# look for masked indices
masked_indices = []
for i in range(len(tokenized_text)):
token = tokenized_text[i]
if token == MASK:
masked_indices.append(i)
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
return tokens_tensor, segments_tensors, masked_indices, tokenized_text
def __get_input_tensors_batch(self, sentences_list):
tokens_tensors_list = []
segments_tensors_list = []
masked_indices_list = []
tokenized_text_list = []
max_tokens = 0
for sentences in sentences_list:
tokens_tensor, segments_tensor, masked_indices, tokenized_text = self.__get_input_tensors(sentences)
tokens_tensors_list.append(tokens_tensor)
segments_tensors_list.append(segments_tensor)
masked_indices_list.append(masked_indices)
tokenized_text_list.append(tokenized_text)
# assert(tokens_tensor.shape[1] == segments_tensor.shape[1])
if (tokens_tensor.shape[1] > max_tokens):
max_tokens = tokens_tensor.shape[1]
# print("MAX_TOKENS: {}".format(max_tokens))
# apply padding and concatenate tensors
# use [PAD] for tokens and 0 for segments
final_tokens_tensor = None
final_segments_tensor = None
final_attention_mask = None
for tokens_tensor, segments_tensor in zip(tokens_tensors_list, segments_tensors_list):
dim_tensor = tokens_tensor.shape[1]
            pad_length = max_tokens - dim_tensor
            attention_tensor = torch.full([1, dim_tensor], 1, dtype=torch.long)
            if pad_length > 0:
                pad_1 = torch.full([1, pad_length], self.pad_id, dtype=torch.long)
                pad_2 = torch.full([1, pad_length], 0, dtype=torch.long)
                attention_pad = torch.full([1, pad_length], 0, dtype=torch.long)
tokens_tensor = torch.cat((tokens_tensor,pad_1), dim=1)
segments_tensor = torch.cat((segments_tensor,pad_2), dim=1)
attention_tensor = torch.cat((attention_tensor,attention_pad), dim=1)
if final_tokens_tensor is None:
final_tokens_tensor = tokens_tensor
final_segments_tensor = segments_tensor
final_attention_mask = attention_tensor
else:
final_tokens_tensor = torch.cat((final_tokens_tensor,tokens_tensor), dim=0)
final_segments_tensor = torch.cat((final_segments_tensor,segments_tensor), dim=0)
final_attention_mask = torch.cat((final_attention_mask,attention_tensor), dim=0)
# print(final_tokens_tensor)
# print(final_segments_tensor)
# print(final_attention_mask)
# print(final_tokens_tensor.shape)
# print(final_segments_tensor.shape)
# print(final_attention_mask.shape)
return final_tokens_tensor, final_segments_tensor, final_attention_mask, masked_indices_list, tokenized_text_list
def get_contextual_embeddings_with_mask_indices(self, sentences_list, try_cuda=True):
# TBA
if not sentences_list:
return None
if try_cuda:
self.try_cuda()
tensor_list = []
masked_indices_list = []
max_len = 0
output_tokens_list = []
for masked_inputs_list in sentences_list:
tokens_list = []
for idx, masked_input in enumerate(masked_inputs_list):
                # substitute [MASK] with <mask>
masked_input = masked_input.replace(MASK, ROBERTA_MASK)
text_spans = masked_input.split(ROBERTA_MASK)
text_spans_bpe = (
(" {0} ".format(ROBERTA_MASK))
.join(
[
self.bpe.encode(text_span.rstrip())
for text_span in text_spans
]
)
.strip()
)
prefix = ""
if idx == 0:
prefix = ROBERTA_START_SENTENCE
tokens_list.append(
self.task.source_dictionary.encode_line(
prefix + " " + text_spans_bpe, append_eos=True
)
)
tokens = torch.cat(tokens_list)[: self.max_sentence_length]
output_tokens_list.append(tokens.long().cpu().numpy())
if len(tokens) > max_len:
max_len = len(tokens)
tensor_list.append(tokens)
masked_index = (tokens == self.task.mask_idx).nonzero().numpy()
for x in masked_index:
masked_indices_list.append([x[0]])
pad_id = self.task.source_dictionary.pad()
tokens_list = []
for tokens in tensor_list:
            pad_length = max_len - len(tokens)
            if pad_length > 0:
                pad_tensor = torch.full([pad_length], pad_id, dtype=torch.int)
tokens = torch.cat((tokens, pad_tensor))
tokens_list.append(tokens)
batch_tokens = torch.stack(tokens_list)
with torch.no_grad():
# with utils.eval(self.model.model):
self.model.eval()
self.model.model.eval()
log_probs, extra = self.model.model(
batch_tokens.long().to(device=self._model_device),
features_only=True,
return_all_hiddens=False,
)
# return log_probs.cpu(), output_tokens_list, masked_indices_list
return [log_probs.cpu()], None, None, masked_indices_list
# self.get_batch_generation(sentences_list)
# # assume in input 1 or 2 sentences - in general, it considers only the first 2 sentences
# if not sentences_list:
# return None
# if try_cuda:
# self.try_cuda()
# tokens_tensor, segments_tensor, attention_mask_tensor, masked_indices_list, tokenized_text_list = self.__get_input_tensors_batch(sentences_list)
# with torch.no_grad():
# all_encoder_layers, _ = self.bert_model(
# tokens_tensor.to(self._model_device),
# segments_tensor.to(self._model_device))
# all_encoder_layers = [layer.cpu() for layer in all_encoder_layers]
# sentence_lengths = [len(x) for x in tokenized_text_list]
# # all_encoder_layers: a list of the full sequences of encoded-hidden-states at the end
# # of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
# # encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
# return all_encoder_layers, sentence_lengths, tokenized_text_list
# # return None
| 39.002755 | 154 | 0.598178 |
3dec790aa0759f538be8ec3bd302aa1f600d6cb4
| 184 |
py
|
Python
|
characters_in_range.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
characters_in_range.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
characters_in_range.py
|
GYosifov88/Python-Fundamentals
|
b46ba2822bd2dac6ff46830c6a520e559b448442
|
[
"MIT"
] | null | null | null |
def chars_in_range(a, b):
    for n in range(ord(a) + 1, ord(b)):
        print(chr(n), end=' ')
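# Example: chars_in_range('a', 'e') prints the characters strictly between the two
# inputs: b c d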
first_char = input()
second_char = input()
chars_in_range(first_char, second_char)
| 20.444444 | 40 | 0.63587 |
b89c1cba791992be0861b55542c2e3be46d91a39
| 1,663 |
py
|
Python
|
boosted.py
|
alihakimtaskiran/GoblinWordGenerator
|
f7fd651ea8f9eb6d6c70be77653f4200ce463b68
|
[
"BSD-3-Clause"
] | 2 |
2019-07-17T08:36:08.000Z
|
2019-07-18T13:56:15.000Z
|
boosted.py
|
alihakimtaskiran/GoblinWordGenerator
|
f7fd651ea8f9eb6d6c70be77653f4200ce463b68
|
[
"BSD-3-Clause"
] | null | null | null |
boosted.py
|
alihakimtaskiran/GoblinWordGenerator
|
f7fd651ea8f9eb6d6c70be77653f4200ce463b68
|
[
"BSD-3-Clause"
] | null | null | null |
# Inteligent Wordlist Generator
#
# By: Sam Junior and UndeadSec
# Goblin Wordlist Generator
# Version: 2.0
#
#
##################
import itertools
ban = '''
'''
print('\n------------------\n\n G 0 B L ! N \033[32m2.0\033[m | WORDGENERATOR\n\n~ by: UndeadSec and Sam Junior:@un00mz\n\n------------------\n')
scale = input('\033[36m[!] provide a size scale [eg: "1 to 8" = 1:8] : ')
start = int(scale.split(':')[0])
final = int(scale.split(':')[1])
use_nouse = str(input("\n\033[36m[?] Do you want to enter personal data ? [y/N]: "))
if use_nouse == 'y':
first_name = str(input("\n\033[36m[*] Fist Name: "))
last_name = str(input("\n\033[36m[*] Last Name: "))
birthday = str(input("\n\033[36m[*] Birthday: "))
month = str(input("\n\033[36m[*] Month: "))
year = str(input("\n\033[36m[*] Year: "))
chrs = first_name + last_name + birthday + month + year
else:
chrs = 'abcçdefghıijklmnoöpqrsştuüvwxyz'
chrs_up = chrs.upper()
chrs_specials = '!\][/?.,~-=";:><@#$%&*()_+\' '
chrs_numerics = '1234567890'
file_name = input('\n\033[36m[!] Insert a name for your wordlist file: ')
arq = open(file_name, 'w')
if input('\n\033[36m[?] Do you want to use uppercase characters? (y/n): ') == 'y':
chrs = ''.join([chrs, chrs_up])
if input('\n\033[36m[?] Do you want to use special characters? (y/n): ') == 'y':
chrs = ''.join([chrs, chrs_specials])
if input('\n\033[36m[?] Do you want to use numeric characters? (y/n): ') == 'y':
chrs = ''.join([chrs, chrs_numerics])
for i in range(start, final+1):
for j in itertools.product(chrs, repeat=i):
temp = ''.join(j)
arq.write(temp + '\n')
arq.close()
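# Illustration (hypothetical inputs): with scale "1:2" and chrs reduced to "ab", the
# generated wordlist contains a, b, aa, ab, ba, bb, one candidate per line.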
| 31.980769 | 145 | 0.580277 |
6081acfc9aadc6295410d6328e411c90bf1ef21c
| 2,426 |
py
|
Python
|
utils/basic/create_test_world.py
|
rafsaf/Plemiona_Planer
|
1a0e2da0c4b18f1abd1df876f688c6442cba17ce
|
[
"Apache-2.0"
] | 2 |
2020-11-15T12:50:07.000Z
|
2020-11-17T21:54:54.000Z
|
utils/basic/create_test_world.py
|
rafsaf/Plemiona_Planer
|
1a0e2da0c4b18f1abd1df876f688c6442cba17ce
|
[
"Apache-2.0"
] | 21 |
2021-11-01T14:04:19.000Z
|
2022-03-25T06:31:03.000Z
|
utils/basic/create_test_world.py
|
rafsaf/Tribal-Wars-Planer
|
083af9b1efe814be3abe975b9ac8faccc00ebb09
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Rafał Safin (rafsaf). All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from base.models import Player, Server, Tribe, VillageModel, World
def create_test_world(server: Server):
test_world = World.objects.create(server=server, postfix="Test")
tribe1 = Tribe.objects.create(tribe_id=0, tag="ALLY", world=test_world)
tribe2 = Tribe.objects.create(tribe_id=1, tag="ENEMY", world=test_world)
ally_villages = []
ally_players = []
enemy_players = []
enemy_villages = []
for i in range(5):
ally_players.append(
Player(tribe=tribe1, world=test_world, player_id=i, name=f"AllyPlayer{i}")
)
enemy_players.append(
Player(
tribe=tribe2, world=test_world, player_id=i + 5, name=f"EnemyPlayer{i}"
)
)
Player.objects.bulk_create(enemy_players)
Player.objects.bulk_create(ally_players)
ally_players = list(Player.objects.filter(world=test_world, player_id__lte=4))
enemy_players = list(Player.objects.filter(world=test_world, player_id__gte=5))
for i in range(50):
ids = i // 10
ally_villages.append(
VillageModel(
world=test_world,
x_coord=100 + i,
y_coord=100 + i,
coord=f"{100+i}|{100+i}",
village_id=i,
player=ally_players[ids],
)
)
enemy_villages.append(
VillageModel(
world=test_world,
x_coord=200 + i,
y_coord=200 + i,
coord=f"{200+i}|{200+i}",
village_id=i + 50,
player=enemy_players[ids],
)
)
VillageModel.objects.bulk_create(enemy_villages)
VillageModel.objects.bulk_create(ally_villages)
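# Usage sketch (assumes an existing Server row; Server.objects.first() is only an example
# of obtaining one):
#
#   server = Server.objects.first()
#   create_test_world(server)  # builds a "Test" world with 2 tribes, 10 players, 100 villages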
| 37.323077 | 87 | 0.606348 |
d0ab89c261f970e16a9c4407620bd16a0df9e9e9
| 216 |
py
|
Python
|
configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
|
evgps/mmdetection_trashcan
|
aaf4237c2c0d473425cdc7b741d3009177b79751
|
[
"Apache-2.0"
] | 20,190 |
2018-09-10T01:11:53.000Z
|
2022-03-31T22:31:33.000Z
|
configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
|
evgps/mmdetection_trashcan
|
aaf4237c2c0d473425cdc7b741d3009177b79751
|
[
"Apache-2.0"
] | 6,736 |
2018-09-17T09:45:51.000Z
|
2022-03-31T22:54:10.000Z
|
configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
|
evgps/mmdetection_trashcan
|
aaf4237c2c0d473425cdc7b741d3009177b79751
|
[
"Apache-2.0"
] | 7,837 |
2018-09-11T02:58:23.000Z
|
2022-03-31T22:31:38.000Z
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 36 | 74 | 0.699074 |
2ff2f8641367adf6478c7af4407cc524a3921cc8
| 5,928 |
py
|
Python
|
phlibs/config/config.py
|
adambaumeister/panhit
|
f907dee310d401931d96312834e4d43523ddafac
|
[
"Apache-2.0"
] | null | null | null |
phlibs/config/config.py
|
adambaumeister/panhit
|
f907dee310d401931d96312834e4d43523ddafac
|
[
"Apache-2.0"
] | null | null | null |
phlibs/config/config.py
|
adambaumeister/panhit
|
f907dee310d401931d96312834e4d43523ddafac
|
[
"Apache-2.0"
] | null | null | null |
from phlibs.modules import *
from phlibs.Inputs import *
from phlibs.outputs import *
from phlibs.db import JsonDB
import pathlib
import os
import yaml
DEFAULT_CONFIG_PATH=str(pathlib.Path.home()) + os.sep + "panhit.yaml"
class ConfigFile:
def __init__(self):
"""
Initialize the yaml/json panhit configuration file.
        The configuration itself is loaded later via load_from_file(path).
        :param path: (DEFAULT: ~/panhit.yaml) Path to the configuration file
"""
self.db = None
self.configdb = None
self.tags = []
# Name identifies this configuration spec
self.name = None
# Dictionary of module + options from the configuration file
self.mod_options = {}
# Enabled retrieval modules
self.mods_enabled = []
self.input = {}
self.mods_available = {
'dns': DNSHost,
'panfw': Panfw,
'portscan': Portscan,
}
        # These input objects used to require constructor args before they could be listed here;
        # that has since been fixed, so they can be instantiated directly.
self.inputs_available = {
"panfw": PanfwInput(),
"dict": ListInput(),
"file": FileInput(),
}
self.outputs_available = {
"panfw": PanfwOutput(),
}
pass
def get_tag_policy(self, tp):
tags = []
for tag in tp["tags"]:
tag_p = self.load_if_str(tag, "tags")
tags.append(tag_p)
return tags
def get_inputs_available(self):
return self.inputs_available
def get_mods_available(self):
mods_available = {}
for mod_name, mod in self.mods_available.items():
mods_available[mod_name] = mod()
return mods_available
def get_outputs_available(self):
return self.outputs_available
def load_from_file(self, path=None):
if not path:
path=DEFAULT_CONFIG_PATH
if os.path.isfile(path):
r = yaml.safe_load(open(path))
self.unpickle(r)
return r
def unpickle(self, r):
for k, v in r.items():
self.__setattr__(k, v)
def load_if_str(self, data, loc):
"""
If the passed param data is a str, load it from the database, otherwise, return it directly
:param data: (str or dict)
:return: (dict) data
"""
cdb = self.get_cdb()
if type(data) is str:
data = cdb.get_in_sub(data, loc)
return data
else:
return data
def load_from_spec(self, spec_data):
"""
Given a spec dict, load all given modules and inputs.
:param spec_data: (dict)
:return: ( inputs, modules )
"""
inputs = []
mods = []
tag_policys = []
output = None
for input_name in spec_data['inputs']:
i_data = self.load_if_str(input_name, loc="input")
i = self.get_input_from_data(i_data)
inputs.append(i)
for mod_name in spec_data['modules']:
mod = self.load_if_str(mod_name, loc="modules")
mod = self.get_module_from_data(mod)
mods.append(mod)
if 'tag_policy' in spec_data:
tag_policy_ref = spec_data['tag_policy']
tag_policy = self.load_if_str(tag_policy_ref, "taglist")
tag_policys = self.get_tag_policy(tag_policy)
if 'output' in spec_data:
output = self.load_if_str(spec_data['output'], "output")
output = self.get_output_from_data(output)
return inputs, mods, tag_policys, output
def init_modules(self, mod_opts):
mods = []
for mod in self.mods_enabled:
if mod in self.mods_available:
new_opts = mod_opts
if mod in self.mod_options:
data = self.load_if_str(self.mod_options[mod], loc="modules")
new_opts.update(data)
mods.append(self.mods_available[mod](new_opts))
else:
raise ValueError("{} is not a valid module.".format(mod))
return mods
def get_input(self, mod_opts):
data = self.input
# If we're passed a string instead of a dictionary, look it up in the database
data = self.load_if_str(data, "input")
data.update(mod_opts)
return self.get_input_from_data(data)
def get_input_from_data(self, data):
if data['type'] == 'file':
i = FileInput(data)
return i
elif data['type'] == 'panfw':
p = PanfwInput(data)
return p
elif data['type'] == 'dict':
l = ListInput(data)
return l
def get_output_from_data(self, data):
if data['type'] == 'panfw':
p = PanfwOutput(data)
return p
def get_module_from_data(self, data):
if data['type'] in self.mods_available:
return self.mods_available[data['type']](data)
def get_output(self, mod_opts):
if self.output['type'] == 'panfw':
mod_opts.update(self.output)
p = Panfw(mod_opts)
return p
elif self.output['type'] == 'table':
output = Table()
return output
def get_db(self):
if self.db:
if self.db['type'] == 'JsonDB':
db_path = self.db['path']
jdb = JsonDB(db_path)
return jdb
def get_cdb(self):
if self.configdb:
if self.configdb['type'] == 'JsonDB':
db_path = self.configdb['path']
jdb = JsonDB(db_path)
                # If the PANHIT_SECRET environment variable is set, enable encryption for the config database
secret = os.getenv("PANHIT_SECRET")
if secret:
jdb.enable_encryption(secret)
return jdb
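# Usage sketch (assumes a panhit.yaml at the default location that defines a JsonDB-backed
# "configdb" section):
#
#   cfg = ConfigFile()
#   cfg.load_from_file()  # reads ~/panhit.yaml and copies its keys onto the object
#   cdb = cfg.get_cdb()   # JsonDB handle, encrypted when PANHIT_SECRET is set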
| 28.917073 | 108 | 0.553981 |
403d7272e998f0f28f5e387b100b39818792d86e
| 1,278 |
py
|
Python
|
venv/lib/python3.8/site-packages/azureml/_restclient/models/resource_name.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/azureml/_restclient/models/resource_name.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/azureml/_restclient/models/resource_name.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2 |
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceName(Model):
"""The Resource Name.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar value: The name of the resource.
:vartype value: str
:ivar localized_value: The localized name of the resource.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(self):
super(ResourceName, self).__init__()
self.value = None
self.localized_value = None
| 31.170732 | 77 | 0.561815 |
b83b591ba4016fd31e7c81ee7b8c8146b39e237e
| 984 |
py
|
Python
|
examples/django-weather-web/weather_client/settings.py
|
ericmoritz/hydra-python-client
|
a4f5564600e074ff0e835fe34ce6cb16fb31193d
|
[
"BSD-3-Clause"
] | 1 |
2016-08-28T08:08:07.000Z
|
2016-08-28T08:08:07.000Z
|
examples/django-weather-web/weather_client/settings.py
|
ericmoritz/hydra-python-client
|
a4f5564600e074ff0e835fe34ce6cb16fb31193d
|
[
"BSD-3-Clause"
] | null | null | null |
examples/django-weather-web/weather_client/settings.py
|
ericmoritz/hydra-python-client
|
a4f5564600e074ff0e835fe34ce6cb16fb31193d
|
[
"BSD-3-Clause"
] | null | null | null |
from hydraclient.core.settings import DEFAULT_JSONLD_CONTEXT
import os
###===================================================================
### Added for hydraclient.contrib.django.hydraclient
###===================================================================
CONFIG_URL=os.environ['CONFIG_URL']
DEFAULT_JSONLD_CONTEXT = dict(
DEFAULT_JSONLD_CONTEXT,
**{
"weather": "http://vocab-ld.org/vocab/weather#"
}
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
)
INSTALLED_APPS = [
'hydraclient.contrib.django.hydraclient',
'weather',
]
APPEND_SLASH = False
ROOT_URLCONF = "weather_client.urls"
DEBUG=True
TEMPLATE_DEBUG=True
| 27.333333 | 70 | 0.666667 |
5a12e0cbb8c44e424689de7d218971a831b6d406
| 5,348 |
py
|
Python
|
downstream/insseg/models/resnet.py
|
ut-amrl/ContrastiveSceneContexts
|
622b9cd32ea2dcf8307d25eb2e7ee1c09d220134
|
[
"MIT"
] | 212 |
2019-10-11T19:14:05.000Z
|
2022-03-31T07:08:02.000Z
|
sparseConv/multitask/semseg/models/resnet.py
|
HenrryBryant/Scene-Recognition-in-3D
|
8fb869e1f8e8ff48c6f1082bb75f60a562875fc5
|
[
"MIT"
] | 58 |
2019-10-13T14:49:39.000Z
|
2022-03-24T08:29:41.000Z
|
sparseConv/multitask/semseg/models/resnet.py
|
HenrryBryant/Scene-Recognition-in-3D
|
8fb869e1f8e8ff48c6f1082bb75f60a562875fc5
|
[
"MIT"
] | 45 |
2019-10-23T07:09:23.000Z
|
2022-03-04T09:49:37.000Z
|
import torch.nn as nn
import MinkowskiEngine as ME
from models.model import Model
from models.modules.common import ConvType, NormType, get_norm, conv, sum_pool
from models.modules.resnet_block import BasicBlock, Bottleneck
class ResNetBase(Model):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
OUT_PIXEL_DIST = 32
HAS_LAST_BLOCK = False
CONV_TYPE = ConvType.HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
assert self.BLOCK is not None
assert self.OUT_PIXEL_DIST > 0
super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
self.network_initialization(in_channels, out_channels, config, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, config, D):
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
dilations = config.dilations
bn_momentum = config.bn_momentum
self.inplanes = self.INIT_DIM
self.conv1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
D=D)
self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D)
self.layer1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[0], 1))
self.layer2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[1], 1))
self.layer3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[2], 1))
self.layer4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[3], 1))
self.final = conv(
self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_type=NormType.BATCH_NORM,
bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
D=self.D),
get_norm(norm_type, planes * block.expansion, D=self.D, bn_momentum=bn_momentum),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=self.CONV_TYPE,
D=self.D))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
conv_type=self.CONV_TYPE,
D=self.D))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.final(x)
return x
class ResNet14(ResNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1)
class ResNet18(ResNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2)
class ResNet34(ResNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3)
class ResNet50(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3)
class ResNet101(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3)
class STResNetBase(ResNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STResNet14(STResNetBase, ResNet14):
pass
class STResNet18(STResNetBase, ResNet18):
pass
class STResNet34(STResNetBase, ResNet34):
pass
class STResNet50(STResNetBase, ResNet50):
pass
class STResNet101(STResNetBase, ResNet101):
pass
class STResTesseractNetBase(STResNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseractNet14(STResTesseractNetBase, STResNet14):
pass
class STResTesseractNet18(STResTesseractNetBase, STResNet18):
pass
class STResTesseractNet34(STResTesseractNetBase, STResNet34):
pass
class STResTesseractNet50(STResTesseractNetBase, STResNet50):
pass
class STResTesseractNet101(STResTesseractNetBase, STResNet101):
pass
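# Instantiation sketch (assumptions: a minimal config namespace; the Model base class and an
# actual forward pass with MinkowskiEngine sparse tensors may require additional fields):
#
#   from types import SimpleNamespace
#   config = SimpleNamespace(dilations=(1, 1, 1, 1), bn_momentum=0.1, conv1_kernel_size=3)
#   net = ResNet14(in_channels=3, out_channels=20, config=config, D=3)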
| 24.645161 | 94 | 0.6365 |
f5264a41043765eecc731c9c9897509af167c4aa
| 28,370 |
py
|
Python
|
bin/tools/config_tool/config_tool.py
|
overtalk/ARK
|
9d314e99dc13684fc672371a0c3fbaa6b9a46d97
|
[
"Apache-2.0"
] | 1 |
2020-02-21T14:32:13.000Z
|
2020-02-21T14:32:13.000Z
|
bin/tools/config_tool/config_tool.py
|
overtalk/ARK
|
9d314e99dc13684fc672371a0c3fbaa6b9a46d97
|
[
"Apache-2.0"
] | null | null | null |
bin/tools/config_tool/config_tool.py
|
overtalk/ARK
|
9d314e99dc13684fc672371a0c3fbaa6b9a46d97
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# encoding=utf-8
# author: NickYang
# date: 2019/04/02
# version: 0.1
# changelog:
# 1. first version
# TODO:
# 1. array
import os
import sys
import datetime
import os.path
import shutil
import subprocess
import argparse
from xml.dom.minidom import Document
from xml.dom import minidom
# self module
import config_param
import config_xml
import config_excel
import importlib
from openpyxl import load_workbook
importlib.reload(sys)
# coding=utf-8
# parse command arguments
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', type=str,
required=True, help="excel file path")
return vars(parser.parse_args())
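# Illustrative invocation (added note; the path is a placeholder):
#
#   python config_tool.py -p <path-to-resource-root>
#
# where the resource root contains the directories referenced through
# config_param (excel_path, meta_path, server_res_path, ...).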
def write_entity_enum_head(file, name,cpp_or_cs=True):
if cpp_or_cs:
file.write(u"\tenum class " + name + " :std::uint32_t\n\t{\n\t\tenum_empty,\n")
else:
file.write(u"\tenum " + name + "\n\t{\n\t\tmeta_empty,\n")
def write_entity_enum_member(file, field_name,cpp_or_cs=True):
if cpp_or_cs:
file.write(u"\t\t" + field_name + ",\n")
else:
file.write(u"\t\t" + field_name + ",\n")
def write_entity_enum_end(file,name,cpp_or_cs=True):
if cpp_or_cs:
file.write(u"\t};\n\n")
file.write(u"\tusing " + name + "_rep_type = std::underlying_type<"+ name +">::type;\n\n")
else:
file.write(u"\t}\n\n")
def write_entity_enum_all_member(file, name, entity_dict,cpp_or_cs=True):
if cpp_or_cs:
member_list = entity_dict[name]
for k in range(0, len(member_list), 3):
field_name = member_list[k]
type_name = member_list[k+1]
if(type_name != "class"):
write_entity_enum_member(file, field_name,True)
else:
member_list = entity_dict[name]
for k in range(0, len(member_list), 3):
field_name = member_list[k]
type_name = member_list[k+1]
if(type_name != "class"):
write_entity_enum_member(file, field_name,False)
def write_entity_cpp_head(file, name):
file.write(u"\tclass AFEntityMeta" + name)
file.write(u"\n\t{\n\tpublic:\n")
file.write(u"\t\tAFEntityMeta" + name + "() = default;\n")
    file.write(u'''\t\tstatic const std::string& self_name() { static const std::string meta_%s_ = "%s"; return meta_%s_; }\n\n'''
% (name, name, name))
def write_entity_cs_head(file, name):
file.write(u"\tpublic class AFEntityMeta" + name + "\n\t{\n")
file.write(
u'''\t\tpublic static readonly String self_name = "%s";\n\n''' % name)
def write_entity_cpp_member(file, name,field_name, type_name, sub_class_name=""):
if type_name == "class":
return
# file.write(u'''\t\tstatic const std::string& %s() { static const std::string %s_ = "%s"; return %s_; } // %s - AFEntityMeta%s\n''' %
#(field_name, field_name, field_name, field_name, type_name, sub_class_name))
#file.write(u'''\t\tstatic int& %s_index() { static int %s_index_ = %d ; return %s_index_; } // %s - AFEntityMeta%s\n''' %
# (field_name, field_name, int(field_index), field_name, type_name, sub_class_name))
else:
file.write(u'''\t\tstatic const std::string& %s() { static const std::string %s_ = "%s"; return %s_; } // %s\n''' %
(field_name, field_name, field_name, field_name, type_name))
file.write(u'''\t\tstatic uint32_t %s_index() { static const int %s_index_ = static_cast<%s_rep_type>(%s::%s); return %s_index_; } // %s\n''' %
(field_name, field_name, name,name, field_name, field_name,field_name))
def write_entity_cs_member(file, name,field_name, type_name,sub_class_name=""):
if type_name == "class":
return
#file.write(u'''\t\tpublic static readonly String %s = %s; // %s - AFEntityMeta%s\n''' %
#(field_name, field_name, type_name, sub_class_name))
#file.write(u'''\t\tpublic static readonly Int %s_index = %d; // %s - AFEntityMeta%s\n''' %
#(field_name, int(field_index), type_name, sub_class_name))
else:
file.write(u'''\t\tpublic static readonly String %s = %s; // %s\n''' %
(field_name, field_name, type_name))
file.write(u'''\t\tpublic static UInt32 %s_index = (UInt32)%s.%s; // %s\n''' %
(field_name, name,field_name, field_name))
def write_entity_cpp_end(file):
file.write(u"\t};\n\n")
def write_entity_cs_end(file):
file.write(u"\t};\n\n")
def write_entity_all_member(file,enumclass_name, name, entity_dict, cpp_or_cs=True):
if cpp_or_cs:
# get fields
member_list = entity_dict[name]
for k in range(0, len(member_list), 3):
field_name = member_list[k]
type_name = member_list[k + 1]
sub_class_name = member_list[k + 2]
write_entity_cpp_member(
file, enumclass_name,field_name, type_name, sub_class_name)
else:
# get fields
member_list = entity_dict[name]
for k in range(0, len(member_list), 3):
field_name = member_list[k]
type_name = member_list[k + 1]
sub_class_name = member_list[k + 2]
write_entity_cs_member(
file, enumclass_name,field_name, type_name, sub_class_name)
def write_entity(file, name, entity_dict, cpp_or_cs=True):
if cpp_or_cs:
write_entity_enum_head(file,name,True)
write_entity_enum_all_member(file,name,entity_dict,True)
write_entity_enum_end(file,name,True)
write_entity_cpp_head(file, name)
# get fields
member_list = entity_dict[name]
for k in range(0, len(member_list), 3):
field_name = member_list[k]
type_name = member_list[k + 1]
sub_class_name = member_list[k + 2]
write_entity_cpp_member(
file, name,field_name, type_name, sub_class_name)
write_entity_cpp_end(file)
else:
write_entity_enum_head(file,name,False)
write_entity_enum_all_member(file,name,entity_dict,False)
write_entity_enum_end(file,name,False)
write_entity_cs_head(file, name)
# get parent fields
member_list = entity_dict[name]
for k in range(0, len(member_list), 3):
field_name = member_list[k]
type_name = member_list[k + 1]
sub_class_name = member_list[k + 2]
write_entity_cs_member(
file, name,field_name, type_name, sub_class_name)
write_entity_cs_end(file)
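# Rough sketch of the C++ emitted by write_entity() for a hypothetical entity
# class "Item" with a single field "name" (illustrative only, bodies elided):
#
#   enum class Item :std::uint32_t
#   {
#       enum_empty,
#       name,
#   };
#   using Item_rep_type = std::underlying_type<Item>::type;
#
#   class AFEntityMetaItem
#   {
#   public:
#       AFEntityMetaItem() = default;
#       static const std::string& self_name() { ... }
#       static const std::string& name() { ... }
#       static uint32_t name_index() { ... }
#   };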
# generate entity_class meta and config
def generate_entity_meta(res_path):
entity_filepath = os.path.join(
res_path, config_param.excel_path, config_param.special_file)
print("start to generate [%s]" % entity_filepath)
excel = config_excel.my_excel(entity_filepath)
# entity parent class
entity_parent_dict = {}
config_doc = Document()
config_root_node = config_doc.createElement('configs')
config_doc.appendChild(config_root_node)
for sheet_name in excel.get_sheet_names():
config_node = config_doc.createElement('config')
config_node.setAttribute("id", sheet_name)
config_node.setAttribute("meta", os.path.join(
config_param.meta_path, sheet_name + config_param.meta_ext))
config_root_node.appendChild(config_node)
excel.set_sheet_by_name(sheet_name) # set sheet
min_row_no = excel.get_min_row_no()
max_row_no = excel.get_max_row_no()+1
min_col_no = excel.get_min_col_no()
max_col_no = excel.get_max_col_no()+1
for row in range(min_row_no + config_param.entity_form_head_row, max_row_no):
meta_class = excel.get_cell_content(row, min_col_no)
if meta_class == None:
continue
meta_class_name = str(meta_class)
if not entity_dict.__contains__(meta_class_name):
entity_dict[meta_class_name] = []
# field type sub_class
for col in [config_param.entity_field_name_col, config_param.entity_field_type_col, config_param.entity_field_sub_class_col]:
cell_content = excel.get_cell_content(row, col)
if cell_content == None:
entity_dict[meta_class_name].append("")
else:
entity_dict[meta_class_name].append(str(cell_content))
# field type parent_class
if(excel.get_cell_content(row, config_param.entity_field_type_col)=="class"):
parent_class_cell = excel.get_cell_content(
row, config_param.entity_field_sub_class_col)
if (parent_class_cell != None):
if (not entity_parent_dict.__contains__(meta_class_name)):
entity_parent_dict[meta_class_name] = []
parent_class_name = str(parent_class_cell)
if parent_class_name not in entity_parent_dict[meta_class_name]:
entity_parent_dict[meta_class_name].append(parent_class_name)
meta_doc = Document()
meta_root_node = meta_doc.createElement('metas')
meta_doc.appendChild(meta_root_node)
for row in range(min_row_no + config_param.entity_form_head_row, max_row_no):
meta_class = excel.get_cell_content(row, min_col_no)
if meta_class == None:
continue
meta_class_name = str(meta_class)
data_node = meta_doc.createElement('meta')
meta_root_node.appendChild(data_node)
for col in range(min_col_no, max_col_no):
# if 2nd row cell value is 'unused', ignore
second_cell_value = excel.get_cell_content(min_row_no + 1, col)
if second_cell_value != None and str.lower(str(second_cell_value)) == config_param.entity_unused_flag:
continue
# ignore empty cell in first row
if excel.get_cell_content(min_row_no, col) == None:
continue
if excel.get_cell_content(row, col) == None:
data_node.setAttribute(
str(excel.get_cell_content(min_row_no, col)), "")
else:
data_node.setAttribute(str(excel.get_cell_content(min_row_no, col)),
str(excel.get_cell_content(row, col)))
with open(os.path.join(res_path, config_param.meta_path, sheet_name + config_param.meta_ext), 'w', encoding='utf-8') as f:
meta_doc.writexml(f, indent="\n",
addindent="\t", encoding='utf-8')
# open meta define file
cpp_file = open(os.path.join(res_path, config_param.cpp_meta_file), 'a', encoding='utf-8')
cpp_file.write(u"\t//////////////////////////////////\n\t//Entity meta\n")
cs_file = open(os.path.join(res_path, config_param.cs_meta_file), 'a', encoding='utf-8')
cs_file.write(u"\t//////////////////////////////////\n\t//Entity meta\n")
#generate config
with open(os.path.join(res_path, config_param.entity_class_file), 'w', encoding='utf-8') as f:
config_doc.writexml(f, indent="\n", addindent="\t", encoding='utf-8')
# print entity_dict
# print entity_parent_dict
# first of all, add parent entity classes
for i in entity_parent_dict:
parent_list = entity_parent_dict.get(i)
if parent_list == None:
continue
for j in range(0, len(parent_list)):
parent = parent_list[j]
if not entity_dict.__contains__(parent):
continue
write_entity(cpp_file, parent, entity_dict, True)
write_entity(cs_file, parent, entity_dict, False)
# then other entity classes
for k in entity_dict:
# jump over parent meta classes
find = False
for p in entity_parent_dict:
parent_list = entity_parent_dict.get(p)
if parent_list == None:
continue
if k in parent_list:
find = True
break
if find == True:
continue
#enum class
write_entity_enum_head(cpp_file,k,True)
write_entity_enum_head(cs_file,k,False)
parent_list = entity_parent_dict.get(k)
if parent_list != None:
cpp_file.write("\t\t//parent entity class enum\n")
for parent in parent_list:
write_entity_enum_all_member(cpp_file, parent, entity_dict,True)
write_entity_enum_all_member(cs_file, parent, entity_dict,False)
cpp_file.write("\n\t\t//self entity class enum\n")
write_entity_enum_all_member(cpp_file, k, entity_dict,True)
write_entity_enum_all_member(cs_file, k, entity_dict,False)
write_entity_enum_end(cpp_file, k,True)
write_entity_enum_end(cs_file, k,False)
write_entity_cpp_head(cpp_file, k)
write_entity_cs_head(cs_file, k)
# parent class members
parent_list = entity_parent_dict.get(k)
if parent_list != None:
cpp_file.write("\t\t//parent entity class\n")
for parent in parent_list:
write_entity_all_member(cpp_file, k,parent, entity_dict, True)
write_entity_all_member(cs_file,k, parent, entity_dict, False)
cpp_file.write("\n\t\t//self entity class\n")
write_entity_all_member(cpp_file,k, k, entity_dict, True)
write_entity_all_member(cs_file,k, k, entity_dict, False)
write_entity_cpp_end(cpp_file)
write_entity_cs_end(cs_file)
    # separator line
cpp_file.write(u"\t//////////////////////////////////\n\t//Config meta\n")
cs_file.write(u"\t//////////////////////////////////\n\t//Config meta\n")
# close meta define files
cpp_file.close()
cs_file.close()
print("generate [%s] finished" % entity_filepath)
# generate single config
def generate_single_config(res_path, excel_list, classname):
path = excel_list[0]
excel = config_excel.my_excel(path)
excel.set_sheet_by_index(0) # set default sheet
min_row_no = excel.get_min_row_no()
max_row_no = excel.get_max_row_no()+1
min_col_no = excel.get_min_col_no()
max_col_no = excel.get_max_col_no()+1
'''config meta xml'''
meta_doc = Document()
meta_root_node = meta_doc.createElement('metas')
meta_doc.appendChild(meta_root_node)
for col in range(min_col_no, max_col_no):
data_node = meta_doc.createElement('meta')
for row in range(min_row_no, min_row_no + config_param.config_form_head_row):
if config_param.config_form_head_list[row - min_row_no + 1] == 'target':
target_value = str.upper(str(excel.get_cell_content(row, col)))
                # S(erver) or A(all) will be generated for the server side
                if target_value != config_param.target_all and target_value != config_param.target_server:
                    continue
data_node.setAttribute(
config_param.config_form_head_list[row - min_row_no + 1], excel.get_cell_content(row, col))
# data_node.setAttribute(config_param.field_index,filename+"."+excel.get_cell_content(min_row_no, col))
if data_node.hasAttributes():
meta_root_node.appendChild(data_node)
with open(os.path.join(res_path, config_param.meta_path, classname + config_param.meta_ext), 'w', encoding='utf-8') as f:
meta_doc.writexml(f, indent="\n", addindent="\t", encoding='utf-8')
'''config data xml'''
config_doc = Document()
config_root_node = config_doc.createElement('data')
config_doc.appendChild(config_root_node)
for filepath in excel_list:
excel = config_excel.my_excel(filepath)
excel.set_sheet_by_index(0) # set default sheet
min_row_no = excel.get_min_row_no()
max_row_no = excel.get_max_row_no()+1
min_col_no = excel.get_min_col_no()
max_col_no = excel.get_max_col_no()+1
for row in range(min_row_no + config_param.config_form_head_row, max_row_no):
data_node = config_doc.createElement('data')
config_root_node.appendChild(data_node)
for col in range(min_col_no, max_col_no):
# ignore empty cell in first row
if excel.get_cell_content(min_row_no, col) == None:
continue
data_node.setAttribute(str(excel.get_cell_content(min_row_no, col)),
str(excel.get_cell_content(row, col)))
with open(os.path.join(res_path, config_param.server_res_path, classname + config_param.config_ext), 'w', encoding='utf-8') as f:
config_doc.writexml(
f, indent="\n", addindent="\t", encoding='utf-8')
# open meta define file
cpp_file = open(os.path.join(res_path, config_param.cpp_meta_file), 'a', encoding='utf-8')
cs_file = open(os.path.join(res_path, config_param.cs_meta_file), 'a', encoding='utf-8')
cpp_file.write(u"\tenum class %s : std::uint32_t\n\t{\n\t\tmeta_empty,\n" %(classname))
cs_file.write(u"\tenum %s\n\t{\n\t\tmeta_empty,\n" % (classname))
excel = config_excel.my_excel(excel_list[0])
excel.set_sheet_by_index(0) # set default sheet
min_row_no = excel.get_min_row_no()
max_row_no = excel.get_max_row_no()+1
min_col_no = excel.get_min_col_no()
max_col_no = excel.get_max_col_no()+1
for col in range(min_col_no, max_col_no):
field_name = str(excel.get_cell_content(min_row_no, col))
cpp_file.write(u"\t\t%s,\n" % (field_name))
cs_file.write(u"\t\t%s,\n" % (field_name))
cpp_file.write(u"\t};\n\n")
cpp_file.write(u"\tusing " + classname + "_rep_type = std::underlying_type<"+ classname +">::type;\n\n")
cs_file.write(u"\t}\n")
cpp_file.write(u"\tclass AFConfigMeta" +
classname.capitalize() + "\n\t{\n\tpublic:\n")
cpp_file.write(
u'''\t\tstatic const std::string& self_name() { static const std::string meta_%s_ = "%s"; return meta_%s_; }\n\n''' %
(classname, classname, classname))
cs_file.write(u"\tpublic class AFConfigMeta" +
classname.capitalize() + "\n\t{\n")
cs_file.write(
u'''\t\tpublic static readonly String self_name = "%s";\n\n''' % classname)
if(not entity_dict.__contains__(classname)):
entity_dict[classname]=[]
for col in range(min_col_no, max_col_no):
field_name = str(excel.get_cell_content(min_row_no, col))
type_name = str(excel.get_cell_content(min_row_no + 1, col))
entity_dict[classname].append(field_name)
entity_dict[classname].append(type_name)
entity_dict[classname].append("")
cpp_file.write(u'''\t\tstatic const std::string& %s() { static const std::string %s_ = "%s"; return %s_; } // %s\n''' %
(field_name, field_name, field_name, field_name, type_name))
cpp_file.write(u'''\t\tstatic uint32_t %s_index() { static const int %s_index_ = static_cast<%s_rep_type>(%s::%s); return %s_index_; } // %s\n''' %
(field_name, field_name, classname,classname, field_name, field_name,field_name))
cs_file.write(u'''\t\tpublic static readonly String %s = "%s"; // %s\n''' %
(field_name, field_name, type_name))
cs_file.write(u'''\t\tpublic static UInt32 %s_index = (UInt32)%s.%s; // %s\n''' %
(field_name, classname,field_name, field_name))
cpp_file.write(u"\t};\n\n")
cs_file.write(u"\t}\n\n")
# close meta define files
cpp_file.close()
cs_file.close()
return True
# generate excel to config
def generate_config(res_path):
print("Start to generate config...")
print("---------------------------------------")
meta_doc = Document()
meta_root_node = meta_doc.createElement('configs')
meta_doc.appendChild(meta_root_node)
file_list = os.listdir(os.path.join(res_path, config_param.excel_path))
filename_dict={}
for i in range(0, len(file_list)):
file_path = os.path.join(
res_path, config_param.excel_path, file_list[i])
if not os.path.isfile(file_path):
continue
(filename, ext) = os.path.splitext(file_list[i])
# file is entity_class or is not excel file
if filename == config_param.special_file_name or (ext != config_param.excel_ext and ext != config_param.excel_new_ext):
continue
if filename.startswith("~$"):
continue
else:
classname = os.path.splitext(filename)[0]
if not filename_dict.__contains__(classname):
filename_dict[classname] = []
filename_dict[classname].append(file_path)
for classname in filename_dict:
        ret = generate_single_config(res_path, filename_dict[classname], classname)
if ret == True:
# insert into config_class.config
config_node = meta_doc.createElement('config')
config_node.setAttribute("id", classname)
config_node.setAttribute("meta", os.path.join(
config_param.meta_path, classname + config_param.meta_ext))
config_node.setAttribute("res", os.path.join(
config_param.server_res_path, classname + config_param.config_ext))
meta_root_node.appendChild(config_node)
print("generate file = [%s] success" % file_path)
else:
raise Exception("generate file = [%s] failed" % file_path)
with open(os.path.join(res_path, config_param.config_class_file), 'w', encoding='utf-8') as f:
meta_doc.writexml(f, indent="\n", addindent="\t", encoding='utf-8')
print("---------------------------------------")
print("generate config finished")
def field_name_map_index(file,enum_class_name,field_name,cpp_or_cs=True):
if cpp_or_cs:
file.write("\t\t\t\t\t\t{\"%s\",static_cast<%s_rep_type>(%s::%s)},\n" % (field_name,enum_class_name,enum_class_name,field_name))
else:
file.write("\t\t\t\t\t{\"%s\",(UInt32)%s.%s},\n" % (field_name,enum_class_name,field_name))
def field_name_map_index_all(file,enum_class_name,class_name,entity_dict,cpp_or_cs=True):
if cpp_or_cs:
member_list=entity_dict[class_name]
for k in range(0,len(member_list),3):
field_name = member_list[k]
type_name = member_list[k+1]
sub_class_name = member_list[k+2]
if type_name == "class":
field_name_map_index_all(file,enum_class_name,sub_class_name,entity_dict,cpp_or_cs=True)
else:
field_name_map_index(file,enum_class_name,field_name,cpp_or_cs=True)
else:
member_list=entity_dict[class_name]
for k in range(0,len(member_list),3):
field_name = member_list[k]
type_name = member_list[k+1]
sub_class_name = member_list[k+2]
if type_name == "class":
field_name_map_index_all(file,enum_class_name,sub_class_name,entity_dict,cpp_or_cs=False)
else:
field_name_map_index(file,enum_class_name,field_name,cpp_or_cs=False)
def field_name_map_index_head_and_end(res_path,entity_dict,cpp_or_cs=True):
if cpp_or_cs:
cpp_file = open(os.path.join(res_path, config_param.cpp_meta_file), 'a', encoding='utf-8')
cpp_file.write(u"\tclass AFMetaNameIndex\n\t{\n")
cpp_file.write(u"\tpublic:\n")
cpp_file.write(u"\t\tstatic uint32_t GetIndex(const std::string& class_name, const std::string& field_name)\n\t\t{\n")
cpp_file.write(u"\t\t\tstatic const std::unordered_map<std::string,std::unordered_map<std::string,std::uint32_t>> class_name_index=\n\t\t\t{\n")
for class_name in entity_dict:
cpp_file.write(u"\t\t\t\t{\"%s\",\n\t\t\t\t\t{\n" % (class_name))
field_name_map_index_all(cpp_file,class_name,class_name,entity_dict,True)
cpp_file.write("\t\t\t\t\t}\n\t\t\t\t},\n")
cpp_file.write(u"\t\t\t};\n")
cpp_file.write(u"\t\t\tauto iter_class = class_name_index.find(class_name);\n")
cpp_file.write(u"\t\t\tif(iter_class == class_name_index.end()) { return 0; }\n")
cpp_file.write(u"\t\t\tauto iter_field = iter_class->second.find(field_name);\n")
cpp_file.write(u"\t\t\tif(iter_field == iter_class->second.end()) { return 0; }\n")
cpp_file.write(u"\t\t\treturn iter_field->second;\n")
cpp_file.write(u"\t\t}\n")
cpp_file.write(u"\t};\n")
cpp_file.close()
else:
cs_file = open(os.path.join(res_path, config_param.cs_meta_file), 'a', encoding='utf-8')
cs_file.write(u"\tclass AFMetaNameIndex{\n")
cs_file.write(u"\t\tDictionary<string, Dictionary<string,UInt32>> class_name_index = new Dictionary<string, Dictionary<string,UInt32>>{\n")
for class_name in entity_dict:
cs_file.write(u"\t\t\t{\"%s\",\n\t\t\t\tnew Dictionary<string,UInt32>{\n" % (class_name))
field_name_map_index_all(cs_file,class_name,class_name,entity_dict,False)
cs_file.write("\t\t\t\t}\n\t\t\t},\n")
cs_file.write(u"\t\t};\n")
cs_file.write(u"\t}\n")
cs_file.close()
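# The generated C++ AFMetaNameIndex can then be queried as, e.g. (illustrative):
#
#   std::uint32_t idx = AFMetaNameIndex::GetIndex("Item", "name");
#
# which returns 0 when either the class name or the field name is unknown.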
# write meta define files for cpp and c#
def write_meta_define_begin(res_path):
with open(os.path.join(res_path, config_param.cpp_meta_file), 'w', encoding='utf-8') as cpp_file:
cpp_file.write(u'''/*
* This source file is part of ARK
* For the latest info, see https://github.com/ArkNX
*
* Copyright (c) 2013-2019 ArkNX authors.
*
* Licensed under the Apache License, Version 2.0 (the 'License');
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an 'AS IS' BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
# pragma once
# include <string>
# include <map>
# include <unordered_map>
namespace ark
{\n''')
with open(os.path.join(res_path, config_param.cs_meta_file), 'w', encoding='utf-8') as cs_file:
cs_file.write(u'''/*
* This source file is part of ARK
* For the latest info, see https://github.com/ArkNX
*
* Copyright (c) 2013-2019 ArkNX authors.
*
* Licensed under the Apache License, Version 2.0 (the 'License');
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an 'AS IS' BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
namespace ark
{\n''')
# write meta define files
def write_meta_define_end(res_path):
with open(os.path.join(res_path, config_param.cpp_meta_file), 'a', encoding='utf-8') as cpp_file:
cpp_file.write(u"}")
with open(os.path.join(res_path, config_param.cs_meta_file), 'a', encoding='utf-8') as cs_file:
cs_file.write(u"}")
# main
if __name__ == "__main__":
args = parse_args()
res_path = args['path']
print(u"res_path = " + res_path)
# gen meta define files
write_meta_define_begin(res_path)
print("+++++++++++++++++++++++++++++++++++++++++")
# {{class:field_name,type_name,sub_class_name}}
entity_dict = {}
# gen entity meta
generate_entity_meta(res_path)
# gen config meta
generate_config(res_path)
# name_index
field_name_map_index_head_and_end(res_path,entity_dict,True)
field_name_map_index_head_and_end(res_path,entity_dict,False)
write_meta_define_end(res_path)
| 43.916409 | 155 | 0.636412 |
722d79f1bb2010f518235fadfb43bb136eab68c5
| 3,167 |
py
|
Python
|
Codes/Hill Climbing Matplot.py
|
tanu17/Graph-Algorithms-for-Heavy-weight-binary-codes
|
791a2ea858583c446e8a0a2de3c90107de534bc7
|
[
"MIT"
] | null | null | null |
Codes/Hill Climbing Matplot.py
|
tanu17/Graph-Algorithms-for-Heavy-weight-binary-codes
|
791a2ea858583c446e8a0a2de3c90107de534bc7
|
[
"MIT"
] | null | null | null |
Codes/Hill Climbing Matplot.py
|
tanu17/Graph-Algorithms-for-Heavy-weight-binary-codes
|
791a2ea858583c446e8a0a2de3c90107de534bc7
|
[
"MIT"
] | null | null | null |
import random
import itertools
import matplotlib.pyplot as plt
import numpy as np
"""
Let N(x) be the heuristic solution after hill climbing
| X \ {x} x belongs to X
N(x) = | removal depends on probability
| X\ {y} U {x} x belongs to H(n,w) \ x & y belongs to X
"""
################## FOR HEAVY WEIGHT CODES ############################
iter,p=0,0
seed=[]
max_len_list=[]
max_len=[0,0]
def constant_weight_codes(n, k):
result = []
for bits in itertools.combinations(range(n), k):
s = ['0'] * n
for bit in bits:
s[bit] = '1'
result.append(''.join(s))
return (result)
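# Example (added for clarity): all length-4 words of weight exactly 2,
# in itertools.combinations order.
assert constant_weight_codes(4, 2) == ['1100', '1010', '1001', '0110', '0101', '0011']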
def hamming_dist(x,y):
assert len(x) == len(y)
count,z = 0,int(x,2)^int(y,2)
while z:
count += 1
z &= z-1
return count
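# Example (added for clarity): '1100' and '1010' differ in the two middle bits.
assert hamming_dist('1100', '1010') == 2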
def randomGenerator(binSet):
while True:
r=random.randint(0,len(binSet)-1)
yield (binSet[r])
def HillClimbing_Better(binSet,d):
global iter,seed
while(True):
if (len(seed)<100):
compatibility=2
iter+=1
if (iter>8000):
break
try:
                b=next(randomGenerator(binSet))
except:
return(seed)
for x in seed:
if (x):
if (hamming_dist(x,b)<d):
compatibility-=1
if (compatibility==0):
break
if (compatibility==1):
a=x
if (compatibility==1 and random.random()<p and len(seed)>0):
seed.remove(a)
if b not in seed:
seed.append(b)
if compatibility==2 and b not in seed:
seed.append(b)
else:
return (seed)
return (seed)
class graph_matplot:
global n,w,d
@staticmethod
def graph_main(X,Y):
plt.plot(X,Y)
plt.ylabel("Length of Solution of H("+str(n)+","+str(d)+","+str(w)+")")
plt.xlabel("Probability")
plt.axis([0, 1, 0, 70])
plt.show()
#---------------------------------MAIN----------------------------------#
n=int(input("Enter length of binary word \n"))
w=int(input("Enter weight \n"))
d=int(input("Enter hamming distance\n"))
xList=[]
yList=[]
binSet=[]
for f in range(w,n+1):
binSet=binSet+constant_weight_codes(n,f)
for i in range(0,10000):
global p,iter,seed
p=i/10000
seed=[]
iter=0
heuristic_soln=HillClimbing_Better(binSet,d)
if (len(heuristic_soln)>max_len[0]):
max_len=[len(heuristic_soln),p]
max_len_list=heuristic_soln
xList.append(p)
yList.append(len(heuristic_soln))
print(p)
graph_matplot.graph_main(xList,yList)
print("="*30)
print("Max len: ",max_len[0]," And occured at probability: ",max_len[1])
print("H(",n,",",d,",",w,")")
print("Elements in max len list: ",max_len_list)
| 23.992424 | 80 | 0.475213 |
fcf844cbeadaad435ccdfa098ef20d5607017637
| 4,390 |
py
|
Python
|
paddlex/ppdet/utils/profiler.py
|
cheneyveron/PaddleX
|
86f73fc6a66b12c638f642524bfd1cf730e26c4b
|
[
"Apache-2.0"
] | 8 |
2020-03-11T08:12:19.000Z
|
2020-03-18T08:33:56.000Z
|
paddlex/ppdet/utils/profiler.py
|
cheneyveron/PaddleX
|
86f73fc6a66b12c638f642524bfd1cf730e26c4b
|
[
"Apache-2.0"
] | 1 |
2020-03-15T13:05:43.000Z
|
2020-03-15T13:05:43.000Z
|
paddlex/ppdet/utils/profiler.py
|
cheneyveron/PaddleX
|
86f73fc6a66b12c638f642524bfd1cf730e26c4b
|
[
"Apache-2.0"
] | 2 |
2020-03-15T11:53:54.000Z
|
2020-03-24T07:27:09.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
# A global variable to record the number of calling times for profiler
# functions. It is used to specify the tracing range of training steps.
_profiler_step_id = 0
# A global variable to avoid parsing from string every time.
_profiler_options = None
class ProfilerOptions(object):
'''
Use a string to initialize a ProfilerOptions.
    The string should be in the format: "key1=value1;key2=value2;key3=value3".
For example:
"profile_path=model.profile"
"batch_range=[50, 60]; profile_path=model.profile"
"batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile"
ProfilerOptions supports following key-value pair:
      batch_range - an integer list, e.g. [100, 110].
state - a string, the optional values are 'CPU', 'GPU' or 'All'.
sorted_key - a string, the optional values are 'calls', 'total',
                   'max', 'min' or 'ave'.
tracer_option - a string, the optional values are 'Default', 'OpDetail',
'AllOpDetail'.
profile_path - a string, the path to save the serialized profile data,
which can be used to generate a timeline.
exit_on_finished - a boolean.
'''
def __init__(self, options_str):
assert isinstance(options_str, str)
self._options = {
'batch_range': [10, 20],
'state': 'All',
'sorted_key': 'total',
'tracer_option': 'Default',
'profile_path': '/tmp/profile',
'exit_on_finished': True
}
self._parse_from_string(options_str)
def _parse_from_string(self, options_str):
for kv in options_str.replace(' ', '').split(';'):
key, value = kv.split('=')
if key == 'batch_range':
value_list = value.replace('[', '').replace(']', '').split(',')
value_list = list(map(int, value_list))
if len(value_list) >= 2 and value_list[0] >= 0 and value_list[
1] > value_list[0]:
self._options[key] = value_list
elif key == 'exit_on_finished':
self._options[key] = value.lower() in ("yes", "true", "t", "1")
elif key in [
'state', 'sorted_key', 'tracer_option', 'profile_path'
]:
self._options[key] = value
def __getitem__(self, name):
if self._options.get(name, None) is None:
raise ValueError(
"ProfilerOptions does not have an option named %s." % name)
return self._options[name]
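# Illustrative parse (added note): keys missing from the string keep their
# defaults, e.g.
#   opts = ProfilerOptions("batch_range=[50, 60]; profile_path=model.profile")
#   opts['batch_range']  -> [50, 60]
#   opts['sorted_key']   -> 'total'   (default)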
def add_profiler_step(options_str=None):
'''
Enable the operator-level timing using PaddlePaddle's profiler.
    The profiler uses an independent variable to count the profiler steps.
One call of this function is treated as a profiler step.
Args:
        options_str - a string to initialize the ProfilerOptions.
                      Default is None, and the profiler is disabled.
'''
if options_str is None:
return
global _profiler_step_id
global _profiler_options
if _profiler_options is None:
_profiler_options = ProfilerOptions(options_str)
if _profiler_step_id == _profiler_options['batch_range'][0]:
paddle.utils.profiler.start_profiler(
_profiler_options['state'], _profiler_options['tracer_option'])
elif _profiler_step_id == _profiler_options['batch_range'][1]:
paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'],
_profiler_options['profile_path'])
if _profiler_options['exit_on_finished']:
sys.exit(0)
_profiler_step_id += 1
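# Illustrative use inside a training loop (added sketch; `loader` and
# `train_one_step` are placeholders):
#
#   for step, batch in enumerate(loader):
#       add_profiler_step("batch_range=[10, 20]; profile_path=/tmp/profile")
#       train_one_step(batch)
#
# Profiling starts when the internal step counter reaches 10, stops at 20, and
# exits the process if exit_on_finished is left at its default of True.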
| 39.196429 | 81 | 0.628246 |
12f3434e71a5cbc6398ae865188f83d3c9fa32a7
| 14,904 |
py
|
Python
|
python/desc/sims/GCRCatSimInterface/CatalogClasses.py
|
mpwiesner/sims_GCRCatSimInterface
|
831e78ec8eb610983768d4657fbff9744cb17249
|
[
"BSD-3-Clause"
] | null | null | null |
python/desc/sims/GCRCatSimInterface/CatalogClasses.py
|
mpwiesner/sims_GCRCatSimInterface
|
831e78ec8eb610983768d4657fbff9744cb17249
|
[
"BSD-3-Clause"
] | null | null | null |
python/desc/sims/GCRCatSimInterface/CatalogClasses.py
|
mpwiesner/sims_GCRCatSimInterface
|
831e78ec8eb610983768d4657fbff9744cb17249
|
[
"BSD-3-Clause"
] | 2 |
2018-04-12T20:49:23.000Z
|
2018-08-04T00:08:46.000Z
|
import os
import re
import numpy as np
import copy
from .SedFitter import sed_from_galacticus_mags
from .SedFitter import sed_filter_names_from_catalog
from lsst.utils import getPackageDir
from lsst.sims.catalogs.definitions import InstanceCatalog
from lsst.sims.catalogs.decorators import cached
from lsst.sims.catUtils.exampleCatalogDefinitions import PhoSimCatalogSersic2D
from lsst.sims.catUtils.exampleCatalogDefinitions import PhoSimCatalogZPoint
from lsst.sims.catUtils.exampleCatalogDefinitions import PhoSimCatalogSN
from lsst.sims.catUtils.mixins import VariabilityAGN
from lsst.sims.catalogs.decorators import cached, compound
from lsst.sims.catUtils.mixins import EBVmixin
__all__ = ["PhoSimDESCQA", "PhoSimDESCQA_AGN", "DC2PhosimCatalogSN",
"SubCatalogMixin", "SprinklerTruthCatMixin", "TruthPhoSimDESCQA",
"TruthPhoSimDESCQA_AGN"]
#########################################################################
# define a class to write the PhoSim catalog; defining necessary defaults
class SubCatalogMixin(object):
"""
This mixin provides a way to write parallel catalogs from
a CompoundInstanceCatalog. It supplants the _write_recarray
    method, which CompoundInstanceCatalog calls, and replaces it
with something that will write a separate truth catalog.
"""
_subcat_file_handle = None
subcat_prefix = None # prefix prepended to main InstanceCatalog file name
subcat_suffix = None # suffix appended to main InstanceCatalog file name
# This boolean will keep track of whether or not this
# truth catalog has been written to yet. If it has,
# it will be opened in mode 'a'; if not, it will be
# opened in mode 'w'
_subcat_cat_written = False
# so that we don't have to rework the CompoundInstanceCatalog
# API
_write_subcat_header = False
# The list below *will* be shared among instantiations
# as a safeguard against accidentally opening the
# same SubCatalog in write mode twice
_list_of_opened_subcats = set()
@cached
def get_sprinkling_switch(self):
is_sprinkled = self.column_by_name('is_sprinkled')
return np.where(is_sprinkled==1, 1, None)
def _write_recarray(self, local_recarray, file_handle):
"""
local_recarray is a recarray of the data to be written
file_handle points to the main InstanceCatalog that
the CompoundInstanceCatalog is trying to write
"""
if self._subcat_file_handle is None:
file_dir = os.path.dirname(file_handle.name)
instcat_name = os.path.basename(file_handle.name)
subcat_file_name = instcat_name
if self.subcat_prefix is None and self.subcat_suffix is None:
raise RuntimeError("Trying to write SubCatalog without either "
"a subcat_prefix or a subcat_suffix. This "
"could cause you to overwrite existing files")
if self.subcat_prefix is not None:
subcat_file_name = self.subcat_prefix + subcat_file_name
if self.subcat_suffix is not None:
subcat_file_name += self.subcat_suffix
subcat_name = os.path.join(file_dir,
subcat_file_name)
assert subcat_name != file_handle.name
if not self._subcat_cat_written:
write_mode = 'w'
if subcat_name in self._list_of_opened_subcats:
raise RuntimeError("Trying to create SubCatalog\n"
+ "%s\n" % subcat_name
+ "which was already created")
else:
write_mode = 'a'
self._subcat_file_handle = open(subcat_name, write_mode)
self._subcat_cat_written = True
self._list_of_opened_subcats.add(subcat_name)
if write_mode == 'w' and self._write_subcat_header:
# call InstanceCatalog.write_header to avoid calling
# the PhoSim catalog write_header (which will require
# a phoSimHeaderMap)
InstanceCatalog.write_header(self, self._subcat_file_handle)
InstanceCatalog._write_recarray(self, local_recarray,
self._subcat_file_handle)
class SprinklerTruthCatMixin(SubCatalogMixin):
"""
A sub-class of the SubCatalogMixin specifically for generating truth
catalogs for sprinkled objects
"""
column_outputs = ['uniqueId', 'galaxy_id', 'raJ2000', 'decJ2000',
'sedFilepath', 'phoSimMagNorm',
'redshift', 'isPoint']
cannot_be_null = ['sprinkling_switch']
_write_subcat_header = True
class DC2PhosimCatalogSN(PhoSimCatalogSN):
"""
Modification of the PhoSimCatalogSN mixin to provide shorter sedFileNames
by leaving out the parts of the directory name. Also fix name changes from
gamma to shear.
"""
def get_sedFilepath(self):
return self.column_by_name('TsedFilepath')
def get_shorterFileNames(self):
"""
Method to truncate filenames for transient
spectra written out by phosim.
.. note: the variable sep needs to be in
`self.sn_sedfile_prefix` before writing out
a phosim catalog.
"""
fnames = self.column_by_name('sedFilepath')
sep = 'Dynamic/specFileSN_'
split_names = []
for fname in fnames:
if 'None' not in fname:
fname = sep + fname.split(sep)[-1]
else:
fname = 'None'
split_names.append(fname)
return np.array(split_names)
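    # Illustrative effect of the truncation above (added comment):
    #   'some/dir/Dynamic/specFileSN_1234_5.dat' -> 'Dynamic/specFileSN_1234_5.dat'
    #   'None' is passed through unchanged.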
# column_outputs = PhoSimCatalogSN.column_outputs
# column_outputs[PhoSimCatalogSN.column_outputs.index('sedFilepath')] = \
# 'shorterFileNames'
column_outputs = ['prefix', 'uniqueId', 'raPhoSim', 'decPhoSim',
'phoSimMagNorm', 'shorterFileNames', 'redshift',
'shear1', 'shear2', 'kappa', 'raOffset', 'decOffset',
'spatialmodel', 'internalExtinctionModel',
'galacticExtinctionModel', 'galacticAv', 'galacticRv']
cannot_be_null = ['x0', 't0', 'z', 'shorterFileNames']
default_columns = [('gamma1', 0., float), ('gamma2', 0., float), ('kappa', 0., float),
('raOffset', 0., float), ('decOffset', 0., float),
('galacticAv', 0.1, float), ('galacticRv', 3.1, float),
('galacticExtinctionModel', 'CCM', (str, 3)),
('internalExtinctionModel', 'none', (str, 4)), ('internalAv', 0., float),
('internalRv', 3.1, float), ('shear1', 0., float), ('shear2', 0., float)]
class PhoSimDESCQA(PhoSimCatalogSersic2D, EBVmixin):
# default values used if the database does not provide information
default_columns = [('raOffset', 0.0, float), ('decOffset', 0.0, float),
('internalExtinctionModel', 'CCM', str, 3),
('internalAv', 0.1, float),
('internalRv', 3.1, float),
('galacticExtinctionModel', 'CCM', str, 3),
('galacticRv', 3.1, float)]
cannot_be_null = ['magNorm']
def __init__(self, *args, **kwargs):
# Update the spatial model if knots are requested, for knots, the sersic
# parameter actually contains the number of knots
if 'cannot_be_null' in kwargs.keys():
if 'hasKnots' in kwargs['cannot_be_null']:
self.catalog_type = 'phoSim_catalog_KNOTS'
self.spatialModel = 'knots'
if 'hasDisk' not in kwargs['cannot_be_null']:
kwargs['cannot_be_null'].append('hasDisk')
super(PhoSimDESCQA, self).__init__(*args, **kwargs)
# below are defined getter methods used to define CatSim value-added columns
@cached
def get_hasDisk(self):
output = np.where(self.column_by_name('stellar_mass_disk')>0.0, 1.0, None)
return output
@cached
def get_hasKnots(self):
return self.column_by_name('hasDisk')
@cached
def get_hasBulge(self):
output = np.where(self.column_by_name('stellar_mass_bulge')>0.0, 1.0, None)
return output
@compound('internalAv_fitted', 'internalRv_fitted')
def get_internalDustParams(self):
if ('hasDisk' in self._cannot_be_null and
'hasBulge' in self._cannot_be_null):
raise RuntimeError('\nUnsure whether this is a disk catalog '
'or a bulge catalog\n'
'self._cannot_be_null %s' % self._cannot_be_null)
elif 'hasDisk' in self._cannot_be_null:
lum_type = 'disk'
elif 'hasBulge' in self._cannot_be_null:
lum_type = 'bulge'
else:
raise RuntimeError('\nUnsure whether this is a disk catalog '
'or a bulge catalog\n'
'self._cannot_be_null %s' % self._cannot_be_null)
# this is a hack to replace anomalous values of dust extinction
# with more reasonable values
if not hasattr(self, '_dust_rng'):
self._dust_rng = np.random.RandomState(182314)
# temporarily suppress divide by zero warnings
with np.errstate(divide='ignore', invalid='ignore'):
av_name = 'A_v_%s' % lum_type
if av_name not in self._all_available_columns:
av_name = 'A_v'
av_list = copy.copy(self.column_by_name(av_name))
rv_name = 'R_v_%s' % lum_type
if rv_name not in self._all_available_columns:
rv_name = 'R_v'
rv_list = copy.copy(self.column_by_name(rv_name))
offensive_av = np.where(np.logical_or(np.isnan(av_list),
np.logical_or(av_list<0.001, av_list>3.1)))
av_list[offensive_av] = self._dust_rng.random_sample(len(offensive_av[0]))*3.1+0.001
offensive_rv = np.where(np.logical_or(np.isnan(rv_list),
np.logical_or(rv_list<1.0, rv_list>5.0)))
rv_list[offensive_rv] = self._dust_rng.random_sample(len(offensive_rv[0]))*4.0+1.0
return np.array([av_list, rv_list])
@compound('sedFilename_fitted', 'magNorm_fitted')
def get_fittedSedAndNorm(self):
if not hasattr(self, '_disk_flux_names'):
f_params = sed_filter_names_from_catalog(self.db_obj._catalog)
np.testing.assert_array_almost_equal(f_params['disk']['wav_min'],
f_params['bulge']['wav_min'],
decimal=10)
np.testing.assert_array_almost_equal(f_params['disk']['wav_width'],
f_params['bulge']['wav_width'],
decimal=10)
self._disk_flux_names = f_params['disk']['filter_name']
self._bulge_flux_names = f_params['bulge']['filter_name']
self._sed_wav_min = f_params['disk']['wav_min']
self._sed_wav_width = f_params['disk']['wav_width']
if 'hasBulge' in self._cannot_be_null and 'hasDisk' in self._cannot_be_null:
raise RuntimeError('\nUnsure whether this is a disk catalog or a bulge catalog.\n'
'Both appear to be in self._cannot_be_null.\n'
'self._cannot_be_null: %s' % self._cannot_be_null)
elif 'hasBulge' in self._cannot_be_null:
flux_names = self._bulge_flux_names
elif 'hasDisk' in self._cannot_be_null:
flux_names = self._disk_flux_names
else:
            raise RuntimeError('\nUnsure whether this is a disk catalog or a bulge catalog.\n'
'Neither appear to be in self._cannot_be_null.\n'
'self._cannot_be_null: %s' % self._cannot_be_null)
with np.errstate(divide='ignore', invalid='ignore'):
mag_array = np.array([-2.5*np.log10(self.column_by_name(name))
for name in flux_names])
redshift_array = self.column_by_name('true_redshift')
if len(redshift_array) == 0:
return np.array([[], []])
H0 = self.db_obj._catalog.cosmology.H0.value
Om0 = self.db_obj._catalog.cosmology.Om0
(sed_names,
mag_norms) = sed_from_galacticus_mags(mag_array,
redshift_array,
H0, Om0,
self._sed_wav_min,
self._sed_wav_width)
return np.array([sed_names, mag_norms])
@cached
def get_magNorm(self):
raw_magnorm = self.column_by_name('magNorm_dc2')
fitted_magnorm = self.column_by_name('magNorm_fitted')
preliminary_output=np.where(np.isnan(raw_magnorm), fitted_magnorm, raw_magnorm)
preliminary_output = np.array(preliminary_output).astype(float)
return np.where(preliminary_output<998.0, preliminary_output, np.NaN)
@cached
def get_sedFilepath(self):
raw_filename = self.column_by_name('sedFilename_dc2')
fitted_filename = self.column_by_name('sedFilename_fitted')
return np.where(np.char.find(raw_filename.astype('str'), 'None')==0,
fitted_filename, raw_filename)
@cached
def get_internalRv(self):
raw_rv = self.column_by_name('internalRv_dc2')
fitted_rv = self.column_by_name('internalRv_fitted')
return np.where(np.isnan(raw_rv), fitted_rv, raw_rv)
@cached
def get_internalAv(self):
raw_av = self.column_by_name('internalAv_dc2')
fitted_av = self.column_by_name('internalAv_fitted')
return np.where(np.isnan(raw_av), fitted_av, raw_av)
def get_phoSimMagNorm(self):
"""
Need to leave this method here to overload the get_phoSimMagNorm
in the base PhoSim InstanceCatalog classes
"""
self.column_by_name('is_sprinkled')
return self.column_by_name('magNorm')
class TruthPhoSimDESCQA(SprinklerTruthCatMixin, PhoSimDESCQA):
pass
class PhoSimDESCQA_AGN(PhoSimCatalogZPoint, EBVmixin, VariabilityAGN):
cannot_be_null = ['sedFilepath', 'magNorm']
@cached
def get_prefix(self):
self.column_by_name('is_sprinkled')
chunkiter = range(len(self.column_by_name(self.refIdCol)))
return np.array(['object' for i in chunkiter], dtype=(str, 6))
class TruthPhoSimDESCQA_AGN(SprinklerTruthCatMixin, PhoSimDESCQA_AGN):
pass
| 41.057851 | 96 | 0.611648 |
ab0619f537226235945d25b6c0e3a61547304d47
| 12,851 |
py
|
Python
|
view/messages.py
|
Prelysium/facemask-backend
|
bbd00ef095ef794da8a64b01bc90a3052618876b
|
[
"MIT"
] | null | null | null |
view/messages.py
|
Prelysium/facemask-backend
|
bbd00ef095ef794da8a64b01bc90a3052618876b
|
[
"MIT"
] | null | null | null |
view/messages.py
|
Prelysium/facemask-backend
|
bbd00ef095ef794da8a64b01bc90a3052618876b
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import math
from config import config_import as cu
from DB.db import CounterDB
# Database object
DB = CounterDB()
# Import conf constants
FONT_CONF = cu.get_config_data_by_key("font")
TEXT_CONF = cu.get_config_data_by_key("text")
CAPACITY = cu.get_config_data_by_key("CAPACITY")
DISPLAY_CONF = cu.get_config_data_by_key("monitor_display")
WATERMARK_CONF = cu.get_config_data_by_key("watermarks")
OVERCROWD_CONF = cu.get_config_data_by_key("overcrowd")
# Store consts used across functions in-memory
FONT_PATH = FONT_CONF["FONT_PATH"]
FONT_SMALL = ImageFont.truetype(FONT_PATH, FONT_CONF["FONT_SIZE_SMALL"])
FONT_MEDIUM = ImageFont.truetype(FONT_PATH, FONT_CONF["FONT_SIZE_MEDIUM"])
FONT_LARGE = ImageFont.truetype(FONT_PATH, FONT_CONF["FONT_SIZE_LARGE"])
FONT_WAIT = ImageFont.truetype(FONT_PATH, FONT_CONF["FONT_WAIT"])
TEXT_COUNTER = TEXT_CONF["TEXT_COUNTER"]
TEXT_BITTE = TEXT_CONF["TEXT_BITTE"]
TEXT_WAIT = TEXT_CONF["TEXT_WAIT"]
WARNING_COLOR = DISPLAY_CONF["WARNING_COLOR"]
WHITE = DISPLAY_CONF["WHITE"]
OVERLAY_ALPHA = DISPLAY_CONF["OVERLAY_ALPHA"]
OVERLAY_COLOR = DISPLAY_CONF["OVERLAY_COLOR"]
PLEASE_WAIT_COLOR = DISPLAY_CONF["PLEASE_WAIT_COLOR"]
def overcrowd_messages(img):
"""
Add overcrowd messages on image
Args:
img (np.array): Image array
Returns:
(np.array)
"""
# add alpha channel to image
alpha_channel = np.ones((img.shape[0], img.shape[1]), dtype="uint8") * 255
img = np.dstack([img, alpha_channel])
img_pil = Image.fromarray(img)
draw = ImageDraw.Draw(img_pil)
X_center_right = OVERCROWD_CONF["X_CENTER_RIGHT"]
X_center_left = OVERCROWD_CONF["X_CENTER_LEFT"]
# texts, that need to be added using PIL
text_stat_left = "{}".format(CAPACITY)
text_stat_right = " / {}".format(CAPACITY)
# shapes of each text
w_text_wait, h_text_wait = draw.textsize(TEXT_WAIT, stroke_width=3, font=FONT_WAIT)
w_text_counter, h_text_counter = draw.textsize(
TEXT_COUNTER, stroke_width=1, font=FONT_LARGE
)
w_stat_left, h_stat_left = draw.textsize(
text_stat_left, stroke_width=2, font=FONT_LARGE
)
w_stat_right, h_stat_right = draw.textsize(
text_stat_right, stroke_width=2, font=FONT_LARGE
)
# calculation of coordinates for each text
X_text_wait = int(X_center_right - w_text_wait / 2)
Y_text_wait = OVERCROWD_CONF["Y_TEXT_WAIT"]
X_text_counter = int(X_center_right - w_text_counter / 2)
Y_text_counter = OVERCROWD_CONF["Y_TEXT_COUNTER"]
X_stat_left = int(X_center_right - (w_stat_left + w_stat_right) / 2)
Y_stat = Y_text_counter + h_text_counter
X_stat_right = X_stat_left + w_stat_left
# draw texts on image
draw.text(
(X_text_wait, Y_text_wait),
TEXT_WAIT,
font=FONT_WAIT,
fill=PLEASE_WAIT_COLOR,
stroke_width=3,
)
draw.text(
(X_text_counter, Y_text_counter),
TEXT_COUNTER,
font=FONT_LARGE,
fill=WHITE,
stroke_width=1,
)
draw.text(
(X_stat_left, Y_stat),
text_stat_left,
font=FONT_LARGE,
fill=WARNING_COLOR,
stroke_width=2,
)
draw.text(
(X_stat_right, Y_stat),
text_stat_right,
font=FONT_LARGE,
fill=WHITE,
stroke_width=2,
)
# add overlay on image
overlay = np.zeros((1080, 1920, 4), dtype="uint8")
img = Image.alpha_composite(img_pil, Image.fromarray(overlay, "RGBA"))
img = cv2.cvtColor(np.array(img, dtype="uint8"), cv2.COLOR_BGR2RGB)
return img
def full_overlay(img, X_margin, Y_margin):
"""
Adds black overlay covering most of the image
Args:
img (np.array): Image
X_margin (int): Distance from image left/right edges in pixels
Y_margin (int): Distance from image top/bottom edges in pixels
Returns:
(np.array): Updated image
"""
overlay = img.copy()
overlay = cv2.rectangle(
overlay,
(X_margin, Y_margin),
(img.shape[1] - X_margin, img.shape[0] - Y_margin),
OVERLAY_COLOR,
-1,
)
    img = cv2.addWeighted(overlay, OVERLAY_ALPHA, img, 1 - OVERLAY_ALPHA, 0, img)
    return img
def overcrowd_overlay(img):
"""
Adds a message about the place being full and
an overlay covering most of the image
Args:
img (np.array): Image
Returns:
(np.array): Updated image
"""
h, w, _ = img.shape
Y_margin = int(h * 65 / 1080)
X_margin = int(w * 60 / 1920)
full_overlay(img, X_margin, Y_margin)
img = overcrowd_messages(img)
return img
def add_warning_text(img, box_height):
"""
Add warning text on top centre of the image
Args:
img (np.array): Image to place warning text over
box_height (int): height of the box in pixels
Returns:
(np.array): Updated image, with warning text
"""
width = img.shape[1]
img_pil = Image.fromarray(img)
draw = ImageDraw.Draw(img_pil)
# import conf constants for re-use
SAFETY_FIRST = TEXT_CONF["SAFETY_FIRST"]
# get shape for parts of text
w_text, h_text = draw.textsize(SAFETY_FIRST, stroke_width=1, font=FONT_LARGE)
    # TODO: rename w_exc (width of the "!" glyph) to something more descriptive
w_exc, _ = draw.textsize("!", stroke_width=1, font=FONT_LARGE)
w_triangle = h_text * 3 / math.sqrt(3)
# calculate coordinates for text and add on image
text_X = int(width / 2 - w_text / 2)
text_Y = int(box_height / 2 - h_text / 2)
# draw the warning message text
draw.text(
(text_X, text_Y),
SAFETY_FIRST,
font=FONT_LARGE,
fill=WARNING_COLOR,
stroke_width=2,
)
# transform img to np.array for cv2
img = np.array(img_pil, dtype="uint8")
    # point coordinates for left warning triangle
point_left = [
int(width / 2 - w_text / 2 + w_exc / 2 - w_triangle / 2),
int(box_height / 2 + h_text * 0.75),
]
point_up = [
int(width / 2 - w_text / 2 + w_exc / 2),
int(box_height / 2 - h_text * 0.75),
]
point_right = [
int(width / 2 - w_text / 2 + w_exc / 2 + w_triangle / 2),
int(box_height / 2 + h_text * 0.75),
]
# add triangle on image
cv2.polylines(
img,
np.int32(np.array([[point_left, point_up, point_right]])),
True,
WARNING_COLOR,
5,
)
# point coordinates for right warning triangle
point_left[0] = point_left[0] + w_text - w_exc
point_up[0] = point_up[0] + w_text - w_exc
point_right[0] = point_right[0] + w_text - w_exc
# add right triangle on image
cv2.polylines(
img,
np.int32(np.array([[point_left, point_up, point_right]])),
True,
WARNING_COLOR,
5,
)
return img
def add_counter_text(img, box_shape, people_in):
"""
Add person counter text on the image
Args:
img (np.array): Image
box_shape (tuple): (width, height) of the counter box
people_in (int): Number representing the amount of
people inside the space
Returns:
(np.array): Updated image
"""
box_width, box_height = box_shape
img_pil = Image.fromarray(img)
draw = ImageDraw.Draw(img_pil)
# set in/capacity numbers
text_in = "{}".format(people_in)
text_cap = "{}".format(CAPACITY)
# import constants for re-use
TEXT_COUNTER_UP = TEXT_CONF["TEXT_COUNTER_UP"]
TEXT_COUNTER_DOWN = TEXT_CONF["TEXT_COUNTER_DOWN"]
# get shapes for parts of text
w_up, h_up = draw.textsize(TEXT_COUNTER_UP, stroke_width=1, font=FONT_SMALL)
w_down, h_down = draw.textsize(TEXT_COUNTER_DOWN, stroke_width=1, font=FONT_SMALL)
w_in, h_in = draw.textsize(text_in, stroke_width=1, font=FONT_SMALL)
w_cap, h_cap = draw.textsize(text_cap, stroke_width=1, font=FONT_SMALL)
w_slash, h_slash = draw.textsize(" / ", stroke_width=1, font=FONT_SMALL)
# calculate coordinates for each part of the text
textX_up = int((box_width - w_up) / 2)
textY_up = int(0.05 * box_height)
textX_down = int((box_width - w_down) / 2)
textY_down = int(0.1 * box_height + h_up)
textX_in = int((box_width - w_slash) / 2 - w_in)
textY_stat = int(0.2 * box_height + h_down + h_up)
textX_slash = int((box_width - w_slash) / 2)
textX_cap = int((box_width + w_slash) / 2)
# add text on image
draw.text(
(textX_up, textY_up),
TEXT_COUNTER_UP,
font=FONT_SMALL,
fill=WHITE,
stroke_width=1,
)
draw.text(
(textX_down, textY_down),
TEXT_COUNTER_DOWN,
font=FONT_SMALL,
fill=WHITE,
stroke_width=1,
)
draw.text(
(textX_in, textY_stat),
text_in,
font=FONT_SMALL,
fill=(0, 255, 0),
stroke_width=1,
)
draw.text(
(textX_slash, textY_stat), " / ", font=FONT_SMALL, fill=WHITE, stroke_width=1
)
draw.text(
(textX_cap, textY_stat), text_cap, font=FONT_SMALL, fill=WHITE, stroke_width=1
)
img = np.array(img_pil, dtype="uint8")
return img
def counter_overlay(img, people_in, people_on_frame, masks_on=False):
"""
Implements overlay/message logic. Adds messages on image.
Args:
img (np.array): Image
people_in (int): Number representing the amount of
people inside the space
people_on_frame (bool): True/False whether there are
people on frame or not
masks_on (bool): True/False whether everyone on
frame wears a mask or not
Returns:
(np.array): Updated image
"""
box_width = int(0.168 * img.shape[1])
box_height = int(0.142 * img.shape[0])
overlay = img.copy()
if masks_on or not people_on_frame:
overlay = cv2.rectangle(
overlay, (0, 0), (box_width, box_height), OVERLAY_COLOR, -1
)
# otherwise for adding warning message we take overlay on full width
else:
overlay = cv2.rectangle(
overlay, (0, 0), (img.shape[1], box_height), OVERLAY_COLOR, -1
)
img = cv2.addWeighted(overlay, OVERLAY_ALPHA, img, 1 - OVERLAY_ALPHA, 0, img)
# add counter text on the overlay
img = add_counter_text(img, (box_width, box_height), people_in)
# if there are no people on frame just return, no need for messages
if not people_on_frame:
return img
    if masks_on:
        # if masks are on add 'thanks' message on bottom
img = lower_overlay(img, TEXT_CONF["TEXT_DANKE_DOWN"])
else:
img = add_warning_text(img, box_height)
img = lower_overlay(img, TEXT_CONF["TEXT_BITTE_DOWN"])
return img
def lower_overlay(img, text):
"""
Add overlay on bottom of the image
Args:
img (np.array): Image
text (str): Message to add on the overlay
Returns:
(np.array): Updated image, with lower overlay
"""
# add overlay on bottom
overlay_height = int(0.13 * img.shape[0])
overlay = img.copy()
overlay = cv2.rectangle(
overlay,
(0, img.shape[0] - overlay_height),
(img.shape[1], img.shape[0]),
OVERLAY_COLOR,
-1,
)
img = cv2.addWeighted(overlay, OVERLAY_ALPHA, img, 1 - OVERLAY_ALPHA, 0, img)
# transform image to PIL to add text on it
img_pil = Image.fromarray(img)
draw = ImageDraw.Draw(img_pil)
# get width/height of text
w, h = draw.textsize(text, stroke_width=1, font=FONT_LARGE)
# calculate coordinates for text
textX = int((img.shape[1] - w) / 2)
textY = int(img.shape[0] - (overlay_height + h) / 2)
# add text on bottom overlay
draw.text((textX, textY), text, font=FONT_LARGE, fill=WHITE, stroke_width=1)
img = np.array(img_pil)
return img
def add_view_messages(img, people_on_frame, masks_on):
"""
Add view messages on frame
Args:
img (np.array): Image
people_on_frame (bool): Flag if there are people on frame or not
        masks_on (bool): Flag if everyone on frame wears a mask or not
Returns:
(np.array): Image, updated with messages on it
"""
image_area = img.shape[0] * img.shape[1]
# resize image to default resolution
img = cv2.resize(img, DISPLAY_CONF["RESOLUTION"])
# get the number of people inside the given space
people_in = DB.in_current()
# if place is full show the message for it
if people_in > CAPACITY:
return overcrowd_overlay(img)
    # otherwise show the messages according to the incoming people
# counter_overlay handles displaying messages about capacity and warnings
img = counter_overlay(img, people_in, people_on_frame, masks_on)
img = cv2.cvtColor(np.array(img, dtype="uint8"), cv2.COLOR_BGR2RGB)
return img
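# Illustrative call (added sketch; `frame` stands for a BGR image, e.g. read
# from cv2.VideoCapture):
#
#   out = add_view_messages(frame, people_on_frame=True, masks_on=False)
#
# which draws the counter box plus the mask warning and bottom message overlays.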
| 29.206818 | 87 | 0.646098 |
a6fbbbda33658c5e6fa44dfaf6e480487b0fb269
| 491 |
py
|
Python
|
libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/about.py
|
PrettyWood/botbuilder-python
|
ab79f6b60066b05a00f729d6cb1d8bee30a786e2
|
[
"MIT"
] | null | null | null |
libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/about.py
|
PrettyWood/botbuilder-python
|
ab79f6b60066b05a00f729d6cb1d8bee30a786e2
|
[
"MIT"
] | null | null | null |
libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/about.py
|
PrettyWood/botbuilder-python
|
ab79f6b60066b05a00f729d6cb1d8bee30a786e2
|
[
"MIT"
] | 1 |
2022-02-24T10:23:28.000Z
|
2022-02-24T10:23:28.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
__title__ = "botbuilder-adapters-slack"
__version__ = (
os.environ["packageVersion"] if "packageVersion" in os.environ else "4.11.0"
)
__uri__ = "https://www.github.com/Microsoft/botbuilder-python"
__author__ = "Microsoft"
__description__ = "Microsoft Bot Framework Bot Builder"
__summary__ = "Microsoft Bot Framework Bot Builder SDK for Python."
__license__ = "MIT"
| 32.733333 | 81 | 0.741344 |
f3c5eff585209a747917f596ca483c2455deb6fb
| 3,701 |
py
|
Python
|
tests/test_airterminalsingleductvavnoreheat.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 19 |
2015-12-08T23:33:51.000Z
|
2022-01-31T04:41:10.000Z
|
tests/test_airterminalsingleductvavnoreheat.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 2 |
2019-10-04T10:57:00.000Z
|
2021-10-01T06:46:17.000Z
|
tests/test_airterminalsingleductvavnoreheat.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 7 |
2015-11-04T02:25:01.000Z
|
2021-12-08T03:14:28.000Z
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.zone_hvac_air_loop_terminal_units import AirTerminalSingleDuctVavNoReheat
log = logging.getLogger(__name__)
class TestAirTerminalSingleDuctVavNoReheat(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_airterminalsingleductvavnoreheat(self):
pyidf.validation_level = ValidationLevel.error
obj = AirTerminalSingleDuctVavNoReheat()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# node
var_air_outlet_node_name = "node|Air Outlet Node Name"
obj.air_outlet_node_name = var_air_outlet_node_name
# node
var_air_inlet_node_name = "node|Air Inlet Node Name"
obj.air_inlet_node_name = var_air_inlet_node_name
# real
var_maximum_air_flow_rate = 0.0
obj.maximum_air_flow_rate = var_maximum_air_flow_rate
# alpha
var_zone_minimum_air_flow_input_method = "Constant"
obj.zone_minimum_air_flow_input_method = var_zone_minimum_air_flow_input_method
# real
var_constant_minimum_air_flow_fraction = 7.7
obj.constant_minimum_air_flow_fraction = var_constant_minimum_air_flow_fraction
# real
var_fixed_minimum_air_flow_rate = 8.8
obj.fixed_minimum_air_flow_rate = var_fixed_minimum_air_flow_rate
# object-list
var_minimum_air_flow_fraction_schedule_name = "object-list|Minimum Air Flow Fraction Schedule Name"
obj.minimum_air_flow_fraction_schedule_name = var_minimum_air_flow_fraction_schedule_name
# object-list
var_design_specification_outdoor_air_object_name = "object-list|Design Specification Outdoor Air Object Name"
obj.design_specification_outdoor_air_object_name = var_design_specification_outdoor_air_object_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.airterminalsingleductvavnoreheats[0].name, var_name)
self.assertEqual(idf2.airterminalsingleductvavnoreheats[0].availability_schedule_name, var_availability_schedule_name)
self.assertEqual(idf2.airterminalsingleductvavnoreheats[0].air_outlet_node_name, var_air_outlet_node_name)
self.assertEqual(idf2.airterminalsingleductvavnoreheats[0].air_inlet_node_name, var_air_inlet_node_name)
self.assertAlmostEqual(idf2.airterminalsingleductvavnoreheats[0].maximum_air_flow_rate, var_maximum_air_flow_rate)
self.assertEqual(idf2.airterminalsingleductvavnoreheats[0].zone_minimum_air_flow_input_method, var_zone_minimum_air_flow_input_method)
self.assertAlmostEqual(idf2.airterminalsingleductvavnoreheats[0].constant_minimum_air_flow_fraction, var_constant_minimum_air_flow_fraction)
self.assertAlmostEqual(idf2.airterminalsingleductvavnoreheats[0].fixed_minimum_air_flow_rate, var_fixed_minimum_air_flow_rate)
self.assertEqual(idf2.airterminalsingleductvavnoreheats[0].minimum_air_flow_fraction_schedule_name, var_minimum_air_flow_fraction_schedule_name)
self.assertEqual(idf2.airterminalsingleductvavnoreheats[0].design_specification_outdoor_air_object_name, var_design_specification_outdoor_air_object_name)
| 50.013514 | 162 | 0.773845 |
b87b3845d4367f8a3a1ab108656146ac638d6678
| 125 |
py
|
Python
|
tests/regression/RandomReg_500/ws_RandomReg_500_Ridge_mysql_code_gen.py
|
antoinecarme/sklearn2sql_heroku
|
d680db10683daa419324461eeea851dd8b103ad5
|
[
"BSD-3-Clause"
] | 1 |
2019-07-09T14:45:18.000Z
|
2019-07-09T14:45:18.000Z
|
tests/regression/RandomReg_500/ws_RandomReg_500_Ridge_mysql_code_gen.py
|
antoinecarme/sklearn2sql_heroku
|
d680db10683daa419324461eeea851dd8b103ad5
|
[
"BSD-3-Clause"
] | 5 |
2017-11-13T13:35:37.000Z
|
2021-11-11T12:57:20.000Z
|
tests/regression/RandomReg_500/ws_RandomReg_500_Ridge_mysql_code_gen.py
|
antoinecarme/sklearn2sql_heroku
|
d680db10683daa419324461eeea851dd8b103ad5
|
[
"BSD-3-Clause"
] | 1 |
2021-09-19T15:05:33.000Z
|
2021-09-19T15:05:33.000Z
|
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("Ridge" , "RandomReg_500" , "mysql")
| 25 | 66 | 0.792 |
795829e777b58f466017324eee8d5eba23bfc74e
| 1,050 |
py
|
Python
|
largest_rectangle_histogram.py
|
pranavdave893/Leetcode
|
1f30ea37af7b60585d168b15d9397143f53c92a1
|
[
"MIT"
] | null | null | null |
largest_rectangle_histogram.py
|
pranavdave893/Leetcode
|
1f30ea37af7b60585d168b15d9397143f53c92a1
|
[
"MIT"
] | null | null | null |
largest_rectangle_histogram.py
|
pranavdave893/Leetcode
|
1f30ea37af7b60585d168b15d9397143f53c92a1
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
    def largestRectangleArea(self, height: List[int]) -> int:
        # Monotonic-stack solution: keep indices of bars in non-decreasing height
        # order; when a shorter bar arrives, pop taller bars and compute the widest
        # rectangle each popped bar can support. Runs in O(n) time and space.
        stack = [-1]       # sentinel index so the width math works at the left edge
        height.append(0)   # sentinel bar that flushes the stack at the end
        ans = 0
        for i in range(len(height)):
            while height[i] < height[stack[-1]]:
                h = height[stack.pop()]
                w = i - stack[-1] - 1
                ans = max(ans, h * w)
            stack.append(i)
        return ans
    def largestRectangleArea_divide(self, heights: List[int]) -> int:
        # Divide-and-conquer alternative: the best rectangle either spans the whole
        # range at the height of its minimum bar, or lies entirely on one side of it.
        def divide(start: int, end: int) -> int:
if start > end:
return 0
min_idx = start
for idx in range(start, end+1):
if heights[min_idx] > heights[idx]:
min_idx = idx
return max(heights[min_idx] * (end - start + 1), max(divide(start, min_idx-1), divide(min_idx+1, end)))
return divide(0, len(heights)-1)
abc = Solution()
print (abc.largestRectangleArea([6,7,5,2,4,5,9,3]))
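# Expected output for the call above (a worked check, not in the original file): the bar of
# height 2 spans all 8 columns, so the largest rectangle is 2 * 8 = 16.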
| 30 | 115 | 0.480952 |
0d0085cc7af5fc569be7cf6c1fd433f90bd800b1
| 8,839 |
py
|
Python
|
sample_project/env/lib/python3.9/site-packages/qtpy/QtGui.py
|
Istiakmorsalin/ML-Data-Science
|
681e68059b146343ef55b0671432dc946970730d
|
[
"MIT"
] | 4 |
2021-11-19T03:25:13.000Z
|
2022-02-24T15:32:30.000Z
|
sample_project/env/lib/python3.9/site-packages/qtpy/QtGui.py
|
Istiakmorsalin/ML-Data-Science
|
681e68059b146343ef55b0671432dc946970730d
|
[
"MIT"
] | null | null | null |
sample_project/env/lib/python3.9/site-packages/qtpy/QtGui.py
|
Istiakmorsalin/ML-Data-Science
|
681e68059b146343ef55b0671432dc946970730d
|
[
"MIT"
] | 3 |
2020-08-04T02:48:32.000Z
|
2020-08-17T01:20:09.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2015 Colin Duquesnoy
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Provides QtGui classes and functions.
.. warning:: Only PyQt4/PySide QtGui classes compatible with PyQt5.QtGui are
exposed here. Therefore, you need to treat/use this package as if it were
the ``PyQt5.QtGui`` module.
"""
import warnings
from . import PYQT5, PYQT4, PYSIDE, PYSIDE2, PythonQtError
if PYQT5:
from PyQt5.QtGui import *
elif PYSIDE2:
from PySide2.QtGui import *
elif PYQT4:
try:
# Older versions of PyQt4 do not provide these
from PyQt4.QtGui import (QGlyphRun, QMatrix2x2, QMatrix2x3,
QMatrix2x4, QMatrix3x2, QMatrix3x3,
QMatrix3x4, QMatrix4x2, QMatrix4x3,
QMatrix4x4, QTouchEvent, QQuaternion,
QRadialGradient, QRawFont, QStaticText,
QVector2D, QVector3D, QVector4D,
qFuzzyCompare)
except ImportError:
pass
try:
from PyQt4.Qt import QKeySequence, QTextCursor
except ImportError:
# In PyQt4-sip 4.19.13 QKeySequence and QTextCursor are in PyQt4.QtGui
from PyQt4.QtGui import QKeySequence, QTextCursor
from PyQt4.QtGui import (QAbstractTextDocumentLayout, QActionEvent, QBitmap,
QBrush, QClipboard, QCloseEvent, QColor,
QConicalGradient, QContextMenuEvent, QCursor,
QDoubleValidator, QDrag,
QDragEnterEvent, QDragLeaveEvent, QDragMoveEvent,
QDropEvent, QFileOpenEvent, QFocusEvent, QFont,
QFontDatabase, QFontInfo, QFontMetrics,
QFontMetricsF, QGradient, QHelpEvent,
QHideEvent, QHoverEvent, QIcon, QIconDragEvent,
QIconEngine, QImage, QImageIOHandler, QImageReader,
QImageWriter, QInputEvent, QInputMethodEvent,
QKeyEvent, QLinearGradient,
QMouseEvent, QMoveEvent, QMovie,
QPaintDevice, QPaintEngine, QPaintEngineState,
QPaintEvent, QPainter, QPainterPath,
QPainterPathStroker, QPalette, QPen, QPicture,
QPictureIO, QPixmap, QPixmapCache, QPolygon,
QPolygonF, QRegExpValidator, QRegion, QResizeEvent,
QSessionManager, QShortcutEvent, QShowEvent,
QStandardItem, QStandardItemModel,
QStatusTipEvent, QSyntaxHighlighter, QTabletEvent,
QTextBlock, QTextBlockFormat, QTextBlockGroup,
QTextBlockUserData, QTextCharFormat,
QTextDocument, QTextDocumentFragment,
QTextDocumentWriter, QTextFormat, QTextFragment,
QTextFrame, QTextFrameFormat, QTextImageFormat,
QTextInlineObject, QTextItem, QTextLayout,
QTextLength, QTextLine, QTextList, QTextListFormat,
QTextObject, QTextObjectInterface, QTextOption,
QTextTable, QTextTableCell, QTextTableCellFormat,
QTextTableFormat, QTransform,
QValidator, QWhatsThisClickedEvent, QWheelEvent,
QWindowStateChangeEvent, qAlpha, qBlue,
qGray, qGreen, qIsGray, qRed, qRgb,
qRgba, QIntValidator)
    # QDesktopServices has been split into (QDesktopServices and
# QStandardPaths) in Qt5
# It only exposes QDesktopServices that are still in pyqt5
from PyQt4.QtGui import QDesktopServices as _QDesktopServices
class QDesktopServices():
openUrl = _QDesktopServices.openUrl
setUrlHandler = _QDesktopServices.setUrlHandler
unsetUrlHandler = _QDesktopServices.unsetUrlHandler
def __getattr__(self, name):
attr = getattr(_QDesktopServices, name)
new_name = name
if name == 'storageLocation':
new_name = 'writableLocation'
warnings.warn(("Warning QDesktopServices.{} is deprecated in Qt5"
"we recommend you use QDesktopServices.{} instead").format(name, new_name),
DeprecationWarning)
return attr
QDesktopServices = QDesktopServices()
elif PYSIDE:
from PySide.QtGui import (QAbstractTextDocumentLayout, QActionEvent, QBitmap,
QBrush, QClipboard, QCloseEvent, QColor,
QConicalGradient, QContextMenuEvent, QCursor,
QDoubleValidator, QDrag,
QDragEnterEvent, QDragLeaveEvent, QDragMoveEvent,
QDropEvent, QFileOpenEvent, QFocusEvent, QFont,
QFontDatabase, QFontInfo, QFontMetrics,
QFontMetricsF, QGradient, QHelpEvent,
QHideEvent, QHoverEvent, QIcon, QIconDragEvent,
QIconEngine, QImage, QImageIOHandler, QImageReader,
QImageWriter, QInputEvent, QInputMethodEvent,
QKeyEvent, QKeySequence, QLinearGradient,
QMatrix2x2, QMatrix2x3, QMatrix2x4, QMatrix3x2,
QMatrix3x3, QMatrix3x4, QMatrix4x2, QMatrix4x3,
QMatrix4x4, QMouseEvent, QMoveEvent, QMovie,
QPaintDevice, QPaintEngine, QPaintEngineState,
QPaintEvent, QPainter, QPainterPath,
QPainterPathStroker, QPalette, QPen, QPicture,
QPictureIO, QPixmap, QPixmapCache, QPolygon,
QPolygonF, QQuaternion, QRadialGradient,
QRegExpValidator, QRegion, QResizeEvent,
QSessionManager, QShortcutEvent, QShowEvent,
QStandardItem, QStandardItemModel,
QStatusTipEvent, QSyntaxHighlighter, QTabletEvent,
QTextBlock, QTextBlockFormat, QTextBlockGroup,
QTextBlockUserData, QTextCharFormat, QTextCursor,
QTextDocument, QTextDocumentFragment,
QTextFormat, QTextFragment,
QTextFrame, QTextFrameFormat, QTextImageFormat,
QTextInlineObject, QTextItem, QTextLayout,
QTextLength, QTextLine, QTextList, QTextListFormat,
QTextObject, QTextObjectInterface, QTextOption,
QTextTable, QTextTableCell, QTextTableCellFormat,
QTextTableFormat, QTouchEvent, QTransform,
QValidator, QVector2D, QVector3D, QVector4D,
QWhatsThisClickedEvent, QWheelEvent,
QWindowStateChangeEvent, qAlpha, qBlue,
qGray, qGreen, qIsGray, qRed, qRgb, qRgba,
QIntValidator)
    # QDesktopServices has been split into (QDesktopServices and
# QStandardPaths) in Qt5
# It only exposes QDesktopServices that are still in pyqt5
from PySide.QtGui import QDesktopServices as _QDesktopServices
class QDesktopServices():
openUrl = _QDesktopServices.openUrl
setUrlHandler = _QDesktopServices.setUrlHandler
unsetUrlHandler = _QDesktopServices.unsetUrlHandler
def __getattr__(self, name):
attr = getattr(_QDesktopServices, name)
new_name = name
if name == 'storageLocation':
new_name = 'writableLocation'
warnings.warn(("Warning QDesktopServices.{} is deprecated in Qt5"
"we recommend you use QDesktopServices.{} instead").format(name, new_name),
DeprecationWarning)
return attr
QDesktopServices = QDesktopServices()
else:
raise PythonQtError('No Qt bindings could be found')
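# A minimal downstream-usage sketch (illustrative, not part of the original module): application
# code imports Qt classes from qtpy once and runs unchanged on whichever supported binding
# (PyQt5, PySide2, PyQt4 or PySide) is installed, e.g.
#
#     from qtpy.QtGui import QColor, QIcon
#     accent = QColor(30, 144, 255)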
| 54.561728 | 104 | 0.552438 |
f2c85a161d5d789fe7727b5480325c54a3008cdc
| 29,438 |
py
|
Python
|
armi/reactor/reactorParameters.py
|
ZanderUF/armi
|
c55ebe4d77821d3357ddd3326478ffaf44962c89
|
[
"Apache-2.0"
] | 1 |
2022-01-23T06:09:50.000Z
|
2022-01-23T06:09:50.000Z
|
armi/reactor/reactorParameters.py
|
ZanderUF/armi
|
c55ebe4d77821d3357ddd3326478ffaf44962c89
|
[
"Apache-2.0"
] | null | null | null |
armi/reactor/reactorParameters.py
|
ZanderUF/armi
|
c55ebe4d77821d3357ddd3326478ffaf44962c89
|
[
"Apache-2.0"
] | 1 |
2020-08-26T09:02:06.000Z
|
2020-08-26T09:02:06.000Z
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reactor parameter definitions
"""
import numpy
from armi.utils import units
from armi.reactor import parameters
from armi.reactor.parameters import ParamLocation
from armi.reactor import geometry
def defineReactorParameters():
pDefs = parameters.ParameterDefinitionCollection()
pDefs.add(
parameters.Parameter(
"rdIterNum",
units="int",
description="Number of region-density equilibrium iterations",
location=ParamLocation.AVERAGE,
saveToDB=True,
default=parameters.NoDefault,
setter=parameters.NoDefault,
categories=set(),
)
)
with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb:
pb.defParam(
"cycle",
units="int",
description="current cycle of the simulation",
default=0,
)
pb.defParam(
"cycleLength",
units="EFP days",
description="The cycle length of the reactor while power is being produced",
)
pb.defParam(
"availabilityFactor",
units="fraction",
description="Availability factor of the plant. This is the fraction of the time that "
"the plant is operating.",
default=1.0,
)
pb.defParam(
"capacityFactor",
units="fraction",
description="The fraction of power produced by the plant this cycle over the "
"full-power, 100% uptime potential of the plant.",
default=1.0,
)
pb.defParam("lcoe", units="$/kWh", description="Levelised cost of electricity")
pb.defParam(
"time",
units="yr",
description="time of reactor life from BOL to current time node",
categories=["depletion"],
)
pb.defParam("timeNode", units="", description="timeNode", default=0)
with pDefs.createBuilder(
location=ParamLocation.AVERAGE, default=0.0, categories=["economics"]
) as pb:
pb.defParam(
"eFeedMT",
units="MT",
description="Total feed material required in reactor economics",
)
pb.defParam(
"eFissile",
units="MT",
description="Fissile mass required in reactor economics",
)
pb.defParam(
"eFuelCycleCost",
units="$/MT",
description="Cost of fuel cycle in an equilibrium-mode in reactor economics",
)
pb.defParam(
"eFuelCycleCostRate",
units="$/year",
description="Rate of fuel cycle cost in an equilibrium mode in reactor economics",
)
pb.defParam(
"eProduct",
units="MT",
description="Total mass of manufactured fuel in reactor economics",
)
pb.defParam(
"eSWU",
units="kgSWU",
description="Separative work units in reactor economics",
)
pb.defParam(
"eTailsMT", units="MT", description="Depleted Uranium in reactor economics"
)
return pDefs
def defineCoreParameters():
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
def detailedNucKeys(self, value):
if value is None or isinstance(value, numpy.ndarray):
self._p_detailedNucKeys = value
else:
self._p_detailedNucKeys = numpy.array(value)
pb.defParam(
"detailedNucKeys",
setter=detailedNucKeys,
units="ZZZAAA (ZZZ atomic number, AAA mass number, + 100 * m for metastable states",
description="Nuclide vector keys, used to map densities in b.p.detailedNDens and a.p.detailedNDens",
saveToDB=True,
default=None,
)
with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb:
pb.defParam(
"orientation",
units="degrees",
description=(
"Triple representing rotations counterclockwise around each spatial axis. For example, "
"a hex assembly rotated by 1/6th has orientation (0,0,60.0)"
),
default=None,
)
with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb:
pb.defParam("currentPercentExpanded", units="", description="")
pb.defParam(
"maxAssemNum", units=None, description="maximum assembly number", default=0
)
pb.defParam(
"numAssembliesFabricated",
units="",
description="numAssembliesFabricated",
default=0,
)
pb.defParam(
"numAssembliesInSFP", units="", description="numAssembliesInSFP", default=0
)
pb.defParam("numMoves", units="", description="numMoves", default=0)
pb.defParam("timingDepletion", units="", description="timingDepletion")
pb.defParam("timingDif3d", units="", description="timingDif3d")
pb.defParam("timingDistribute", units="", description="timingDistribute")
pb.defParam("timingMc2", units="", description="timingMc2")
pb.defParam("timingSubchan", units="", description="timingSubchan")
with pDefs.createBuilder(default=0.0, location="N/A") as pb:
pb.defParam(
"breedingRatio2",
units="N/A",
description="Ratio of fissile Burned and discharged to fissile discharged",
saveToDB=False,
)
pb.defParam(
"crWorthRequiredPrimary",
units="$",
description="The total worth in $ required for primary control rods to shutdown reactor accounting for uncertainties and margins",
)
pb.defParam(
"crWorthRequiredSecondary",
units="$",
description="The total worth in $ required for secondary control rods to shutdown reactor accounting for uncertainties and margins",
)
pb.defParam(
"critSearchSlope", units=None, description="Critical keff search slope"
)
pb.defParam(
"directPertKeff",
units=None,
description="K-eff is computed for the perturbed case with a direct calculation",
)
pb.defParam(
"distortionReactivity",
units="pcm",
description="The reactivity effect of the current distortions",
default=None,
)
pb.defParam(
"doublingTime",
units="EFPY",
description="The time it takes to produce enough spent fuel to fuel a daughter reactor",
)
pb.defParam("fissileMass", units="g", description="Fissile mass of the reactor")
pb.defParam(
"heavyMetalMass", units="g", description="Heavy Metal mass of the reactor"
)
pb.defParam(
"innerMatrixIndex",
units=None,
description="The item index of the inner matrix in an optimization case",
)
pb.defParam("keffUnc", units=None, description="Uncontrolled keff")
pb.defParam(
"lastKeff",
units=None,
description="Previously calculated Keff for potential keff convergence",
)
pb.defParam(
"loadPadDpaAvg",
units="dpa",
description="The highest average dpa in any load pad",
)
pb.defParam(
"loadPadDpaPeak", units="dpa", description="The peak dpa in any load pad"
)
pb.defParam("maxcladFCCI", units="", description="", default=0.0)
pb.defParam(
"maxCladulof",
units=units.DEGC,
description="Max Clading Temperature in Unprotected Loss of Flow (ULOF) transient",
)
pb.defParam(
"maxCladulohs",
units=units.DEGC,
description="Max Clading Temperature in Unprotected Loss of Heat Sink (ULOHS) transient",
)
pb.defParam(
"maxCladutop",
units=units.DEGC,
description="Max Clading Temperature in Unprotected Transient Overpower (UTOP) transient",
)
pb.defParam(
"maxCladptop",
units=units.DEGC,
description="Max Clading Temperature in protected Transient Overpower (PTOP) transient",
)
pb.defParam(
"maxCladlockrotor",
units=units.DEGC,
description="Max Clading Temperature in lock rotor transient",
)
pb.defParam(
"maxCladplohs",
units=units.DEGC,
description="Max Clading Temperature in protected loss of heat sink (PLOHS) transient",
)
pb.defParam(
"maxCladplof",
units=units.DEGC,
description="Max Clading Temperature in protected loss of flow (PLOF) transient",
)
pb.defParam(
"maxCladplof2pump",
units=units.DEGC,
description="Max Clading Temperature in protected loss of 2 primary pumps (PLOF2pump) transient",
)
pb.defParam(
"maxCladoscillation",
units=units.DEGC,
description="Max Clading Temperature in oscillation-driven transient",
)
pb.defParam(
"maxFueloscillation",
units=units.DEGC,
description="Max Fuel Temperature in oscillation-driven transient",
)
pb.defParam(
"maxCladpowerdefect",
units=units.DEGC,
description="Max Clading Temperature in powerdefect transient",
)
pb.defParam(
"maxFuelpowerdefect",
units=units.DEGC,
description="Max Fuel Temperature in powerdefect transient",
)
pb.defParam(
"maxCladsteadystate",
units=units.DEGC,
description="Max Clading Temperature in steady state transient",
)
pb.defParam(
"maxDPA",
units="dpa",
description="Maximum DPA based on pin-level max if it exists, block level max otherwise",
)
pb.defParam("maxFuelulof", units=units.DEGC, description="maxFuelulof")
pb.defParam("maxFuelulohs", units=units.DEGC, description="maxFuelulohs")
pb.defParam("maxFuelutop", units=units.DEGC, description="maxFuelutop")
pb.defParam(
"maxFuelptop",
units=units.DEGC,
description="Max Clading Temperature in protected Transient Overpower (PTOP) transient",
)
pb.defParam(
"maxFuellockrotor",
units=units.DEGC,
description="Max Clading Temperature in lock rotor transient",
)
pb.defParam(
"maxFuelplohs",
units=units.DEGC,
description="Max Clading Temperature in protected loss of heat sink (PLOHS) transient",
)
pb.defParam(
"maxFuelplof",
units=units.DEGC,
description="Max Clading Temperature in protected loss of flow (PLOF) transient",
)
pb.defParam(
"maxFuelplof2pump",
units=units.DEGC,
description="Max Clading Temperature in protected loss of 2 primary pumps (PLOF2pump) transient",
)
pb.defParam("maxGridDpa", units="dpa", description="Grid plate max dpa")
pb.defParam(
"maxProcessMemoryInMB",
units="MB",
description="Maximum memory used by an ARMI process",
)
pb.defParam(
"maxTH2SigmaCladIDT",
units=units.DEGC,
description="Max 2-sigma temperature of the inner-diameter of the cladding",
default=0.0,
categories=["block-max"],
)
pb.defParam(
"maxTranPCT",
units=units.DEGC,
description="Max Peak Clading Temperature of transients",
)
pb.defParam(
"minProcessMemoryInMB",
units="MB",
description="Minimum memory used by an ARMI process",
)
pb.defParam(
"minutesSinceStart",
units="min",
description="Run time since the beginning of the calculation",
)
pb.defParam(
"outsideFuelRing",
units="int",
description="The ring with the fraction of flux that best meets the target",
)
pb.defParam(
"outsideFuelRingFluxFr",
units=None,
description="Ratio of the flux in a ring to the total reactor fuel flux",
)
pb.defParam(
"peakGridDpaAt60Years",
units="dpa",
description="Grid plate peak dpa after 60 years irradiation",
)
pb.defParam(
"topInitiator",
units="$",
description="Worth in $ of most valuable rod in critical position",
)
pb.defParam(
"totalIntrinsicSource",
units="neutrons/s",
description="Full core intrinsic neutron source from spontaneous fissions before a decay period",
)
pb.defParam(
"totalIntrinsicSourceDecayed",
units="neutrons/s",
description="Full core intrinsic source from spontaneous fissions after a decay period",
)
with pDefs.createBuilder(
location=ParamLocation.AVERAGE, default=0.0, categories=["thermal hydraulics"]
) as pb:
pb.defParam(
"assemblyPumpHead",
units="Pa",
description="Pressure drop for the max power assembly in zone",
)
pb.defParam(
"CoreAvgTOut",
units=units.DEGC,
description="Core average outlet temperature",
)
pb.defParam("CoreMdot", units="kg/s", description="Mass flow rate of full core")
pb.defParam(
"outletTempIdeal",
units=units.DEGC,
description="Average outlet tempeture loop through all assemblies after doing TH",
)
pb.defParam(
"SCMaxDilationPressure",
units="Pa",
description="The maximum dilation pressure in the core",
)
pb.defParam(
"SCorificeEfficiency",
units=None,
description="Ratio of total flow rate for the optimized orificing scheme to total flow rate for an ideal orificing scheme",
)
pb.defParam(
"SCovercoolingRatio",
units=None,
description="Ratio of the max flow rate to the average flow rate",
)
pb.defParam(
"THmaxDeltaPPump",
units="Pa",
description="The maximum pumping pressure rise required to pump the given mass flow rate through the rod bundle",
)
pb.defParam(
"THmaxDilationPressure", units="", description="THmaxDilationPressure"
)
pb.defParam(
"THoutletTempIdeal",
units=units.DEGC,
description="Average outlet temperature loop through all assemblies after doing TH",
)
pb.defParam("vesselTemp", units=units.DEGC, description="vessel temperature")
pb.defParam(
"LMDT",
units=units.DEGC,
description="Log mean temperature difference in heat exchanger",
)
pb.defParam(
"peakTemperature",
units=units.DEGC,
description="peak temperature anywhere in the reactor",
)
with pDefs.createBuilder(
location=ParamLocation.AVERAGE, default=0.0, categories=["neutronics"]
) as pb:
pb.defParam(
"maxdetailedDpaPeak",
units="dpa",
description="Highest peak dpa of any block in the problem",
)
pb.defParam(
"maxFlux", units="n/cm^2/s", description="Max neutron flux in the core"
)
pb.defParam(
"adjWeightedFisSrc",
units="1/cm^2/s^2",
description="Volume-integrated adjoint flux weighted fission source",
)
pb.defParam(
"maxDetailedDpaThisCycle",
units="dpa",
description="Max increase in dpa this cycle (only defined at EOC)",
)
pb.defParam(
"dpaFullWidthHalfMax",
units="cm",
description="Full width at half max of the detailedDpa distribution",
)
pb.defParam(
"elevationOfACLP3Cycles",
units="cm",
description="minimum axial location of the ACLP for 3 cycles at peak dose",
)
pb.defParam(
"elevationOfACLP7Cycles",
units="cm",
description="minimum axial location of the ACLP for 7 cycles at peak dose",
)
pb.defParam(
"maxpercentBu",
units="%FIMA",
description="Max percent burnup on any block in the problem",
)
pb.defParam("rxSwing", units="pcm", description="Reactivity swing")
pb.defParam(
"maxBuF",
units="%",
description="Maximum burnup seen in any feed assemblies",
)
pb.defParam(
"maxBuI",
units="%",
description="Maximum burnup seen in any igniter assemblies",
)
pb.defParam("keff", units=None, description="Global multiplication factor")
pb.defParam(
"partisnKeff",
units=None,
description="Global multiplication factor from PARTISN transport calculation",
)
pb.defParam(
"peakKeff", units=None, description="Maximum keff in the simulation"
)
pb.defParam(
"fastFluxFrAvg", units=None, description="Fast flux fraction average"
)
pb.defParam(
"leakageFracTotal", units=None, description="Total leakage fraction"
)
pb.defParam(
"leakageFracPlanar", units=None, description="Leakage fraction in planar"
)
pb.defParam(
"leakageFracAxial",
units=None,
description="Leakage fraction in axial direction",
)
pb.defParam(
"maxpdens",
units="W/cm^3",
description="Maximum avg. volumetric power density of all blocks",
)
pb.defParam(
"maxPD",
units="MW/m^2",
description="Maximum areal power density of all assemblies",
)
pb.defParam(
"jumpRing",
units=None,
description=(
"Radial ring number where bred-up fuel assemblies shuffle jump from the low power to the "
"high power region."
),
)
with pDefs.createBuilder(
default=0.0,
location=ParamLocation.AVERAGE,
categories=["reactivity coefficients"],
) as pb:
pb.defParam("axial", units="cents/K", description="Axial expansion coefficient")
pb.defParam("doppler", units="cents/K", description="Doppler coefficient")
pb.defParam(
"dopplerConst", units="cents * K^(n-1)", description="Doppler constant"
)
pb.defParam(
"fuelDensity", units="cents/K", description="Fuel temperature coefficient"
)
pb.defParam(
"coolantDensity",
units="cents/K",
description="Coolant temperature coefficient",
)
pb.defParam(
"totalCoolantDensity",
units="cents/K",
description="Coolant temperature coefficient weighted to include bond and interstitial effects",
)
pb.defParam(
"cladDensity", units="cents/K", description="Clad temperature coefficient"
)
pb.defParam(
"structureDensity",
units="cents/K",
description="Structure temperature coefficient",
)
pb.defParam(
"Voideddoppler", units="cents/K", description="Voided Doppler coefficient"
)
pb.defParam(
"VoideddopplerConst",
units="cents * K^(n-1)",
description="Voided Doppler constant",
)
pb.defParam("voidWorth", units="$", description="Coolant void worth")
pb.defParam("voidedKeff", units=None, description="Voided keff")
pb.defParam(
"radialHT9",
units="cents/K",
description="Radial expansion coefficient when driven by thermal expansion of HT9.",
)
pb.defParam(
"radialSS316",
units="cents/K",
description="Radial expansion coefficient when driven by thermal expansion of SS316.",
)
with pDefs.createBuilder(
default=0.0,
location=ParamLocation.AVERAGE,
categories=["reactivity coefficients", "kinetics"],
) as pb:
pb.defParam(
"beta",
units=None,
description="Effective delayed neutron fraction",
default=None,
)
pb.defParam(
"betaComponents",
units=None,
description="Group-wise delayed neutron fractions.",
default=None,
)
pb.defParam(
"betaDecayConstants",
units="1/s",
description="Group-wise precursor decay constants",
default=None,
)
pb.defParam(
"promptNeutronGenerationTime",
units="s",
description="Prompt neutron generation time",
)
pb.defParam(
"promptNeutronLifetime", units="s", description="Prompt neutron lifetime"
)
with pDefs.createBuilder(
default=0.0,
location=ParamLocation.AVERAGE,
categories=["reactivity coefficients", "core wide"],
) as pb:
# CORE WIDE REACTIVITY COEFFICIENTS
pb.defParam(
"rxFuelAxialExpansionCoeffPerTemp",
units="dk/kk'-K",
description="Fuel Axial Expansion Coefficient",
)
pb.defParam(
"rxGridPlateRadialExpansionCoeffPerTemp",
units="dk/kk'-K",
description="Grid Plate Radial Expansion Coefficient",
)
pb.defParam(
"rxAclpRadialExpansionCoeffPerTemp",
units="dk/kk'-K",
description="ACLP Radial Expansion Coefficient",
)
pb.defParam(
"rxControlRodDrivelineExpansionCoeffPerTemp",
units="dk/kk'-K",
description="control rod driveline expansion coefficient",
)
pb.defParam(
"rxCoreWideCoolantVoidWorth",
units="dk/kk'",
description="Core-Wide Coolant Void Worth",
)
pb.defParam(
"rxSpatiallyDependentCoolantVoidWorth",
units="dk/kk'",
description="Spatially-Dependent Coolant Void Worth",
)
# FUEL COEFFICIENTS
pb.defParam(
"rxFuelDensityCoeffPerTemp",
units="dk/kk'-K",
description="Fuel Density Coefficient",
)
pb.defParam(
"rxFuelDopplerCoeffPerTemp",
units="dk/kk'-K",
description="Fuel Doppler Coefficient",
)
pb.defParam(
"rxFuelDopplerConstant",
units="dk/kk' K**(n-1)",
description="Fuel Doppler Constant",
)
pb.defParam(
"rxFuelVoidedDopplerCoeffPerTemp",
units="dk/kk'-K",
description="Fuel Voided-Coolant Doppler Coefficient",
)
pb.defParam(
"rxFuelVoidedDopplerConstant",
units="dk/kk' K**(n-1)",
description="Fuel Voided-Coolant Doppler Constant",
)
pb.defParam(
"rxFuelTemperatureCoeffPerTemp",
units="dk/kk'-K",
description="Fuel Temperature Coefficient",
)
pb.defParam(
"rxFuelVoidedTemperatureCoeffPerTemp",
units="dk/kk'-K",
description="Fuel Voided-Coolant Temperature Coefficient",
)
# CLAD COEFFICIENTS
pb.defParam(
"rxCladDensityCoeffPerTemp",
units="dk/kk'-K",
description="Clad Density Coefficient",
)
pb.defParam(
"rxCladDopplerCoeffPerTemp",
units="dk/kk'-K",
description="Clad Doppler Coefficient",
)
pb.defParam(
"rxCladDopplerConstant",
units="dk/kk' K**(n-1)",
description="Clad Doppler Constant",
)
pb.defParam(
"rxCladTemperatureCoeffPerTemp",
units="dk/kk'-K",
description="Clad Temperature Coefficient",
)
# STRUCTURE COEFFICIENTS
pb.defParam(
"rxStructureDensityCoeffPerTemp",
units="dk/kk'-K",
description="Structure Density Coefficient",
)
pb.defParam(
"rxStructureDopplerCoeffPerTemp",
units="dk/kk'-K",
description="Structure Doppler Coefficient",
)
pb.defParam(
"rxStructureDopplerConstant",
units="dk/kk' K**(n-1)",
description="Structure Doppler Constant",
)
pb.defParam(
"rxStructureTemperatureCoeffPerTemp",
units="dk/kk'-K",
description="Structure Temperature Coefficient",
)
# COOLANT COEFFICIENTS
pb.defParam(
"rxCoolantDensityCoeffPerTemp",
units="dk/kk'-K",
description="Coolant Density Coefficient",
)
pb.defParam(
"rxCoolantTemperatureCoeffPerTemp",
units="dk/kk'-K",
description="Coolant Temperature Coefficient",
)
with pDefs.createBuilder(
location=ParamLocation.AVERAGE, categories=["equilibrium"]
) as pb:
pb.defParam("boecKeff", units=None, description="BOEC Keff", default=0.0)
pb.defParam(
"cyclics",
units="int",
description=(
"The number of cyclic mode equilibrium-cycle "
"iterations that have occurred so far"
),
default=0,
)
pb.defParam(
"maxCyclicNErr",
units=None,
description="Maximum relative number density error",
default=0.0,
)
with pDefs.createBuilder(
location=ParamLocation.AVERAGE, categories=["equilibrium"]
) as pb:
pb.defParam(
"breedingRatio",
units="N/A",
description="Breeding ratio of the reactor",
default=0.0,
)
pb.defParam("ConvRatioCore", units="?", description="?")
pb.defParam("absPerFisCore", units="?", description="?")
pb.defParam(
"axialExpansionPercent",
units="%",
description="Percent of axial growth of fuel blocks",
default=0.0,
)
pb.defParam("corePow", units="?", description="?")
pb.defParam("coupledIteration", units="?", description="?", default=0)
pb.defParam("fisFrac", units="?", description="?")
pb.defParam("fisRateCore", units="?", description="?")
pb.defParam(
"maxcladWastage",
units="microns",
description="Maximum clad wastage in any block in the core",
default=0.0,
categories=["block-max"],
)
pb.defParam(
"maxdilationTotal",
units="?",
description="?",
default=0.0,
categories=["block-max"],
)
pb.defParam(
"maxresidence",
units="?",
description="?",
default=0.0,
categories=["block-max"],
)
pb.defParam("medAbsCore", units="?", description="?")
pb.defParam("medFluxCore", units="?", description="?")
pb.defParam("medSrcCore", units="?", description="?")
pb.defParam("pkFlux", units="?", description="?")
pb.defParam(
"power",
units="W",
description="Rated thermal power of the reactor core. Corresponds to the "
"nuclear power generated in the core.",
)
pb.defParam(
"powerDecay",
units="W",
description="Decay power from decaying radionuclides",
)
return pDefs
| 29.855984 | 144 | 0.559413 |
269f3787c499f48606500efcfa8c3b698df87d36
| 4,482 |
py
|
Python
|
trax/shapes.py
|
yakovkeselman/trax
|
615432bbc58ffb5bdf83a771e8f8b470995456db
|
[
"Apache-2.0"
] | 1 |
2020-05-30T15:19:39.000Z
|
2020-05-30T15:19:39.000Z
|
trax/shapes.py
|
ZachT1711/trax
|
a0a3dd8d49e53fc48bb24cc08c10a8a53517e7bc
|
[
"Apache-2.0"
] | null | null | null |
trax/shapes.py
|
ZachT1711/trax
|
a0a3dd8d49e53fc48bb24cc08c10a8a53517e7bc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core class and functions for handling data abstractly as shapes/dtypes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class ShapeDtype(object):
"""A NumPy ndarray-like object abstracted as shape and dtype.
Main use is for representing input and output signatures.
"""
__slots__ = ['shape', 'dtype']
def __init__(self, shape, dtype=np.float32):
"""Creates a `ShapeDtype` instance, with canonicalized `shape` and `dtype`.
Args:
shape: A tuple or list, each element of which is an int or, less often,
`None`.
dtype: A `dtype` object, either from NumPy or TensorFlow.
Returns:
A `ShapeDtype` instance whose `shape` is a tuple and `dtype` is a NumPy
`dtype` object.
"""
# Canonicalize shape and dtype.
if isinstance(shape, list):
shape = tuple(shape)
if not isinstance(shape, tuple):
raise TypeError('shape must be tuple or list; got: {}'.format(shape))
if isinstance(dtype, tf.DType):
dtype = dtype.as_numpy_dtype
self.shape = shape
self.dtype = dtype
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.shape == other.shape
and self.dtype == other.dtype)
def __ne__(self, other):
return not self == other
def __repr__(self):
return 'ShapeDtype{{shape:{}, dtype:{}}}'.format(self.shape, self.dtype)
def __len__(self):
"""Returns length of 1; relevant to input and output signatures."""
return 1
def as_tuple(self):
return self.shape, self.dtype
def replace(self, **kwargs):
"""Creates a copy of the object with some parameters replaced."""
return type(self)(
shape=kwargs.pop('shape', self.shape),
dtype=kwargs.pop('dtype', self.dtype),
)
def signature(obj):
"""Returns a `ShapeDtype` signature for the given `obj`.
A signature is either a `ShapeDtype` instance or a tuple of `ShapeDtype`
instances. Note that this function is permissive with respect to its inputs
(accepts lists or tuples, and underlying objects can be any type as long as
they have shape and dtype attributes), but strict with respect to its outputs
(only `ShapeDtype`, and only tuples).
Args:
obj: An object that has `shape` and `dtype` attributes, or a list/tuple
of such objects.
Returns:
A single `ShapeDtype` instance if the signature has one element, else a
tuple of `ShapeDtype` instances.
"""
if isinstance(obj, (list, tuple)):
output = tuple(signature(x) for x in obj)
return output[0] if len(output) == 1 else output
else:
return ShapeDtype(obj.shape, obj.dtype)
def splice_signatures(*sigs):
"""Creates a new signature by splicing together any number of signatures.
The splicing effectively flattens the top level input signatures. For
instance, it would perform the following mapping:
- *sigs: sd1, (sd2, sd3, sd4), (), sd5
- return: (sd1, sd2, sd3, sd4, sd5)
Args:
*sigs: Any number of signatures. A signature is either a `ShapeDtype`
instance or a tuple of `ShapeDtype` instances.
Returns:
A single `ShapeDtype` instance if the spliced signature has one element,
else a tuple of `ShapeDtype` instances.
"""
result_sigs = []
for sig in sigs:
if isinstance(sig, (list, tuple)):
result_sigs.extend(sig)
else:
result_sigs.append(sig)
return result_sigs[0] if len(result_sigs) == 1 else tuple(result_sigs)
def assert_shape_equals(array, shape):
"""Asserts that an array has the given shape."""
assert array.shape == shape, (
'Invalid shape {}; expected {}.'.format(array.shape, shape)
)
def assert_same_shape(array1, array2):
"""Asserts that two arrays have the same shapes."""
assert_shape_equals(array1, array2.shape)
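# A minimal usage sketch (relies only on the imports above; not part of the original module):
if __name__ == '__main__':
  x = np.zeros((2, 3), dtype=np.float32)
  single = signature(x)                      # ShapeDtype with shape (2, 3), dtype float32
  pair = signature((x, x))                   # tuple of two ShapeDtype instances
  spliced = splice_signatures(single, pair)  # flattened into a 3-element tuple
  assert len(spliced) == 3
  assert_same_shape(x, np.ones((2, 3), dtype=np.float32))
  print(single, spliced)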
| 31.787234 | 79 | 0.693887 |
557db83a9a9a5aef8f2940f9bedafd655e88fac1
| 1,129 |
py
|
Python
|
configs/litehrnet/fcn_litehr18v2_split_512x512_40k_voc12aug.py
|
openvinotoolkit/mmsegmentation
|
9f50fc158be50594ea4aecf0a07ea652c91ec846
|
[
"Apache-2.0"
] | 3 |
2021-12-21T07:25:13.000Z
|
2022-02-07T01:59:19.000Z
|
configs/litehrnet/fcn_litehr18v2_split_512x512_40k_voc12aug.py
|
openvinotoolkit/mmsegmentation
|
9f50fc158be50594ea4aecf0a07ea652c91ec846
|
[
"Apache-2.0"
] | 13 |
2021-12-10T15:08:56.000Z
|
2022-03-23T08:58:03.000Z
|
configs/litehrnet/fcn_litehr18v2_split_512x512_40k_voc12aug.py
|
evgeny-izutov/mmsegmentation
|
9f50fc158be50594ea4aecf0a07ea652c91ec846
|
[
"Apache-2.0"
] | 3 |
2021-11-11T23:16:51.000Z
|
2021-12-08T23:49:29.000Z
|
_base_ = [
'../_base_/models/fcn_litehr18v2_no-aggregator.py', '../_base_/datasets/pascal_voc12_aug.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_cos_40k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
decode_head=dict(
type='FCNHead',
in_channels=[60, 80, 160, 320],
in_index=[0, 1, 2, 3],
input_transform='multiple_select',
channels=60,
kernel_size=1,
num_convs=0,
concat_input=False,
dropout_ratio=-1,
num_classes=21,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
enable_out_norm=False,
loss_decode=[
dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_jitter_prob=0.01,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=10.0
),
]
),
train_cfg=dict(
mix_loss=dict(
enable=False,
weight=0.1
),
),
)
evaluation = dict(
metric='mIoU',
)
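# Note on composition (an explanatory sketch, not part of the original config): the `_base_`
# list pulls in the base model/dataset/runtime/schedule configs, and the dicts above override
# or extend the matching keys from those bases (e.g. decode_head and train_cfg of the base
# model config).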
| 26.255814 | 97 | 0.551816 |
9e8d0cdd608d2ac6f64ff294ad8c84144336af78
| 7,377 |
py
|
Python
|
zzdb/schema.py
|
AndreiPuchko/zzdb
|
5a6878c5d9b7f013b9ddb940fe6c430ba62316fc
|
[
"Apache-2.0"
] | 1 |
2021-10-30T09:46:08.000Z
|
2021-10-30T09:46:08.000Z
|
zzdb/schema.py
|
AndreiPuchko/zzdb
|
5a6878c5d9b7f013b9ddb940fe6c430ba62316fc
|
[
"Apache-2.0"
] | null | null | null |
zzdb/schema.py
|
AndreiPuchko/zzdb
|
5a6878c5d9b7f013b9ddb940fe6c430ba62316fc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2021 Andrei Puchko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
if __name__ == "__main__":
import sys
sys.path.insert(0, ".")
from tests import test_schema
test_schema.test_schema()
# from demo import demo_sqlite
# demo_sqlite.demo()
import json
import csv
class ZzDbSchema:
"""
{"tables": {"table_name": {"columns": {}, "indexes": {...}}}}
"""
def __init__(self, schema={}):
self.schema = {"tables": {}, "indexes": {}}
def add(
self,
table="",
column="",
datatype="char",
datalen=None,
datadec=None,
to_table=None,
to_column=None,
related=None,
pk=None,
ai=None,
uk=None,
index=None,
):
"""
:param table: database table name
:param column: column name
:param datatype: type
        :param datalen: length
        :param datadec: decimal precision
:param to_table: foreign key table
:param to_column: foreign key column
:param related: foreign column to show
:param pk: primary key
:param ai: autoincrement
:param uk: unique
:param index: create index for the column
"""
if not (table or column):
return
if isinstance(table, dict):
column = table.get("column")
datatype = table.get("datatype")
datalen = table.get("datalen")
datadec = table.get("datadec")
to_table = table.get("to_table")
to_column = table.get("to_column")
related = table.get("related")
pk = table.get("pk")
ai = table.get("ai")
uk = table.get("uk")
index = table.get("index")
table = table["table"]
if table not in self.schema["tables"]:
self.schema["tables"][table] = {"columns": {}, "indexes": {}}
self.schema["tables"][table]["columns"][column] = {}
self.schema["tables"][table]["columns"][column]["datatype"] = datatype
self.schema["tables"][table]["columns"][column]["datalen"] = datalen
self.schema["tables"][table]["columns"][column]["datadec"] = datadec
self.schema["tables"][table]["columns"][column]["to_table"] = to_table
self.schema["tables"][table]["columns"][column]["to_column"] = to_column
self.schema["tables"][table]["columns"][column]["related"] = related
self.schema["tables"][table]["columns"][column]["pk"] = pk
self.schema["tables"][table]["columns"][column]["ai"] = ai
self.schema["tables"][table]["columns"][column]["uk"] = uk
self.schema["tables"][table]["columns"][column]["index"] = index
def add_index(self, table="", index_expression="", index_name=""):
if table not in self.schema["indexes"]:
self.schema["indexes"][table] = {}
self.schema["indexes"][table]["expression"] = index_expression
self.schema["indexes"][table]["name"] = index_name
def get_schema_indexes(self):
rez = []
for x in self.schema["indexes"]:
di = dict(self.schema["indexes"][x])
di['table'] = x
rez.append(di)
return rez
def get_schema_table_attr(self, table="", column="", attr=""):
"""
        returns schema data for a given table, column, attribute
get_schema_table_attr(table_name) - all columns
get_schema_table_attr(table_name,column_name) - given column
get_schema_table_attr(table_name,column_name,"datalen") - given attribute
"""
rez = self.schema.get("tables", {})
if table == "":
return rez
rez = rez.get(table, {}).get("columns", {})
if column == "":
return rez
rez = rez.get(column, {})
if attr == "":
return rez
return rez.get(attr, "")
def get_schema_tables(self):
return self.get_schema_table_attr()
def get_schema_columns(self, table=""):
return self.get_schema_table_attr(table)
def get_schema_attr(self, table="", column=""):
return self.get_schema_table_attr(table, column)
def get_primary_tables(self, child_table, child_record):
"""
returns list of foreign key tables and columns
for given 'child_table' and 'child_record'
used by ZzDb for integrity checking when INSERT/UPDATE
"""
rez = []
for child_column_name in self.get_schema_table_attr(child_table):
child_column = self.get_schema_table_attr(child_table, child_column_name)
if child_column.get("to_table") and child_column.get("to_column"):
rez.append(
{
"primary_table": child_column.get("to_table"),
"primary_column": child_column.get("to_column"),
"child_column": child_column_name,
"child_value": child_record.get(child_column_name, ""),
}
)
return rez
def get_child_tables(self, primary_table, primary_record):
"""
returns list of foreign key tables and columns
for given 'primary_table' and 'primary_record'
used by ZzDb for integrity checking when DELETE
"""
rez = []
for linked_table_name in self.get_schema_table_attr():
for linked_column_name in self.get_schema_table_attr(linked_table_name):
linked_column = self.get_schema_table_attr(linked_table_name, linked_column_name)
if linked_column.get("to_table") == primary_table and linked_column.get("to_column"):
parentCol = linked_column.get("to_column")
rez.append(
{
"child_table": linked_table_name,
"child_column": linked_column_name,
"parent_column": parentCol,
"parent_value": primary_record.get(parentCol),
}
)
return rez
@staticmethod
def show_table(file, table="example_table"):
if file.lower().endswith(".csv"):
print(file)
rows = [x for x in csv.DictReader(open(file), dialect="excel")]
else:
rows = json.load(open(file))
schema = {}
for row in rows:
for col in row:
if col not in schema:
schema[col] = {}
schema[col]["lenght"] = max(schema[col].get("lenght", 0), len(row[col]))
for x in schema:
print(f"schema.add(table='{table}', '{x}', datatype='char', datalen={schema[x]['lenght']})")
| 35.296651 | 104 | 0.565406 |
e2b24e426524273891eb9b9e0a42fedb8d2d28ab
| 529 |
py
|
Python
|
.history/my_classes/ScopesClosuresAndDecorators/decorators_1_20210713200116.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/decorators_1_20210713200116.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/decorators_1_20210713200116.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
"""Decorators
Recall the simple closure example we did which allowed us to maintain a count of how many times a function was called:
"""
def counter(fn):
count = 0
    def inner(*args, **kwargs):  # using *args, **kwargs means we can call any function fn with any combination of positional and keyword arguments
nonlocal count
count += 1
print('Function {0} was called {1} times'.format(fn.__name__, count))
return fn(*args, **kwargs)
return inner
def add(a, b=0):
return a + b
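# A minimal usage sketch completing the example above (not in the original snippet):
add = counter(add)   # manual decoration; equivalent to placing @counter above def add
print(add(1, 2))     # prints "Function add was called 1 times", then 3
print(add(5))        # prints "Function add was called 2 times", then 5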
| 31.117647 | 150 | 0.661626 |
91f813b13ed1868d33738a78afe558a0dedde88a
| 2,160 |
py
|
Python
|
pythainlp/soundex/metasound.py
|
fossabot/pythainlp
|
b3ea0ea2039ab9421bf851a73beb2559f3a28624
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/soundex/metasound.py
|
fossabot/pythainlp
|
b3ea0ea2039ab9421bf851a73beb2559f3a28624
|
[
"Apache-2.0"
] | null | null | null |
pythainlp/soundex/metasound.py
|
fossabot/pythainlp
|
b3ea0ea2039ab9421bf851a73beb2559f3a28624
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Thai soundex - MetaSound system
References:
Snae & Brückner. (2009). Novel Phonetic Name Matching Algorithm with a Statistical
Ontology for Analysing Names Given in Accordance with Thai Astrology.
https://pdfs.semanticscholar.org/3983/963e87ddc6dfdbb291099aa3927a0e3e4ea6.pdf
"""
_CONS_THANTHAKHAT = "กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรลวศษสหฬอฮ์"
_THANTHAKHAT = "์" # \u0e4c
_C1 = "กขฃคฆฅ" # sound K -> coded letter 1
_C2 = "จฉชฌซฐทฒดฎตสศษ" # D -> 2
_C3 = "ฟฝพผภบป" # B -> 3
_C4 = "ง" # NG -> 4
_C5 = "ลฬรนณฦญ" # N -> 5
_C6 = "ม" # M -> 6
_C7 = "ย" # Y -> 7
_C8 = "ว" # W -> 8
def metasound(text, length=4):
"""
Thai MetaSound
:param str text: Thai text
:param int length: preferred length of the MetaSound (default is 4)
:return: MetaSound for the text
**Example**::
        from pythainlp.soundex.metasound import metasound
metasound("ลัก") # 'ล100'
metasound("รัก") # 'ร100'
metasound("รักษ์") # 'ร100'
metasound("บูรณการ", 5)) # 'บ5515'
"""
if not text:
return ""
# keep only consonants and thanthakhat
chars = []
for ch in text:
if ch in _CONS_THANTHAKHAT:
chars.append(ch)
# remove karan (thanthakhat and a consonant before it)
i = 0
while i < len(chars):
if chars[i] == _THANTHAKHAT:
if i > 0:
chars[i - 1] = " "
chars[i] = " "
i += 1
# retain first consonant, encode the rest
chars = chars[:length]
i = 1
while i < len(chars):
if chars[i] in _C1:
chars[i] = "1"
elif chars[i] in _C2:
chars[i] = "2"
elif chars[i] in _C3:
chars[i] = "3"
elif chars[i] in _C4:
chars[i] = "4"
elif chars[i] in _C5:
chars[i] = "5"
elif chars[i] in _C6:
chars[i] = "6"
elif chars[i] in _C7:
chars[i] = "7"
elif chars[i] in _C8:
chars[i] = "8"
else:
chars[i] = "0"
i += 1
while len(chars) < length:
chars.append("0")
return "".join(chars)
| 26.024096 | 82 | 0.534259 |
4eae4a5396b6c3815938016485ade84b8eee42cb
| 8,710 |
py
|
Python
|
incidentsdq.py
|
TransitionProjects/IncidentReportDQ
|
8cce17efa549bb38054505629338eeb6e78ce4f2
|
[
"MIT"
] | null | null | null |
incidentsdq.py
|
TransitionProjects/IncidentReportDQ
|
8cce17efa549bb38054505629338eeb6e78ce4f2
|
[
"MIT"
] | 1 |
2018-05-02T23:51:28.000Z
|
2018-05-02T23:51:28.000Z
|
incidentsdq.py
|
TransitionProjects/IncidentReportDQ
|
8cce17efa549bb38054505629338eeb6e78ce4f2
|
[
"MIT"
] | null | null | null |
"""
This class will take the raw 'Agency - Exclusions Report' from ServicePoint's ART and process it into a DQ report for
tracking inconsistent data entry by TPI staff members. The rules this class checks against with the missing_data_check
method are not particularly flexible at this time and heavy modifications will need to be made to make this script work
for ServicePoint using agencies.
"""
import numpy as np
import pandas as pd
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
class incidentReport:
def __init__(self):
self.file = askopenfilename(title="Open the Agency - Exclusion Report")
self.raw_data = pd.read_excel(self.file, sheet_name="Exclusions")
self.staff_list = pd.read_excel(
askopenfilename(title="Open the Staff Names Report"),
sheet_name="All"
)
def missing_data_check(self, data_frame):
"""
The raw 'Agency - Exclusion Report' will be processed by this method using multiple numpy.select calls to make
sure that each of the fields matches the current best practices for the TPI agency.
:param data_frame: This should be a pandas data frame created from the 'Agency - Exclusion Report' ART report
:return: a data frame showing incidents with errors will be returned
"""
data = data_frame
staff = self.staff_list
missing_df = data.merge(
staff,
how="left",
left_on="Infraction User Creating",
right_on="CM"
)
check_columns = {
"Infraction Provider": "Provider Error",
"Infraction Banned End Date": "End Date Error",
"Infraction Staff Person": "Staff Name Error",
"Infraction Type": "Incident Error",
"Infraction Banned Code": "Incident Code Error",
"Infraction Banned Sites": "Sites Excluded From Error",
"Infraction Notes": "Notes Error"
}
incident_types = [
"Non-compliance with program",
"Violent Behavior",
"Police Called",
"Alcohol",
"Drugs"
]
incident_codes = [
"Bar - Other",
"TPI_Exclusion - Agency (requires reinstatement)"
]
for column in check_columns.keys():
if column == "Infraction Provider":
conditions = [(missing_df[column] == "Transition Projects (TPI) - Agency - SP(19)")]
choices = ["Incorrect Provider"]
missing_df[check_columns[column]] = np.select(conditions, choices, default=None)
elif column == "Infraction Banned End Date":
conditions = [
(
missing_df[column].notna() &
(missing_df["Infraction Banned Code"] == "TPI_Exclusion - Agency (requires reinstatement)")
),
(
missing_df[column].isna() &
~(missing_df["Infraction Banned Code"] == "TPI_Exclusion - Agency (requires reinstatement)")
)
]
choices = ["End Date Should Be Blank", "End Date Should Not Be Blank"]
missing_df[check_columns[column]] = np.select(conditions, choices, default=None)
elif column == "Infraction Staff Person":
conditions = [(missing_df[column].isna())]
choices = ["No Staff Name Entered"]
missing_df[check_columns[column]] = np.select(conditions, choices, default=None)
elif column == "Infraction Type":
conditions = [missing_df[column].isna(), ~(missing_df[column].isin(incident_types))]
choices = ["No Incident Selected", "Non-TPI Incident Selected"]
missing_df[check_columns[column]] = np.select(conditions, choices, default=None)
elif column == "Infraction Banned Code":
conditions = [(missing_df[column].isna()), ~(missing_df[column].isin(incident_codes))]
choices = ["No Incident Code Selected", "Non-TPI Incident Code Selected"]
missing_df[check_columns[column]] = np.select(conditions, choices, default=None)
elif column == "Infraction Banned Sites":
conditions = [(missing_df[column].isna())]
choices = ["No Sites Excluded From Entry"]
missing_df[check_columns[column]] = np.select(conditions, choices, default=None)
elif column == "Infraction Notes":
conditions = [
missing_df[column].isna(),
(
missing_df[column].str.contains("uno") |
missing_df[column].str.contains("UNO")
)
]
choices = ["No Notes Entered", "Use of department specific shorthand"]
missing_df[check_columns[column]] = np.select(conditions, choices, default=None)
else:
pass
missing_df = missing_df[[
"Client Uid",
"Name",
"Infraction User Updating",
"Infraction Banned Start Date",
"Provider Error",
"End Date Error",
"Staff Name Error",
"Incident Error",
"Incident Code Error",
"Sites Excluded From Error",
"Notes Error",
"Dept"
]]
missing_df["Infraction Banned Start Date"] = missing_df["Infraction Banned Start Date"].dt.date
# counts columns in the provided range with a value
missing_df["Errors"] = missing_df[[
"Provider Error",
"End Date Error",
"Staff Name Error",
"Incident Error",
"Incident Code Error",
"Sites Excluded From Error",
"Notes Error"
]].apply(lambda x: x.count(), axis=1)
return missing_df
def create_summary(self, data_frame):
"""
        This method will take the data_frame parameter and turn it into staff and department
        pivot tables using pandas' .pivot_table method, adding a new Error Rate column to each.
        :param data_frame: the errors data frame
        :return: a tuple of (staff, department) pivot tables built from the data_frame parameter
"""
staff_pivot = pd.pivot_table(
data_frame,
index=["Dept", "Name"],
values=["Client Uid", "Errors"],
aggfunc={"Client Uid": len, "Errors": np.sum}
)
staff_pivot["Error Rate"] = staff_pivot["Errors"] / (staff_pivot["Client Uid"] * 7)
dept_pivot = pd.pivot_table(
data_frame,
index=["Dept"],
values=["Client Uid", "Errors"],
aggfunc={"Client Uid": len, "Errors": np.sum}
)
dept_pivot["Error Rate"] = dept_pivot["Errors"] / (dept_pivot["Client Uid"] * 7)
return staff_pivot, dept_pivot
def process(self):
"""
        This method will call the missing_data_check method, then create an Excel spreadsheet with
        Staff Summary, Dept Summary, Errors, and Raw Data sheets. These will then be saved using an asksaveasfilename function call.
:return: True will be returned if the method completes correctly.
"""
raw = self.raw_data.copy()[[
"Client Uid",
"Infraction User Creating",
"Infraction User Updating",
"Infraction Provider",
"Infraction Date Added",
"Infraction Banned Start Date",
"Infraction Banned End Date",
"Infraction Staff Person",
"Infraction Type",
"Infraction Banned Code",
"Infraction Banned Sites",
"Infraction Notes"
]]
errors = self.missing_data_check(self.raw_data.copy())
staff_summary, dept_summary = self.create_summary(errors)
writer = pd.ExcelWriter(
asksaveasfilename(title="Save the processed exclusion report"),
engine="xlsxwriter"
)
staff_summary.to_excel(writer, sheet_name="Staff Summary")
dept_summary.to_excel(writer, sheet_name="Dept Summary")
errors.to_excel(writer, sheet_name="Errors", index=False)
raw.to_excel(writer, sheet_name="Raw Data", index=False)
writer.save()
return True
if __name__ == "__main__":
a = incidentReport()
a.process()
| 43.55 | 120 | 0.565901 |
4a5d41f15291bbe0034a156fd90dd5ed3e5376f7
| 5,364 |
py
|
Python
|
Per6_Game.py
|
nknotts0176/-Per6_Nikolye_pYgAmE
|
c91f74972c8e0514dc8fe12f41c8313bde2327d8
|
[
"MIT"
] | null | null | null |
Per6_Game.py
|
nknotts0176/-Per6_Nikolye_pYgAmE
|
c91f74972c8e0514dc8fe12f41c8313bde2327d8
|
[
"MIT"
] | null | null | null |
Per6_Game.py
|
nknotts0176/-Per6_Nikolye_pYgAmE
|
c91f74972c8e0514dc8fe12f41c8313bde2327d8
|
[
"MIT"
] | null | null | null |
print("1")
import pygame
print("2")
import random
print("3")
import projectbutton
print("4")
pygame.init()  # make sure pygame's display module is initialized before set_mode is called
score = 0
#inputs for Window
Y_N_start = input("Begin game? Y/N ")
if Y_N_start == "Y":
width_input = int(input("W? (Input must be above 599) "))
height_input = int(input("H? (Input must be above 599) "))
else:
    quit()
clock = pygame.time.Clock()
def Game_board(width_input, height_input):
screen = pygame.display.set_mode((width_input, height_input))
screen.fill((0, 50, 0))
pygame.draw.aaline(screen, (225, 0, 0), (50, 50), (50, height_input - 50))
pygame.draw.aaline(screen, (225, 0, 0), (50, 50), (width_input - 50, 50))
pygame.draw.aaline(screen, (225, 0, 0), (width_input - 50, height_input - 50), (50, height_input - 50))
pygame.draw.aaline(screen, (225, 0, 0), (width_input - 50, height_input - 50), (width_input - 50, 50))
def Game_Start(Y_N_start):
if Y_N_start == "Y":
screen = pygame.display.set_mode((width_input, height_input))
print(Game_board(width_input, height_input))
running = True
while running:
FUNction = projectbutton.move(projectbutton.left_button, projectbutton.right_button)
x = random.randint(50, width_input - 50)
y = random.randint(50, height_input - 50)
screen.set_at((x, y), (0, 70, 225))
# The Pixel design
if FUNction == "Left":
food = [screen.set_at((x + 1, y), (54, 70, 225)), #right
screen.set_at((x + 1, y + 1), (0, 40, 225)), #top right
screen.set_at((x, y + 1), (220, 70, 225)), #top
screen.set_at((x - 1, y + 1), (130, 70, 225)), #top left
screen.set_at((x - 1, y), (240, 70, 225)), #left
screen.set_at((x - 1, y - 1), (230, 70, 225)), #bot left
screen.set_at((x, y - 1), (130, 70, 225)), #bot
screen.set_at((x + 1, y - 1), (10, 70, 225)), #bot right
screen.set_at((x + 2, y), (40, 70, 225)), #right
screen.set_at((x + 2, y + 2), (0, 70, 225)), #top right
screen.set_at((x, y + 2), (0, 70, 225)), #top
screen.set_at((x - 2, y + 2), (0, 70, 225)), #top left
screen.set_at((x - 2, y), (0, 70, 225)), #left
screen.set_at((x - 2, y - 2), (0, 70, 225)), #bot left
screen.set_at((x, y - 2), (0, 70, 225)), #bot
screen.set_at((x + 2, y - 2), (0, 70, 225))] #bot right
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
snake_head = [screen.set_at((248, 248), (255, 0, 0)),
screen.set_at((248, 249), (255, 0, 0)),
screen.set_at((248, 250), (225, 0, 0)), #back row of head (Left = back, right = front)
screen.set_at((248, 251), (225, 0, 0)),
screen.set_at((248, 252), (255, 0, 0)),
screen.set_at((249, 248), (225, 0, 0)),
screen.set_at((249, 249), (225, 0, 0)),
screen.set_at((249, 250), (255, 0, 0)), #2nd to back
screen.set_at((249, 251), (255, 0, 0)),
screen.set_at((249, 252), (225, 0, 0)),
screen.set_at((250, 248), (255, 0, 0)),
screen.set_at((250, 249), (255, 0, 0)),
screen.set_at((250, 250), (255, 0, 0)), #middle
screen.set_at((250, 251), (255, 0, 0)),
screen.set_at((250, 252), (255, 0, 0)),
screen.set_at((251, 248), (255, 0, 0)),
screen.set_at((251, 249), (255, 0, 0)),
screen.set_at((251, 250), (255, 0, 0)), #second to front
screen.set_at((251, 251), (225, 0, 0)),
screen.set_at((251, 252), (255, 0, 0)),
screen.set_at((252, 248), (225, 0, 0)),
screen.set_at((252, 249), (225, 0, 0)),
screen.set_at((252, 250), (255, 0, 0)), #front
screen.set_at((252, 251), (255, 0, 0)),
screen.set_at((252, 252), (225, 0, 0))]
#if snake_head[0] in snake_head[1:]:
# break
pygame.display.flip()
clock.tick(.2)
print(Game_Start(Y_N_start))
| 45.07563 | 123 | 0.396905 |
934a819c7f861ab8078eb5a5be4f54f08818693c
| 13,223 |
py
|
Python
|
backend/moonstreamapi/admin/cli.py
|
zomglings/moonstream
|
954f6014f782157ff3d708d0697457c4306a6588
|
[
"Apache-2.0"
] | 1 |
2022-01-06T07:42:51.000Z
|
2022-01-06T07:42:51.000Z
|
backend/moonstreamapi/admin/cli.py
|
zomglings/moonstream
|
954f6014f782157ff3d708d0697457c4306a6588
|
[
"Apache-2.0"
] | null | null | null |
backend/moonstreamapi/admin/cli.py
|
zomglings/moonstream
|
954f6014f782157ff3d708d0697457c4306a6588
|
[
"Apache-2.0"
] | null | null | null |
"""
Moonstream CLI
"""
import argparse
import logging
import json
import os
from posix import listdir
from typing import Optional
from moonstreamdb.db import SessionLocal
from ..settings import BUGOUT_BROOD_URL, BUGOUT_SPIRE_URL, MOONSTREAM_APPLICATION_ID
from ..web3_provider import yield_web3_provider
from . import subscription_types, subscriptions, moonworm_tasks
from .migrations import checksum_address, update_dashboard_subscription_key
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
MIGRATIONS_FOLDER = "./moonstream/admin/migrations"
def parse_boolean_arg(raw_arg: Optional[str]) -> Optional[bool]:
if raw_arg is None:
return None
raw_arg_lower = raw_arg.lower()
if raw_arg_lower in ["t", "true", "1", "y", "yes"]:
return True
return False
def migrations_list(args: argparse.Namespace) -> None:
migrations_overview = f"""
- id: 20211101
name: {checksum_address.__name__}
description: {checksum_address.__doc__}
"""
logger.info(migrations_overview)
    json_migrations_overview = "Available migration files."
    for file in os.listdir(MIGRATIONS_FOLDER):
        if file.endswith(".json"):
            with open(os.path.join(MIGRATIONS_FOLDER, file), "r") as migration_file:
                json_migrations_overview += "\n\n"
                migration = json.load(migration_file)
                json_migrations_overview = "\n".join(
                    (json_migrations_overview, f"- id: {migration['id']}")
                )
                json_migrations_overview = "\n".join(
                    (json_migrations_overview, f" file: {file}")
                )
                json_migrations_overview = "\n".join(
                    (
                        json_migrations_overview,
                        f" description: {migration['description']}",
                    )
                )
    logger.info(json_migrations_overview)
def migrations_run(args: argparse.Namespace) -> None:
web3_session = yield_web3_provider()
db_session = SessionLocal()
try:
if args.id == 20211101:
logger.info("Starting update of subscriptions in Brood resource...")
checksum_address.checksum_all_subscription_addresses(web3_session)
logger.info("Starting update of ethereum_labels in database...")
checksum_address.checksum_all_labels_addresses(db_session, web3_session)
elif args.id == 20211202:
update_dashboard_subscription_key.update_dashboard_resources_key()
else:
drop_keys = []
if args.file is not None:
with open(args.file) as migration_json_file:
migration_json = json.load(migration_json_file)
if (
"match" not in migration_json
or "update" not in migration_json[args.command]
or "description" not in migration_json
):
print(
                            'Migration file plan has an incorrect format; it requires {"match": {},"description": "","upgrade": { "update": {}, "drop_keys": [] }, "downgrade": { "update": {}, "drop_keys": [] }}'
)
return
match = migration_json["match"]
description = migration_json["description"]
update = migration_json[args.command]["update"]
file = args.file
if "drop_keys" in migration_json[args.command]:
drop_keys = migration_json[args.command]["drop_keys"]
subscriptions.migrate_subscriptions(
match=match,
descriptions=description,
update=update,
drop_keys=drop_keys,
file=file,
)
else:
print("Specified ID or migration FILE is required.")
return
finally:
db_session.close()
def moonworm_tasks_list_handler(args: argparse.Namespace) -> None:
moonworm_tasks.get_list_of_addresses()
def moonworm_tasks_add_subscription_handler(args: argparse.Namespace) -> None:
moonworm_tasks.add_subscription(args.id)
def main() -> None:
cli_description = f"""Moonstream Admin CLI
Please make sure that the following environment variables are set in your environment and exported to
subprocesses:
1. MOONSTREAM_APPLICATION_ID
2. MOONSTREAM_ADMIN_ACCESS_TOKEN
Current Moonstream application ID: {MOONSTREAM_APPLICATION_ID}
This CLI is configured to work with the following API URLs:
- Brood: {BUGOUT_BROOD_URL} (override by setting BUGOUT_BROOD_URL environment variable)
- Spire: {BUGOUT_SPIRE_URL} (override by setting BUGOUT_SPIRE_URL environment variable)
"""
parser = argparse.ArgumentParser(
description=cli_description,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(func=lambda _: parser.print_help())
subcommands = parser.add_subparsers(description="Moonstream commands")
parser_subscription_types = subcommands.add_parser(
"subtypes", description="Manage Moonstream subscription types"
)
parser_subscription_types.set_defaults(
func=lambda _: parser_subscription_types.print_help()
)
subcommands_subscription_types = parser_subscription_types.add_subparsers()
parser_subscription_types_create = subcommands_subscription_types.add_parser(
"create", description="Create subscription type"
)
parser_subscription_types_create.add_argument(
"-i", "--id", required=True, type=str, help="ID for the subscription type"
)
parser_subscription_types_create.add_argument(
"-n",
"--name",
required=True,
type=str,
help="Human-friendly name for the subscription type",
)
parser_subscription_types_create.add_argument(
"-d",
"--description",
required=True,
type=str,
help="Detailed description of the subscription type",
)
parser_subscription_types_create.add_argument(
"-c",
"--choices",
nargs="*",
help="Available subscription options for from builder.",
required=True,
)
parser_subscription_types_create.add_argument(
"--icon",
required=True,
help="URL to the icon representing this subscription type",
)
parser_subscription_types_create.add_argument(
"--stripe-product-id",
required=False,
default=None,
type=str,
help="Stripe product id",
)
parser_subscription_types_create.add_argument(
"--stripe-price-id",
required=False,
default=None,
type=str,
help="Stripe price id",
)
parser_subscription_types_create.add_argument(
"--active",
action="store_true",
help="Set this flag to mark the subscription as active",
)
parser_subscription_types_create.set_defaults(
func=subscription_types.cli_create_subscription_type
)
parser_subscription_types_list = subcommands_subscription_types.add_parser(
"list", description="List subscription types"
)
parser_subscription_types_list.add_argument(
"--active",
action="store_true",
help="Set this flag to only list active subscription types",
)
parser_subscription_types_list.set_defaults(
func=subscription_types.cli_list_subscription_types
)
parser_subscription_types_get = subcommands_subscription_types.add_parser(
"get", description="Get a subscription type by its ID"
)
parser_subscription_types_get.add_argument(
"-i",
"--id",
required=True,
help="ID of the subscription type you would like information about",
)
parser_subscription_types_get.set_defaults(
func=subscription_types.cli_get_subscription_type
)
parser_subscription_types_update = subcommands_subscription_types.add_parser(
"update", description="Create subscription type"
)
parser_subscription_types_update.add_argument(
"-i", "--id", required=True, type=str, help="ID for the subscription type"
)
parser_subscription_types_update.add_argument(
"-n",
"--name",
required=False,
default=None,
type=str,
help="Human-friendly name for the subscription type",
)
parser_subscription_types_update.add_argument(
"-d",
"--description",
required=False,
default=None,
type=str,
help="Detailed description of the subscription type",
)
parser_subscription_types_update.add_argument(
"-c",
"--choices",
nargs="*",
help="Available subscription options for form builder.",
required=False,
)
parser_subscription_types_update.add_argument(
"--icon",
required=False,
default=None,
help="URL to the icon representing this subscription type",
)
parser_subscription_types_update.add_argument(
"--stripe-product-id",
required=False,
default=None,
type=str,
help="Stripe product id",
)
parser_subscription_types_update.add_argument(
"--stripe-price-id",
required=False,
default=None,
type=str,
help="Stripe price id",
)
parser_subscription_types_update.add_argument(
"--active",
required=False,
type=parse_boolean_arg,
default=None,
help="Mark the subscription as active (True) or inactive (False).",
)
parser_subscription_types_update.set_defaults(
func=subscription_types.cli_update_subscription_type
)
parser_subscription_types_delete = subcommands_subscription_types.add_parser(
"delete", description="Delete a subscription type by its ID"
)
parser_subscription_types_delete.add_argument(
"-i",
"--id",
required=True,
help="ID of the subscription type you would like to delete.",
)
parser_subscription_types_delete.set_defaults(
func=subscription_types.cli_delete_subscription_type
)
parser_subscription_types_canonicalize = subcommands_subscription_types.add_parser(
"ensure-canonical",
description="Ensure that the connected Brood API contains resources for each of the canonical subscription types",
)
parser_subscription_types_canonicalize.set_defaults(
func=subscription_types.cli_ensure_canonical_subscription_types
)
parser_migrations = subcommands.add_parser(
"migrations", description="Manage database, resource and etc migrations"
)
parser_migrations.set_defaults(func=lambda _: parser_migrations.print_help())
subcommands_migrations = parser_migrations.add_subparsers(
description="Migration commands"
)
parser_migrations_list = subcommands_migrations.add_parser(
"list", description="List migrations"
)
parser_migrations_list.set_defaults(func=migrations_list)
parser_migrations_run = subcommands_migrations.add_parser(
"run", description="Run migration"
)
parser_migrations_run.add_argument(
"-i", "--id", required=False, type=int, help="Provide migration ID"
)
parser_migrations_run.add_argument(
"-f", "--file", required=False, type=str, help="path to file"
)
parser_migrations_run.add_argument(
"-c",
"--command",
default="upgrade",
choices=["upgrade", "downgrade"],
type=str,
help="Command for migration",
)
parser_migrations_run.set_defaults(func=migrations_run)
parser_moonworm_tasks = subcommands.add_parser(
"moonworm-tasks", description="Manage tasks for moonworm journal."
)
    parser_moonworm_tasks.set_defaults(func=lambda _: parser_moonworm_tasks.print_help())
subcommands_moonworm_tasks = parser_moonworm_tasks.add_subparsers(
description="Moonworm taks commands"
)
parser_moonworm_tasks_list = subcommands_moonworm_tasks.add_parser(
"list", description="Return list of addresses in moonworm journal."
)
parser_moonworm_tasks_list.set_defaults(func=moonworm_tasks_list_handler)
parser_moonworm_tasks_add = subcommands_moonworm_tasks.add_parser(
"add_subscription", description="Manage tasks for moonworm journal."
)
parser_moonworm_tasks_add.add_argument(
"-i",
"--id",
type=str,
help="Id of subscription for add to moonworm tasks.",
)
parser_moonworm_tasks_add.set_defaults(func=moonworm_tasks_add_subscription_handler)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
| 34.434896 | 211 | 0.642214 |
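A minimal, self-contained sketch of the nested-subparser pattern this CLI relies on (the command names below are illustrative, not part of the real tool): each subcommand registers its own handler with set_defaults(func=...), and main simply dispatches to args.func.

import argparse

def list_handler(args: argparse.Namespace) -> None:
    print("listing...")

parser = argparse.ArgumentParser(description="demo CLI")
parser.set_defaults(func=lambda _: parser.print_help())
subcommands = parser.add_subparsers(description="demo commands")

parser_items = subcommands.add_parser("items", description="Manage items")
parser_items.set_defaults(func=lambda _: parser_items.print_help())
subcommands_items = parser_items.add_subparsers()

parser_items_list = subcommands_items.add_parser("list", description="List items")
parser_items_list.set_defaults(func=list_handler)

args = parser.parse_args(["items", "list"])
args.func(args)  # prints "listing..."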
1df22e4c72d5ac3e55797ffa140c562902eb3f37
| 3,876 |
py
|
Python
|
tuesmon_ncurses/ui/views/backlog.py
|
tuesmoncom/tuesmon-ncurses
|
21cd4c39f2eed13e7fe42cac0e70d752f76382ca
|
[
"Apache-2.0"
] | null | null | null |
tuesmon_ncurses/ui/views/backlog.py
|
tuesmoncom/tuesmon-ncurses
|
21cd4c39f2eed13e7fe42cac0e70d752f76382ca
|
[
"Apache-2.0"
] | null | null | null |
tuesmon_ncurses/ui/views/backlog.py
|
tuesmoncom/tuesmon-ncurses
|
21cd4c39f2eed13e7fe42cac0e70d752f76382ca
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
tuesmon_ncurses.ui.views.backlog
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import urwid
from tuesmon_ncurses.ui.widgets import generic, backlog
from . import base
class ProjectBacklogSubView(base.SubView):
help_popup_title = "Backlog Help Info"
help_popup_info = base.SubView.help_popup_info + (
( "Backlog Movements:", (
("↑ | k | ctrl p", "Move Up"),
("↓ | j | ctrl n", "Move Down"),
("← | h | ctrl b", "Move Left"),
("→ | l | ctrl f", "Move Right"),
)),
( "User Stories Actions:", (
("n", "Create new US"),
("N", "Create new USs in bulk"),
("e", "Edit selected US"),
("Supr", "Delete selected US"),
("K", "Move selected US up"),
("J", "Move selected US down"),
("w", "Save the position of all USs"),
("m", "Move selected US to a Milestone"),
("r", "Refresh the screen")
)),
)
def __init__(self, parent_view, project, notifier, tabs):
super().__init__(parent_view)
self.project = project
self.notifier = notifier
self.stats = backlog.BacklogStats(project)
self.user_stories = backlog.UserStoryList(project)
list_walker = urwid.SimpleFocusListWalker([
tabs,
generic.box_solid_fill(" ", 1),
self.stats,
generic.box_solid_fill(" ", 1),
self.user_stories
])
list_walker.set_focus(4)
self.widget = urwid.ListBox(list_walker)
def open_user_story_form(self, user_story={}):
self.user_story_form = backlog.UserStoryForm(self.project, user_story=user_story)
# FIXME: Calculate the form size
self.parent.show_widget_on_top(self.user_story_form, 80, 24)
def close_user_story_form(self):
del self.user_story_form
self.parent.hide_widget_on_top()
def get_user_story_form_data(self):
data = {}
if hasattr(self, "user_story_form"):
data.update({
"subject": self.user_story_form.subject,
"milestone": self.user_story_form.milestone,
"points": self.user_story_form.points,
"status": self.user_story_form.status,
"is_blocked": self.user_story_form.is_blocked,
"blocked_note": self.user_story_form.blocked_note,
"tags": self.user_story_form.tags,
"description": self.user_story_form.description,
"team_requirement": self.user_story_form.team_requirement,
"client_requirement": self.user_story_form.client_requirement,
"project": self.project["id"],
})
return data
def open_user_stories_in_bulk_form(self):
self.user_stories_in_bulk_form = backlog.UserStoriesInBulkForm(self.project)
# FIXME: Calculate the form size
self.parent.show_widget_on_top(self.user_stories_in_bulk_form, 80, 24)
def close_user_stories_in_bulk_form(self):
del self.user_stories_in_bulk_form
self.parent.hide_widget_on_top()
def get_user_stories_in_bulk_form_data(self):
data = {}
if hasattr(self, "user_stories_in_bulk_form"):
data.update({
"bulkStories": self.user_stories_in_bulk_form.subjects,
"projectId": self.project["id"],
})
return data
def open_milestones_selector_popup(self, user_story={}):
self.milestone_selector_popup = backlog.MIlestoneSelectorPopup(self.project, user_story)
# FIXME: Calculate the popup size
self.parent.show_widget_on_top(self.milestone_selector_popup, 100, 30)
def close_milestone_selector_popup(self):
del self.milestone_selector_popup
self.parent.hide_widget_on_top()
| 35.559633 | 96 | 0.606553 |
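A stripped-down sketch of the widget arrangement the view above builds — a SimpleFocusListWalker wrapped in a ListBox with focus forced onto the last row (the row contents here are placeholders, not the real backlog widgets):

import urwid

rows = [
    urwid.Text("tabs"),
    urwid.Divider(),
    urwid.Text("stats"),
    urwid.Divider(),
    urwid.Button("user stories"),   # selectable stand-in for the user story list
]
list_walker = urwid.SimpleFocusListWalker(rows)
list_walker.set_focus(4)            # start with the user story list focused, as the view does
widget = urwid.ListBox(list_walker)
# urwid.MainLoop(widget).run()      # uncomment to render it in a terminal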
a0a8199f74e27a27e32250a2c661d417ee347afc
| 38 |
py
|
Python
|
perses/bias/__init__.py
|
schallerdavid/perses
|
58bd6e626e027879e136f56e175683893e016f8c
|
[
"MIT"
] | 99 |
2016-01-19T18:10:37.000Z
|
2022-03-26T02:43:08.000Z
|
perses/bias/__init__.py
|
schallerdavid/perses
|
58bd6e626e027879e136f56e175683893e016f8c
|
[
"MIT"
] | 878 |
2015-09-18T19:25:30.000Z
|
2022-03-31T02:33:04.000Z
|
perses/bias/__init__.py
|
schallerdavid/perses
|
58bd6e626e027879e136f56e175683893e016f8c
|
[
"MIT"
] | 30 |
2015-09-21T15:26:35.000Z
|
2022-01-10T20:07:24.000Z
|
from perses.bias.bias_engine import *
| 19 | 37 | 0.815789 |
0f871019bf966e0f44e2ca324510cbec47e9cf69
| 8,445 |
py
|
Python
|
test/functional/feature_proxy.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
test/functional/feature_proxy.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
test/functional/feature_proxy.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Copyright (c) 2021 The Vivuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test vivuscoind with different proxy configuration.
Test plan:
- Start vivuscoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on vivuscoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create vivuscoinds that connect to them
- Manipulate the vivuscoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import VivuscoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(VivuscoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: vivuscoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: vivuscoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("vivuscoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"vivuscoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.600985 | 123 | 0.627827 |
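For reference on the assertions above (atyp DOMAINNAME, addr, port): a minimal sketch of how a SOCKS5 CONNECT request for a domain name is laid out on the wire per RFC 1928 — these are the fields the test's proxy server decodes. The helper name is illustrative, not part of the test framework.

import struct

def socks5_connect_request(domain: bytes, port: int) -> bytes:
    # VER=5, CMD=1 (CONNECT), RSV=0, ATYP=3 (DOMAINNAME),
    # then a one-byte length-prefixed domain and a big-endian port.
    return b"\x05\x01\x00\x03" + bytes([len(domain)]) + domain + struct.pack(">H", port)

req = socks5_connect_request(b"node.noumenon", 8333)
assert req[3] == 3                      # ATYP is DOMAINNAME
assert req[5:5 + req[4]] == b"node.noumenon"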
f87e9da093aae6e9e2053a4569bc759932b69baf
| 1,292 |
py
|
Python
|
venv/lib/python3.9/site-packages/google/cloud/monitoring_v3/services/service_monitoring_service/transports/__init__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 18 |
2020-09-19T17:52:47.000Z
|
2022-03-25T12:09:22.000Z
|
venv/lib/python3.9/site-packages/google/cloud/monitoring_v3/services/service_monitoring_service/transports/__init__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 110 |
2020-02-05T15:26:47.000Z
|
2022-03-28T23:02:02.000Z
|
venv/lib/python3.9/site-packages/google/cloud/monitoring_v3/services/service_monitoring_service/transports/__init__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 26 |
2020-02-08T00:05:46.000Z
|
2022-03-27T19:32:26.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ServiceMonitoringServiceTransport
from .grpc import ServiceMonitoringServiceGrpcTransport
from .grpc_asyncio import ServiceMonitoringServiceGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ServiceMonitoringServiceTransport]]
_transport_registry["grpc"] = ServiceMonitoringServiceGrpcTransport
_transport_registry["grpc_asyncio"] = ServiceMonitoringServiceGrpcAsyncIOTransport
__all__ = (
"ServiceMonitoringServiceTransport",
"ServiceMonitoringServiceGrpcTransport",
"ServiceMonitoringServiceGrpcAsyncIOTransport",
)
| 35.888889 | 82 | 0.797214 |
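The module above is just an ordered name-to-class registry; a small self-contained sketch of the same lookup pattern (the transport class names below are stand-ins, not part of the real client library):

from collections import OrderedDict
from typing import Dict, Type

class BaseTransport: ...
class GrpcTransport(BaseTransport): ...
class GrpcAsyncIOTransport(BaseTransport): ...

_transport_registry = OrderedDict()  # type: Dict[str, Type[BaseTransport]]
_transport_registry["grpc"] = GrpcTransport
_transport_registry["grpc_asyncio"] = GrpcAsyncIOTransport

def get_transport_class(label: str = "") -> Type[BaseTransport]:
    # Default to the first registered transport when no label is requested.
    if not label:
        return next(iter(_transport_registry.values()))
    return _transport_registry[label]

assert get_transport_class() is GrpcTransport
assert get_transport_class("grpc_asyncio") is GrpcAsyncIOTransport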
114fe1798c159fe3e6701928bb26f70b950c9c56
| 5,530 |
py
|
Python
|
datasets.py
|
luyiyun/NormAE
|
19eeb6c76af02855229092c5a53546b1fd7703de
|
[
"MIT"
] | 18 |
2019-12-08T06:01:13.000Z
|
2022-02-25T09:49:13.000Z
|
datasets.py
|
luyiyun/NormAE
|
19eeb6c76af02855229092c5a53546b1fd7703de
|
[
"MIT"
] | 4 |
2021-07-21T13:49:15.000Z
|
2022-01-27T12:31:55.000Z
|
datasets.py
|
luyiyun/NormAE
|
19eeb6c76af02855229092c5a53546b1fd7703de
|
[
"MIT"
] | 7 |
2020-08-04T13:14:13.000Z
|
2022-01-26T17:36:16.000Z
|
import numpy as np
import pandas as pd
import torch.utils.data as data
from sklearn.model_selection import train_test_split
class BaseData(data.Dataset):
''' Base Data Class '''
def __init__(self, X_df, Y_df, pre_transfer=None):
'''
        X_df: samples x peaks, dataframe;
        Y_df: samples x 4, the colnames are injection.order, batch, group and
        class; group is the representation for CRC(1) and CE(0), class is the
        representation for Subject(1) and QCs(0), -1 represents None.
'''
super(BaseData, self).__init__()
self.x_df, self.y_df = X_df, Y_df
self.pre_transfer = pre_transfer
if self.pre_transfer is not None:
self.x_df, self.y_df = self.pre_transfer(self.x_df, self.y_df)
def __len__(self):
return len(self.x_df)
def __getitem__(self, indx):
sample_x, sample_y = self.x_df.values[indx], self.y_df.values[indx]
return sample_x, sample_y
def transform(self, trans):
''' transform X and Y '''
        self.x_df, self.y_df = trans(self.x_df, self.y_df)
return self
@property
def num_features(self):
''' the number of peaks '''
return self.x_df.shape[1]
@property
def num_batch_labels(self):
''' the number of batches '''
return len(self.y_df['batch'].unique())
class ConcatData(BaseData):
''' concatenate two BaseData objects '''
def __init__(self, *datas):
x_dfs = pd.concat([d.x_df for d in datas], axis=0)
y_dfs = pd.concat([d.y_df for d in datas], axis=0)
super(ConcatData, self).__init__(x_dfs, y_dfs, None)
def get_metabolic_data(
x_file, y_file, pre_transfer=None, sub_qc_split=True, use_log=False,
use_batch=None, use_samples_size=None, random_seed=None
):
'''
Read metabolic data file and get dataframes
metabolic data (x_file) example:
name,mz,rt,QC1,A1,A2,A3,QC2,A4\n
M64T32,64,32,1000,2000,3000,4000,5000,6000\n
M65T33,65,33,10000,20000,30000,40000,50000,60000\n
...
sample information data (y_file) example:
sample.name,injection.order,batch,group,class\n
QC1,1,1,QC,QC\n
A1,2,1,0,Subject\n
A2,3,1,1,Subject\n
A3,4,1,1,Subject\n
QC2,5,2,QC,QC\n
A4,6,2,0,Subject\n
A5,7,2,1,Subject\n
A6,8,2,1,Subject\n
...
'''
# read y_file
y_df = pd.read_csv(y_file, index_col='sample.name')
y_df = y_df.dropna()
y_num = y_df.shape[-1]
# read x_file
meta_df = pd.read_csv(x_file, index_col='name').drop(['mz', 'rt'], axis=1)
meta_df = meta_df.T.rename_axis(index='sample', columns='meta')
# merge
all_df = y_df.merge(meta_df,
how='inner',
left_index=True,
right_index=True)
# remove peaks that has most zero values in all samples
meta_df, y_df = all_df.iloc[:, y_num:], all_df.iloc[:, :y_num]
mask1 = (meta_df == 0).mean(axis=0) < 0.2
meta_df = meta_df.loc[:, mask1]
# remove peaks that has most zero values in QCs
qc_mask = y_df['class'] == 'QC'
qc_meta_df = meta_df.loc[qc_mask, :]
mask2 = (qc_meta_df == 0).mean(axis=0) < 0.2
meta_df = meta_df.loc[:, mask2]
# for each peak, impute the zero values with the half of minimum values
def impute_zero(peak):
zero_mask = peak == 0
if zero_mask.any():
new_x = peak.copy()
impute_value = peak.loc[~zero_mask].min()
new_x[zero_mask] = impute_value / 2
return new_x
return peak
meta_df = meta_df.apply(impute_zero, axis=0)
# extract the useful information from y_file
y_df = y_df.loc[:, ['injection.order', 'batch', 'group', 'class']]
# batch labels are transform to beginning from zero
y_df.loc[:, 'batch'] -= 1
# digitize group
y_df['group'].replace('QC', '-1', inplace=True)
y_df['group'] = y_df['group'].astype('int')
# digitize class
y_df['class'].replace({'Subject': 1, 'QC': 0}, inplace=True)
# inverse injection.order
# y_df['injection.order'] = y_df['injection.order'].max(
# ) - y_df['injection.order']
if use_batch is not None:
bool_ind = (y_df.loc[:, "batch"] < use_batch).values
meta_df, y_df = meta_df.loc[bool_ind, :], y_df.loc[bool_ind, :]
if use_samples_size is not None:
meta_df, _, y_df, _ = train_test_split(
meta_df, y_df, train_size=use_samples_size,
stratify=y_df.loc[:, "batch"].values,
random_state=random_seed
)
if use_log:
meta_df = meta_df.applymap(np.log)
if pre_transfer is not None:
meta_df, y_df = pre_transfer(meta_df, y_df)
if sub_qc_split:
qc_index = y_df['class'] == 0
return BaseData(meta_df[~qc_index], y_df[~qc_index]), \
BaseData(meta_df[qc_index], y_df[qc_index])
return BaseData(meta_df, y_df)
if __name__ == "__main__":
# for testing
meta_file = "./DATA/Amide/meta.csv"
sample_file = "./DATA/Amide/sample.information.csv"
subject_dat, qc_dat = get_metabolic_data(meta_file, sample_file)
print('')
    print(subject_dat.x_df.head())
    print(subject_dat.y_df.head())
    print(qc_dat.x_df.head())
    print(qc_dat.y_df.head())
print(subject_dat[0])
print(qc_dat[0])
print(len(subject_dat))
print(subject_dat.num_features)
print(len(qc_dat))
print(qc_dat.num_features)
print(qc_dat.num_batch_labels)
| 33.719512 | 78 | 0.618445 |
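A self-contained sketch of the zero-handling step get_metabolic_data performs — imputing remaining zeros with half of each peak's minimum non-zero intensity — on a tiny made-up intensity table (the peak names and values are invented):

import pandas as pd

# Toy peak-intensity table: rows are samples, columns are peaks.
meta_df = pd.DataFrame({
    "M64T32": [1000.0, 0.0, 3000.0, 4000.0],
    "M65T33": [500.0, 250.0, 0.0, 125.0],
})

def impute_zero(peak):
    # Replace zero intensities with half of the peak's smallest non-zero value.
    zero_mask = peak == 0
    if zero_mask.any():
        new_x = peak.copy()
        new_x[zero_mask] = peak.loc[~zero_mask].min() / 2
        return new_x
    return peak

print(meta_df.apply(impute_zero, axis=0))
# M64T32's zero becomes 500.0 (half of 1000), M65T33's zero becomes 62.5 (half of 125).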
e99c11f7bf21fa183b40aea8403b5e39334b1af1
| 2,022 |
py
|
Python
|
apps/portalbase/macros/page/explorer/1_main.py
|
Jumpscale/jumpscale_portal8
|
3a4d56a1ba985b68fe9b525aed2486a54808332f
|
[
"Apache-2.0"
] | null | null | null |
apps/portalbase/macros/page/explorer/1_main.py
|
Jumpscale/jumpscale_portal8
|
3a4d56a1ba985b68fe9b525aed2486a54808332f
|
[
"Apache-2.0"
] | 74 |
2015-12-28T16:17:20.000Z
|
2021-09-08T12:28:59.000Z
|
apps/portalbase/macros/page/explorer/1_main.py
|
Jumpscale/jumpscale_portal8
|
3a4d56a1ba985b68fe9b525aed2486a54808332f
|
[
"Apache-2.0"
] | null | null | null |
def main(j, args, params, tags, tasklet):
import os
page = args.page
params.result = page
page.addCSS(cssContent='''
.elfinder-contextmenu{
left: 39%;
}
''')
path = ''
space = args.paramsExtra.get('explorerspace')
if space:
space = j.portal.server.active.getSpace(space)
path = space.model.path
if args.tags.tagExists("ppath"):
path = args.tags.tagGet("ppath").replace("+", ":").replace("___", ":").replace("\\", "/")
origpath = path
path = j.dirs.replaceTxtDirVars(path)
if not j.sal.fs.exists(path):
page.addMessage("ERROR:could not find file %s" % path)
apppath = j.portal.server.active.basepath
codepath = os.getcwd()
if path.startswith('/') and not (path.startswith(apppath) or path.startswith(codepath) or origpath != path):
path = ''
if args.tags.tagExists("bucket"):
bucket = args.tags.tagGet("bucket").lower()
if bucket not in j.portal.server.active.bucketsloader.buckets:
page.addMessage("Could not find bucket %s" % bucket)
return params
bucket = j.portal.server.active.bucketsloader.buckets[bucket]
path = bucket.model.path.replace("\\", "/")
if args.tags.tagExists("height"):
height = int(args.tags.tagGet("height"))
else:
height = 500
if args.tags.tagExists("key"):
key = args.tags.tagGet("key")
else:
key = None
if args.tags.tagExists("readonly") or args.tags.labelExists("readonly"):
readonly = True
else:
readonly = False
if args.tags.tagExists("tree") or args.tags.labelExists("tree"):
tree = True
else:
tree = False
if path == "$$path":
params.page.addMessage("Could not find path to display explorer for")
return params
page.addExplorer(path, dockey=key, height=height, readonly=readonly, tree=tree)
return params
def match(j, args, params, tags, tasklet):
return True
| 29.735294 | 116 | 0.605836 |
94a022beb5636d6278267f5338c2c6b4ff637c68
| 7,460 |
py
|
Python
|
bin/retag.py
|
VenoMpie/pyrescene
|
f75d98d9173f1576b5d8fd42da300673e918707c
|
[
"MIT"
] | 18 |
2020-08-09T02:17:46.000Z
|
2022-02-18T09:17:25.000Z
|
bin/retag.py
|
VenoMpie/pyrescene
|
f75d98d9173f1576b5d8fd42da300673e918707c
|
[
"MIT"
] | 1 |
2021-11-23T21:13:37.000Z
|
2021-11-23T21:13:37.000Z
|
bin/retag.py
|
VenoMpie/pyrescene
|
f75d98d9173f1576b5d8fd42da300673e918707c
|
[
"MIT"
] | 9 |
2020-10-15T11:02:49.000Z
|
2022-03-15T10:36:14.000Z
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright (c) 2014-2015 pyReScene
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
This tool fixes the tags of music files.
"""
from optparse import OptionParser
import sys
import os
try:
import _preamble
except ImportError:
pass
import rescene
from rescene.utility import raw_input
from resample.srs import main as srsmain
from resample.main import file_type_info, sample_class_factory, FileType
class NoTaggingAvailable(Exception):
pass
def fix_tracks(srr_file, input_dir, output_dir, always_yes=False):
if not srr_file.endswith(".srr"):
raise AttributeError("The first parameter must be an SRR file.")
if not os.path.isdir(input_dir):
raise AttributeError("The input location must be a directory.")
if not os.path.isdir(output_dir):
try:
os.makedirs(output_dir)
except:
pass
if not os.path.isdir(output_dir):
raise AttributeError("Could not create output location.")
stored_files = rescene.info(srr_file)['stored_files']
# extract non SRS files
successes = 0
failures = 0
skips = 0
srs_files = []
for sfile in stored_files.keys():
if sfile.endswith(".srs"):
srs_files.append(sfile)
else:
print("Extracting %s" % sfile)
rescene.extract_files(srr_file, output_dir, True, sfile)
# fix music files that can be found
for srs in srs_files:
print("Using %s" % srs)
(out, ok) = rescene.extract_files(srr_file, output_dir, True, srs)[0]
if not ok:
# file extraction failed or existing .srs not overwritten
print("Attempt to fix track aborted.")
continue
try:
success = fix_tagging(out, output_dir, input_dir, always_yes)
if success:
successes += 1
else:
# .srs is not a music file
skips += 1
except ValueError:
# pexit() srs.py only throws ValueError
failures += 1
except Exception as e:
print("Unexpected error!")
print(str(e))
failures += 1
finally:
os.remove(out)
print("\n\n%d/%d files succeeded. %d failure%s. %s" % (
successes, failures + successes, failures,
"" if failures == 1 else "s",
"" if not skips else "%s skip%s." %
(skips, "" if skips == 1 else "s")))
def fix_tagging(srs, output_dir, input_dir, always_yes):
"""Fixes the meta data tags of a music track.
srs: srs file location
output_dir: root dir of the fixed release
input_dir: location to find the track to be fixed
always_yes: when to always confirm replacements
"""
try:
srs_info = get_srs_info(srs)
except NoTaggingAvailable as not_music:
print("")
print(str(not_music))
os.remove(srs)
return False
original_name = srs_info.sample_name
print("Fixing %s" % original_name)
musicf = join_fix_case(input_dir, original_name)
out_subfolder = os.path.dirname(os.path.relpath(srs, output_dir))
if not os.path.isfile(musicf):
srr_path = out_subfolder.split("/")
srr_path.append(original_name)
musicf = join_fix_case(input_dir, *srr_path)
if not os.path.isfile(musicf):
print("Track not found")
raise ValueError("not found")
print("From %s" % musicf)
out_location = os.path.join(output_dir, out_subfolder)
srs_parameters = [srs, musicf, "-o", out_location]
if always_yes:
srs_parameters.append("-y")
# can throw ValueError on pexit()
srsmain(srs_parameters, no_exit=True)
return True
def get_srs_info(srs_file):
file_type = file_type_info(srs_file).file_type
if file_type not in (FileType.MP3, FileType.FLAC):
message = "Not a FLAC or MP3 music file: %s." % srs_file
raise NoTaggingAvailable(message)
sample = sample_class_factory(file_type)
srs_data, _tracks = sample.load_srs(srs_file)
return srs_data
def join_fix_case(good_base, *parts):
"""Returns a unix-type case-sensitive path of the joined parts.
An empty string is returned on failure: file not found."""
# check if input is already correct
joined_input = os.path.join(good_base, *parts)
if os.path.exists(joined_input):
return joined_input
corrected_path = good_base
for p in parts:
if not os.path.exists(os.path.join(corrected_path, p)):
listing = os.listdir(corrected_path)
cilisting = [l.lower() for l in listing]
cip = p.lower()
if cip in cilisting:
# get real folder name
l = listing[cilisting.index(cip)]
corrected_path = os.path.join(corrected_path, l)
else:
# file or path does not exist
return ""
else:
corrected_path = os.path.join(corrected_path, p)
return corrected_path
def main(argv=None):
parser = OptionParser(
usage=("Usage: %prog file.srr -i input_dir -o output_dir\n"
"This tool fixes the tags of music files.\n"
"Example usage: %prog rls.srr --output D:\\rls\\"),
version="%prog " + rescene.__version__) # --help, --version
parser.add_option("-i", "--input", dest="input_dir", metavar="DIR",
default=".", help="Specifies input directory. "
"The default input path is the current directory.")
parser.add_option("-o", "--output", dest="output_dir", metavar="DIR",
default=".", help="Specifies output directory. "
"The default output path is the current directory.")
parser.add_option("-y", "--always-yes", dest="always_yes", default=False,
action="store_true", help="assume Yes for all prompts")
parser.add_option("-n", "--always-no", dest="always_no", default=False,
action="store_true", help="never overwrite existing files "
"with the extracted stored files from the SRR")
if argv is None:
argv = sys.argv[1:]
# no arguments given
if not len(argv):
# show application usage
parser.print_help()
return 0
(options, args) = parser.parse_args(args=argv)
# no SRR file provided
if not len(args):
parser.print_help()
return 1
def can_overwrite(file_path):
retvalue = True
if (not options.always_yes and
not options.always_no and os.path.isfile(file_path)):
print("Warning: File %s already exists." % file_path)
char = raw_input("Do you wish to continue? (Y/N): ").lower()
while char not in ('y', 'n'):
char = raw_input("Do you wish to continue? (Y/N): ").lower()
if char == 'n':
retvalue = False
elif options.always_no and os.path.isfile(file_path):
print("(not replaced)")
retvalue = False
return retvalue
rescene.main.can_overwrite = can_overwrite
try:
if fix_tracks(args[0], options.input_dir, options.output_dir,
options.always_yes):
return 0
else:
return 1
except AttributeError as bad_input:
print(bad_input)
return 2
if __name__ == "__main__":
sys.exit(main())
| 30.57377 | 74 | 0.71622 |
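A self-contained sketch of the technique join_fix_case relies on — resolving each path component case-insensitively against the actual directory listing (the function name and example path below are hypothetical):

import os

def resolve_case_insensitive(base, *parts):
    """Return the on-disk spelling of base/parts, matching each component
    case-insensitively; return "" when a component cannot be found."""
    path = base
    for part in parts:
        matches = [entry for entry in os.listdir(path) if entry.lower() == part.lower()]
        if not matches:
            return ""
        path = os.path.join(path, matches[0])
    return path

# e.g. resolve_case_insensitive("/data/rips", "some_release-flac-2014-grp", "01-track.flac")
# would return the correctly-cased path if it exists, "" otherwise.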
1bc45d5962a5a00f308908c1c681263bcf834322
| 4,391 |
py
|
Python
|
roles/common/molecule/default/tests/test_default.py
|
chrisshiels/ansible
|
75760ab42c04614771ea1c6f447c3f6af02238ce
|
[
"MIT"
] | null | null | null |
roles/common/molecule/default/tests/test_default.py
|
chrisshiels/ansible
|
75760ab42c04614771ea1c6f447c3f6af02238ce
|
[
"MIT"
] | 3 |
2021-04-07T23:16:01.000Z
|
2021-09-23T23:21:59.000Z
|
roles/common/molecule/default/tests/test_default.py
|
chrisshiels/ansible
|
75760ab42c04614771ea1c6f447c3f6af02238ce
|
[
"MIT"
] | null | null | null |
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('name',
[ 'CentOS-Base',
'CentOS-CR',
'CentOS-Debuginfo',
'CentOS-Media',
'CentOS-Sources',
'CentOS-Vault',
'CentOS-fasttrack' ])
def test_renamed_yum_repository(host, name):
f = host.file('/etc/yum.repos.d/%s.repo' % ( name ))
assert not f.exists
f1 = host.file('/etc/yum.repos.d/%s.repo~' % ( name ))
assert f1.exists
@pytest.mark.parametrize('name',
[ 'base',
'updates',
'extras' ])
def test_yum_repository(host, name):
f = host.file('/etc/yum.repos.d/%s.repo' % ( name ))
assert f.exists
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
    assert f.mode == 0o644
@pytest.mark.parametrize('name',
[ 'lsscsi',
'ltrace',
'strace' ])
def test_package(host, name):
package = host.package(name)
assert package.is_installed
def test_prompt_root(host):
command = host.command('bash -l -c "printenv PS1"')
assert command.stdout == '\\h# '
assert command.rc == 0
@pytest.mark.parametrize('name',
[ 'docker',
'libvirt',
'wheel' ])
def test_usersgroups_secondary_groups(host, name):
group = host.group(name)
assert group.exists
@pytest.mark.parametrize(str.join(',',
[
'name',
'state',
'uid',
'gid',
'groups',
'comment',
'home',
'shell',
'password'
]),
[
(
'user1000',
'present',
1000,
1000,
[ 'wheel', 'libvirt', 'user1000' ],
'User 1000',
'/home/user1000',
'/bin/bash',
'*'
),
(
'user1001',
'present',
1001,
1001,
[ 'wheel', 'docker', 'user1001' ],
'User 1001',
'/home/user1001',
'/bin/bash',
'*'
),
(
'user1002',
'absent',
1002,
1002,
[ 'user1002' ],
'User 1002',
'/home/user1002',
'/bin/bash',
'*'
)
])
def test_usersgroups(host,
name,
state,
uid,
gid,
groups,
comment,
home,
shell,
password):
group = host.group(name)
if state == 'present':
assert group.exists
assert group.gid == gid
else:
assert not group.exists
user = host.user(name)
if state == 'present':
assert user.exists
assert user.uid == uid
assert user.gid == gid
assert user.group == name
assert sorted(user.groups) == sorted(groups)
assert user.gecos == comment
assert user.home == home
assert user.shell == shell
assert user.password == password
else:
assert not user.exists
| 31.364286 | 75 | 0.355728 |
748794bac09d76e00159e63c3a8f32c6593bcc48
| 9,025 |
py
|
Python
|
experiments/classification/train.py
|
JasprW/MiCT-Net-PyTorch
|
c7e876886695b22e53a95387a50e100810dee6f0
|
[
"Apache-2.0"
] | 51 |
2019-11-16T22:20:24.000Z
|
2022-03-15T12:23:15.000Z
|
experiments/classification/train.py
|
scenarios/MiCT-Net-PyTorch
|
fdd72270f70cfe81c24e5cb1daa9df65ecc5b035
|
[
"Apache-2.0"
] | 8 |
2019-12-09T13:08:11.000Z
|
2021-01-09T06:37:53.000Z
|
experiments/classification/train.py
|
scenarios/MiCT-Net-PyTorch
|
fdd72270f70cfe81c24e5cb1daa9df65ecc5b035
|
[
"Apache-2.0"
] | 13 |
2020-01-10T03:18:17.000Z
|
2022-02-23T05:51:01.000Z
|
# ==============================================================================
# Copyright 2019 Florent Mahoudeau. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import shutil
import logging
import pprint
from tqdm import tqdm
from torch.nn import Parameter
import torch
from torch import nn
from torch.utils import data
import torchvision.transforms as transform
from option import Options
import sys
sys.path.insert(0, '../../')
from mictnet.models import get_classification_model
from mictnet.datasets import get_classification_dataset
from mictnet import utils
class Trainer:
def __init__(self, args):
self.args = args
self.logger, self.console, self.output_dir = utils.file.create_logger(args, 'train')
self.logger.info(pprint.pformat(args))
# copy model file
this_dir = os.path.dirname(__file__)
shutil.copy2(
os.path.join(this_dir, '../../mictnet/models', args.model + '.py'),
self.output_dir)
device = 'cuda:{}'.format(args.gpu_id) if torch.cuda.is_available() else 'cpu'
print('Compute device: ' + device)
self.device = torch.device(device)
# data transforms
input_transform = transform.Compose([
transform.ToTensor(),
transform.Normalize([.485, .456, .406], [.229, .224, .225])])
# dataset
data_kwargs = {'logger': self.logger, 'transform': input_transform,
'base_size': args.base_size, 'crop_size': args.crop_size,
'crop_vid': args.crop_vid, 'split': args.split,
'root': args.data_folder}
trainset = get_classification_dataset(args.dataset, mode='train', **data_kwargs)
testset = get_classification_dataset(args.dataset, mode='val', **data_kwargs)
# dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': False} \
if args.cuda else {}
self.trainloader = data.DataLoader(trainset, batch_size=args.batch_size,
drop_last=True, shuffle=True, **kwargs)
self.valloader = data.DataLoader(testset, batch_size=args.batch_size,
drop_last=False, shuffle=False, **kwargs)
self.n_classes = trainset.n_classes
# model
model_kwargs = {'backbone': args.backbone, 'dropout': args.dropout,
'version': args.version} \
if args.model == 'mictresnet' else {}
self.model = get_classification_model(args.model, pretrained=args.pretrained,
**model_kwargs)
#self.logger.info(pprint.pformat(self.model))
# count parameter number
total_params = sum(p.numel() for p in self.model.parameters())
self.logger.info("Total number of parameters: %d" % total_params)
# optimizer
params_list = [{'params': self.model.parameters(), 'lr': args.lr}, ]
self.optimizer = torch.optim.SGD(params_list,
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# define loss function (criterion)
self.criterion = nn.CrossEntropyLoss().to(self.device)
self.model.to(self.device)
self.best_pred = 0.0
# resuming checkpoint
if args.resume is not None and len(args.resume) > 0:
if not os.path.isfile(args.resume):
raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
if not args.start_epoch:
args.start_epoch = checkpoint['epoch']
if args.cuda:
# model and checkpoint have different structures
pretrained_dict = checkpoint['state_dict']
model_dict = self.model.state_dict()
for name, param in pretrained_dict.items():
if name not in model_dict:
continue
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
model_dict[name].copy_(param)
else:
self.model.load_state_dict(checkpoint['state_dict'])
if not args.ft:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.best_pred = checkpoint['best_pred']
self.logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
# clear start epoch if fine-tuning
if args.ft:
args.start_epoch = 0
self.best_pred = 0.0
# lr scheduler
self.scheduler = utils.LRScheduler(self.logger, args.lr_scheduler, args.lr,
args.epochs, len(self.trainloader),
lr_step=args.lr_step)
# don't output to stdout anymore when logging
logging.getLogger('').removeHandler(self.console)
def training(self, epoch):
train_loss = 0.0
self.model.train()
top1 = utils.AverageMeter('acc@1', ':6.2f')
top5 = utils.AverageMeter('acc@5', ':6.2f')
tbar = tqdm(self.trainloader)
for i, (video, target) in enumerate(tbar):
video = video.to(self.device)
target = target.to(self.device)
self.scheduler(self.optimizer, i, epoch, self.best_pred)
self.optimizer.zero_grad()
pred = self.model(video)
loss = self.criterion(pred, target)
loss.backward()
self.optimizer.step()
acc1, acc5 = utils.accuracy(pred, target, topk=(1, 5))
top1.update(acc1[0], args.batch_size)
top5.update(acc5[0], args.batch_size)
train_loss += loss.item()
tbar.set_description(
'train_loss: %.3f, acc1: %.3f, acc5: %.3f' %
(train_loss / (i + 1), top1.avg, top5.avg))
self.logger.info('train_loss: %.3f, acc1: %.3f, acc5: %.3f' %
(train_loss / len(self.trainloader), top1.avg, top5.avg))
if self.args.no_val:
# save checkpoint every epoch
is_best = False
utils.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
}, self.args, is_best)
def validation(self, epoch):
val_loss = 0.0
self.model.eval()
top1 = utils.AverageMeter('acc@1', ':6.2f')
top5 = utils.AverageMeter('acc@5', ':6.2f')
tbar = tqdm(self.valloader, desc='\r')
for i, (video, target) in enumerate(tbar):
video = video.to(self.device)
target = target.to(self.device)
with torch.no_grad():
pred = self.model(video)
loss = self.criterion(pred, target)
acc1, acc5 = utils.accuracy(pred, target, topk=(1, 5))
top1.update(acc1[0], args.batch_size)
top5.update(acc5[0], args.batch_size)
val_loss += loss.item()
tbar.set_description(
'val_loss: %.3f, acc1: %.3f, acc5: %.3f' %
(val_loss / (i + 1), top1.avg, top5.avg))
self.logger.info('val_loss: %.3f, acc1: %.3f, acc5: %.3f' %
(val_loss / len(self.valloader), top1.avg, top5.avg))
new_pred = (top1.avg + top5.avg) / 2
if new_pred > self.best_pred:
is_best = True
self.best_pred = new_pred
utils.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
}, self.args, is_best)
if __name__ == "__main__":
args = Options().parse()
torch.manual_seed(args.seed)
trainer = Trainer(args)
for epoch in range(args.start_epoch, args.epochs):
trainer.training(epoch)
if not args.no_val:
trainer.validation(epoch)
| 40.290179 | 92 | 0.559003 |
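A compact sketch of the partial checkpoint-loading pattern used in the resume branch above — copying only the parameters whose names exist in the current model (the model layout and state-dict keys below are illustrative):

import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

# Pretend this came from torch.load(checkpoint)['state_dict'];
# it may contain extra keys or miss some of the model's keys.
pretrained_dict = {"0.weight": torch.zeros(8, 4), "unknown.bias": torch.zeros(3)}

model_dict = model.state_dict()
for name, param in pretrained_dict.items():
    if name not in model_dict:
        continue                      # skip parameters the current model does not have
    model_dict[name].copy_(param)     # copy matching tensors in place

model.load_state_dict(model_dict)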
b69c27ecf70af2be4b48c6909829466b966dd285
| 513 |
py
|
Python
|
tools/glidein_ps.py
|
ddbox/glideinwms
|
1d0efbc1186ff9bd4cc3010fde6681b4cbe7cd54
|
[
"Apache-2.0"
] | null | null | null |
tools/glidein_ps.py
|
ddbox/glideinwms
|
1d0efbc1186ff9bd4cc3010fde6681b4cbe7cd54
|
[
"Apache-2.0"
] | null | null | null |
tools/glidein_ps.py
|
ddbox/glideinwms
|
1d0efbc1186ff9bd4cc3010fde6681b4cbe7cd54
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
# Description:
# Execute a ps command on a condor job
#
# Usage:
# glidein_ps.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>] [<options>]
import os.path
import sys
sys.path.append(os.path.join(sys.path[0], "../.."))
from glideinwms.tools.lib import glideinCmd
glideinCmd.exe_cmd_simple(lambda argv: (["ps", "-u", "`id", "-n", "-u`"] + argv))
| 24.428571 | 113 | 0.678363 |
e05eafcf7d984f0143b172044a41118ee7b4f318
| 412 |
py
|
Python
|
brobin/blog/migrations/0002_auto_20160131_1356.py
|
Brobin/brobin.me
|
4a625aedee4a9ebe35c14bdb3118afb98f97b48d
|
[
"Unlicense",
"MIT"
] | 4 |
2016-01-30T20:40:38.000Z
|
2020-11-24T12:23:30.000Z
|
brobin/blog/migrations/0002_auto_20160131_1356.py
|
Brobin/brobin.me
|
4a625aedee4a9ebe35c14bdb3118afb98f97b48d
|
[
"Unlicense",
"MIT"
] | 4 |
2021-04-08T18:29:06.000Z
|
2022-02-10T08:55:47.000Z
|
brobin/blog/migrations/0002_auto_20160131_1356.py
|
Brobin/brobin.me
|
4a625aedee4a9ebe35c14bdb3118afb98f97b48d
|
[
"Unlicense",
"MIT"
] | 3 |
2016-06-14T02:38:43.000Z
|
2020-11-24T13:46:49.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-31 19:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories'},
),
]
| 20.6 | 58 | 0.616505 |
ef17c6701d2406d7b64586dd8e6a9f65d70adb35
| 737 |
py
|
Python
|
recruiters/utils.py
|
miclemabasie/jobizar
|
1dde7aab4ba02de75517a5cf91d54b43ac41e3cb
|
[
"MIT"
] | null | null | null |
recruiters/utils.py
|
miclemabasie/jobizar
|
1dde7aab4ba02de75517a5cf91d54b43ac41e3cb
|
[
"MIT"
] | null | null | null |
recruiters/utils.py
|
miclemabasie/jobizar
|
1dde7aab4ba02de75517a5cf91d54b43ac41e3cb
|
[
"MIT"
] | null | null | null |
import string
import random
from django.utils.text import slugify
# Random string generator
def random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def random_slug_generator(instance, new_slug = None):
if new_slug:
slug = new_slug
else:
slug = slugify(instance.title)
klass = instance.__class__
instance_exist = klass.objects.filter(slug=slug).exists()
if instance_exist:
slug = slug
# create a random string
random_str = random_string_generator(size=4)
new_slug = f"{slug}-{random_str}"
return random_slug_generator(instance, new_slug=new_slug)
return slug
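# Usage sketch (editor addition, not part of the original app): exercising the
# string helper directly. random_slug_generator needs a saved Django model
# instance (a "title" field and a database lookup), so only
# random_string_generator is demonstrated here.
if __name__ == "__main__":
    print(random_string_generator())        # e.g. 'k3v9x0q1zm'
    print(random_string_generator(size=4))  # e.g. '7h2p'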
| 28.346154 | 83 | 0.700136 |
ded1b71830cad2aecf0e1bdfcae864b148ae366a
| 901 |
py
|
Python
|
description_parser.py
|
d-kozak/sdu-data-science
|
9197f615fcf9cd8f3305c3b5ea498afe32f0523a
|
[
"MIT"
] | null | null | null |
description_parser.py
|
d-kozak/sdu-data-science
|
9197f615fcf9cd8f3305c3b5ea498afe32f0523a
|
[
"MIT"
] | null | null | null |
description_parser.py
|
d-kozak/sdu-data-science
|
9197f615fcf9cd8f3305c3b5ea498afe32f0523a
|
[
"MIT"
] | null | null | null |
CLEAN = 0
DIRTY = 1
# picture size 227 * 227 * 3 == 154 587
class ImageData():
"""
This data class represents one loaded image in memory
"""
def __init__(self, filename, type):
self.type = type
self.filename = filename
def __str__(self):
return '(' + self.filename + ',' + str(self.type) + ')'
def __repr__(self):
return str(self)
def __iter__(self):
for item in [self.filename, self.type]:
yield item
def file_prefix_from_file_name(filename):
return filename.split('.')[0] + "_"
def parseDescriptions(filename):
    descriptions = []
    with open(filename, 'r') as file:
        for line in file:
            (filename, type) = line.split()
            descriptions.append(ImageData(filename, CLEAN if type == 'clean' else DIRTY))
    return descriptions
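# Expected description file format (editor note, illustrative content only):
# one "<filename> <clean|dirty>" pair per line, e.g.
#   img_001.jpg clean
#   img_002.jpg dirty
# which parseDescriptions turns into
# [ImageData('img_001.jpg', CLEAN), ImageData('img_002.jpg', DIRTY)].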
desc = parseDescriptions('description_template.txt')
| 23.710526 | 88 | 0.612653 |
9ff4c3183764982c116d4d96e7b41d86a4ab99a8
| 2,428 |
py
|
Python
|
src/filter_data.py
|
TheCoderChris/Hypixel_Stats
|
5334608dc8e5e1b8d8ce80e156a462bcc98f79be
|
[
"MIT"
] | null | null | null |
src/filter_data.py
|
TheCoderChris/Hypixel_Stats
|
5334608dc8e5e1b8d8ce80e156a462bcc98f79be
|
[
"MIT"
] | null | null | null |
src/filter_data.py
|
TheCoderChris/Hypixel_Stats
|
5334608dc8e5e1b8d8ce80e156a462bcc98f79be
|
[
"MIT"
] | null | null | null |
import json
data_keys = []
def nested_set(dic: dict, keys: list, value):
for key in keys[:-1]:
dic = dic.setdefault(key, {})
dic[keys[-1]] = value
return dic
def safeget(dct, *keys):
for key in keys:
try:
dct = dct[key]
except KeyError:
return None
return dct
def better_filter(data: dict, filter: dict, keys: list = []):
global data_keys
output = {}
for key in filter:
if isinstance(filter[key], dict):
keyss = keys.copy()
keyss.append(key)
output[key] = better_filter(data, filter[key], keyss)
else:
temp = keys.copy()
temp.append(key)
data_keys.append(temp)
output[key] = safeget(data, *keys)
return output
def filterit(data, filter_data):
output = {}
for keys in data_keys:
keyss = keys.copy()
keyss.pop(len(keyss)-2)
keysss = keys.copy()
keysss.pop(len(keysss)-1)
keysss.append(keyss[len(keyss)-1])
value = safeget(data, *keyss)
output = nested_set(filter_data, keysss, value)
return output
def dict_depth(my_dict):
if isinstance(my_dict, dict):
return 1 + (max(map(dict_depth, my_dict.values()))
if my_dict else 0)
return 0
def remove_upper(data: dict):
if len(data) == 1 and dict_depth(data) > 2:
data = data[list(data.keys())[0]]
data = remove_upper(data)
return data
def con_to_dct(filter):
res = {}
for key in filter:
if isinstance(filter[key], dict):
res[key] = con_to_dct(filter[key])
elif isinstance(filter[key], list):
res.update({key: {}})
for i, a in enumerate(filter[key]):
res[key].update({filter[key][i]: None})
return res
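# Worked example (editor addition): con_to_dct expands a list-style filter spec
# into the nested dict shape that better_filter/filterit operate on, e.g.
#   con_to_dct({"stats": {"Bedwars": ["wins", "kills"]}})
#   -> {"stats": {"Bedwars": {"wins": None, "kills": None}}}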
def main(data: dict, data_filter: dict, remove_unnecessary_keys = True):
if list(data.keys()) == ["success", "player"]:
data = data["player"]
filter_obj = con_to_dct(data_filter)
better_filter(data, filter_obj)
filterit(data, filter_obj)
if remove_unnecessary_keys:
filter_obj = remove_upper(filter_obj)
return filter_obj
# Yes this code is ( ノ ゚ー゚)ノ
| 20.752137 | 73 | 0.535008 |
03c95dfc96254e1017ad7e84aca3af021ac44d3a
| 927 |
py
|
Python
|
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureParameterGrouping/autorestparametergroupingtestservice/models/parameter_grouping_post_optional_parameters.py
|
fhoering/autorest
|
b36c77ebb6a5c92aca72eea0894a683506af5817
|
[
"MIT"
] | null | null | null |
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureParameterGrouping/autorestparametergroupingtestservice/models/parameter_grouping_post_optional_parameters.py
|
fhoering/autorest
|
b36c77ebb6a5c92aca72eea0894a683506af5817
|
[
"MIT"
] | null | null | null |
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureParameterGrouping/autorestparametergroupingtestservice/models/parameter_grouping_post_optional_parameters.py
|
fhoering/autorest
|
b36c77ebb6a5c92aca72eea0894a683506af5817
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ParameterGroupingPostOptionalParameters(Model):
"""Additional parameters for the parameterGrouping_postOptional operation.
:param custom_header:
:type custom_header: str
:param query: Query parameter with default. Default value: 30 .
:type query: int
"""
def __init__(self, custom_header=None, query=30):
self.custom_header = custom_header
self.query = query
| 34.333333 | 78 | 0.621359 |
173c840b181907d793dcdd1c4b9c26d306239eba
| 2,342 |
py
|
Python
|
_static/src/python/Radar/SAR/Imaging/demo_GEN_SAR.py
|
metai/aitrace
|
8e42001f5e3a187801764f413b6ee2f32476c6d1
|
[
"MIT"
] | 1 |
2022-01-03T06:13:31.000Z
|
2022-01-03T06:13:31.000Z
|
_static/src/python/Radar/SAR/Imaging/demo_GEN_SAR.py
|
metai/aitrace
|
8e42001f5e3a187801764f413b6ee2f32476c6d1
|
[
"MIT"
] | null | null | null |
_static/src/python/Radar/SAR/Imaging/demo_GEN_SAR.py
|
metai/aitrace
|
8e42001f5e3a187801764f413b6ee2f32476c6d1
|
[
"MIT"
] | 1 |
2022-01-03T06:13:33.000Z
|
2022-01-03T06:13:33.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from skimage import exposure
import utils
C = 299792458.0
PI = 3.1415926535898
As = 1.58 * PI / 180.0
# As = (90 - 80) * PI / 180.0
Ai = 58.2 * PI / 180.0
Ad = (90 - 58.2) * PI / 180.0
# Ad = (90 - 10.2) * PI / 180.0
F0 = 5.3e9
B = 30e6
Tp = 41.75e-4
Lr = 15.0
La = 1.5
PRF = 1.25698e+03
Fsa = PRF
Fsr = 32.317e+6
H = 793000.0
Vs = 7062
Vg = 7050
Vr = np.sqrt(Vs * Vg)
dt = 1 / (2 * B)
Rc = H / np.cos(PI / 2 - Ad)
Yc = Rc * np.sin(As)
Xc = np.sqrt(Rc**2 - H**2 - Yc**2)
print("Xc, Yc, Rc: ", Xc, Yc, Rc)
XX = 5590
Wl = C / F0
Kr = B / Tp
Ka = 1733
# Ka = (2 * Vr**2) / (Wl * R0)
print("ka: ", Ka, "Wl: ", Wl)
Tsas = 0
Tsae = 1.2219765
Tsa = Tsae - Tsas
Rmin = np.sqrt((Xc - XX)**2 + H**2)
Rmax = np.sqrt((Xc + XX)**2 + H**2)
Tsrs = 2 * Rmin / C
Tsre = 2 * Rmax / C
Tsr = Tsre - Tsrs
print("Tsrs, Tsre, Tsas, Tsae: ", Tsrs, Tsre, Tsas, Tsae)
etas = np.linspace(Tsas, Tsae, Fsa * Tsa)
print(etas.shape)
Na = etas.shape[0]
taus = np.linspace(Tsrs, Tsre, Fsr * Tsr)
print(taus.shape)
Nr = taus.shape[0]
print(Na, Nr)
targets = [[-512, -512, 1.0], [0, 0, 1.0], [512, 512, 1.0]]
for tg in targets:
tg[0] = tg[0] + Xc
tg[1] = tg[1] + Yc
# s = np.zeros(Na, Nr)
s = []
for eta in etas:
s_eta = 0.0
for tg in targets:
R0 = np.sqrt(H**2 + tg[1]**2)
eta_c = tg[1] / Vg
R_eta = np.sqrt(tg[0]**2 + H**2 + (tg[1] - Vr * eta)**2)
# print("R_eta, eta, eta_c: ", R_eta, eta, eta_c)
wrs = utils.rect((taus - 2 * R_eta / C) / Tp) * \
np.exp(1j * PI * Kr * (taus - 2 * R_eta / C)**2)
wa = utils.sinc(Lr * np.arctan(Vg * (eta - eta_c) / R0) / Wl)**2
tt = utils.rect((taus - 2 * R_eta / C) / Tp)
print(taus.min(), taus.max())
# print("tt ----> min, max", tt.min(), tt.max())
# print(wrs.max())
# print("wa: ", wa)
phase_eta = -1j * 4 * PI * F0 * R_eta / C + \
1j * PI * Kr * (taus - 2 * R_eta / C)**2
# print(phase_eta.min(), phase_eta.max())
s_eta = s_eta + tg[2] * wrs * wa * np.exp(phase_eta)
# print(np.sum(s_eta), tg[2])
s.append(s_eta)
s = np.array(s)
print("s.shape", s.shape)
print("s.min, s.max", s.min(), s.max())
extent = [-1024, 1024, -1024, 1024]
plt.figure()
plt.imshow(np.abs(s), extent=extent)
plt.show()
| 20.725664 | 72 | 0.514091 |
553c39bb041d7484c3bf3f880a5462efffcd4d4a
| 7,084 |
py
|
Python
|
src/lib/public_method.py
|
linlin-coder/STPG
|
ba4db9067348f7a4b839b7d16a42e4ffe7201df7
|
[
"Apache-2.0"
] | null | null | null |
src/lib/public_method.py
|
linlin-coder/STPG
|
ba4db9067348f7a4b839b7d16a42e4ffe7201df7
|
[
"Apache-2.0"
] | null | null | null |
src/lib/public_method.py
|
linlin-coder/STPG
|
ba4db9067348f7a4b839b7d16a42e4ffe7201df7
|
[
"Apache-2.0"
] | null | null | null |
# !/usr/bin/python
# -*- coding: UTF-8 -*-
import configparser
import datetime
import os,shutil
import re
import subprocess
import sys
import yaml
bindir = os.path.realpath(os.path.dirname(__file__))
class Log():
def __init__(self,filename,funcname=''):
self.filename = filename
self.funcname = funcname
def _format(self,level,message):
date_now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
formatter = ''
if self.funcname == '':
formatter = '\n{0} - {1} - {2} - {3} \n'.format(date_now,self.filename,level,message)
else:
formatter = '\n{0} - {1} - {2} - {3} - {4} \n'.format(date_now,self.filename,self.funcname,level,message)
return formatter
def info(self, message):
sys.stdout.write(self._format('INFO',message=message))
def debug(self, message):
sys.stdout.write(self._format('DEBUG',message=message))
def warning(self, message):
sys.stdout.write(self._format('WARNING',message=message))
def error(self, message):
sys.stderr.write(self._format('ERROR',message=message))
sys.exit(1)
def fatal(self, message, exit_code=1):
sys.stderr.write(self._format('fatal', message=message))
sys.exit(exit_code)
std = Log(os.path.basename(__file__))
class myconf(configparser.ConfigParser):
def __init__(self,defaults=None):
configparser.ConfigParser.__init__(self,defaults=None,allow_no_value=True)
def optionxform(self, optionstr):
return optionstr
class Config():
def __init__(self, config_file, method=''):
self.config_file = config_file
self.method = method
self.check_file_exist()
self.config = self.read_config()
def check_file_exist(self):
if not os.path.exists(self.config_file):
std.error('file {} not exist,please check it!!!'.format(self.config_file))
def read_config(self):
config = myconf()
config.read(self.config_file)
return config
def all_block(self,title,head):
return self.config[title][head]
def return_block_list(self,title):
try:
data = self.config[title]
except :
return []
info = {}
for rn in data:
info[rn] = data[rn].rstrip().split(',')
return info
# class
# get realpath
def obtain_file_realpath(one_path):
if os.path.exists(one_path):
return os.path.realpath(one_path)
else:
return None
def makedir(indir,err=1):
if os.path.exists(indir):return
try:
os.makedirs(indir)
except Exception as e:
if os.system('mkdir -p {0}'.format(indir))!=0:
            std.error('Failed to create directory: {}'.format(indir))
def copy_target_dir(source_path, target_path):
if not os.path.exists(source_path):
        std.warning("Source directory is missing, please check; file copy failed. Directory: {0}".format(source_path))
return
if os.path.exists(target_path):
        std.warning("Output directory already exists and will be removed. Directory: {0}".format(target_path))
shutil.rmtree(target_path)
shutil.copytree(source_path, target_path)
# Set operation
def set_diff_operation(list1,list2):
set_1,set_2,set_more1,set_more2 = (),(),(),()
set_1 = set(list1)
set_2 = set(list2)
    # set operations
set_1_2 = set_1 & set_2
set_more1 = set_1 -set_1_2
set_more2 = set_2 -set_1_2
return set_more1,set_more2
## get recursively downstream jobs
def get_succeeds(job,order_relation):
recursiveSucceeds = [job]
succeeds = []
if job in order_relation:
succeeds = order_relation[job]
if len(succeeds) >0:
for each in succeeds:
recursiveSucceeds.extend(get_succeeds(each,order_relation))
return recursiveSucceeds
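# Worked example (editor addition): with order_relation = {'a': ['b', 'c'], 'b': ['d']},
# get_succeeds('a', order_relation) returns ['a', 'b', 'd', 'c'] -- the job itself
# followed by every downstream job, depth first.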
def subprocess_run(cmd):
std.info('Running command is:{0}'.format(cmd))
back = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8",timeout=1)#,timeout=1,encoding='utf-8',
if back.returncode == 0:
std.info('CMD:{}\texecute success'.format(cmd))
return back.stdout
else:
std.error('CMD:{}\texecute failed !!!'.format(cmd))
return back
def read_yaml(files):
with open(files, "r") as f:
cfg = f.read()
config = yaml.load(cfg, Loader=yaml.SafeLoader)
#print(config)
return(config)
class Dict2Obj(object):
    """Convert a dictionary into an object with attribute access."""
def __init__(self, dictionary):
"""Constructor"""
for key in dictionary:
setattr(self, key, dictionary[key])
    def __repr__(self):
        """Return a readable string form when the instance is printed or passed to str()."""
return "<Dict2Obj: %s>" % self.__dict__
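# Example (editor addition):
#   cfg = Dict2Obj({'outdir': '/tmp/stpg', 'threads': 4})
#   cfg.threads  -> 4
#   repr(cfg)    -> "<Dict2Obj: {'outdir': '/tmp/stpg', 'threads': 4}>"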
def ReadConfig(file_list):
    pat = re.compile(r'\[(\S+)\]')
record = {}
para = {}
db = {}
header = ''
count = {}
    s_index = {} ## records each table section's occurrence index
for infile in file_list:
# f_file=open(infile)
with open(infile, 'r', encoding='utf-8') as f_config:
f_file = f_config.readlines()
for line in f_file:
line=line.strip()
if line.startswith('#') or not line:continue
elif line.startswith('['):
match = pat.search(line)
if match :
header = match.group(1)
if header not in count :
count[header] = 0
record[header] = []
s_index[header] = []
else:
count[header] += 1
else:
if header == 'Para':
tmp = [i.strip() for i in line.rstrip().split('=',1) ]
if len(tmp) < 2 :
sys.stderr.write("Error:{0} is lack of value".format(line.rstrip()))
sys.exit(1)
else:
para[tmp[0]] = tmp[1]
if header == 'DB':
tmp = [i.strip() for i in line.rstrip().split('=',1) ]
if len(tmp) < 2 :
sys.stderr.write("Error:{0} is lack of value".format(line.rstrip()))
sys.exit(1)
else:
db[tmp[0]] = tmp[1]
else:
tmp = [i.strip() for i in re.split('[=\t]',line) ]#line.rstrip().split('=',1) ]#line.rstrip().split('\t')
record[header].append(tmp)
s_index[header].append(count[header])
# f_file.close()
# print(record)
return record,para,db,s_index
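# Input sketch (editor addition, illustrative file only): ReadConfig expects
# INI-like sections, e.g.
#   [Para]
#   outdir = /path/to/out
#   [Sample]
#   s1<TAB>raw_1.fq
# Key/value lines under [Para] and [DB] fill the para and db dicts; data lines
# are split on '=' or tab characters and appended to record[section], while
# s_index notes, for each line, which occurrence of its section it belongs to.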
class multidict(dict):
def __getitem__(self, item):
try:
return dict.__getitem__(self,item)
except KeyError:
value = self[item] = type(self)()
return value
def __missing__(self, key):
value = self[key] = type(self)()
return value
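# Example (editor addition): multidict auto-creates missing nested levels,
#   d = multidict()
#   d['a']['b'] = 1
#   d -> {'a': {'b': 1}}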
| 32.347032 | 146 | 0.55096 |
9cab341bf5c954b01c6016fbf4757ed295dbb54c
| 5,406 |
py
|
Python
|
functions/slackbot-tv-search-result/package/function.py
|
c0dyhi11/sickchill-slackbot
|
d3cbd270332d2276441be1ce858c0b83a7207d27
|
[
"Apache-2.0"
] | null | null | null |
functions/slackbot-tv-search-result/package/function.py
|
c0dyhi11/sickchill-slackbot
|
d3cbd270332d2276441be1ce858c0b83a7207d27
|
[
"Apache-2.0"
] | 1 |
2021-09-08T15:58:29.000Z
|
2021-09-08T15:58:29.000Z
|
functions/slackbot-tv-search-result/package/function.py
|
c0dyhi11/sickchill-slackbot
|
d3cbd270332d2276441be1ce858c0b83a7207d27
|
[
"Apache-2.0"
] | null | null | null |
import json
import requests
import urllib.parse
from helper import fetch_kube_data
from flask import request, Response
from flask import current_app
def get_search_results(url, search_text):
headers = {'Accept': 'application/json'}
print(search_text)
url += urllib.parse.quote(search_text)
rest_call = requests.get(url, headers=headers)
return json.loads(rest_call.text)
def build_message(shows, search_text, user):
url_prefix = 'https://www.thetvdb.com/?tab=season&seriesid='
payload = json.loads("""
{
"blocks": [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "",
"emoji": true
}
},
{
"type": "section",
"block_id": "section1",
"text": {
"type": "mrkdwn",
"text": ""
}
},
{
"type": "divider"
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "Select a show from above to be added to the search queue:"
},
"accessory": {
"type": "static_select",
"placeholder": {
"type": "plain_text",
"text": "Select a show",
"emoji": false
},
"options": []
}
}
]
}
""")
text_header = "Here are the search results for: {}".format(search_text)
text_body = "@{}\n".format(user)
options = []
show_results = shows['data']['results']
sort_shows_by_name = sorted(show_results, key=lambda i: i['name'])
sort_shows_by_attainment = sorted(sort_shows_by_name,
key=lambda i: i['in_show_list'],
reverse=True)
downloadable_show = False
for show in sort_shows_by_attainment:
if show['in_show_list']:
text_body += ":warning: <{}{}|{}> ({}) `[Already Added to Search Queue]`\n".format(url_prefix, str(show['tvdbid']), show['name'], show['first_aired'])
else:
downloadable_show = True
text_body += ":large_blue_circle: <{}{}| {}> ({})\n".format(url_prefix, str(show['tvdbid']), show['name'], show['first_aired'])
options.append({"text": {
"type": "plain_text",
"text": "{} ({})".format(show['name'], show['first_aired']),
"emoji": False
},
"value": "{}|{}".format(show['name'],str(show['tvdbid']))
})
options.append({"text": {
"type": "plain_text",
"text": "*Cancel*",
"emoji": False},
"value": "cancel"
})
payload['blocks'][3]['accessory']['options'] = options
if len(text_body) >= 3000:
text_body = "@{}\n:x: There were far too many results for your search term: *{}*.\nPlease add words to narrow your search!".format(user, search_text)
downloadable_show = False
elif len(sort_shows_by_attainment) == 0:
text_body += ":x: We were unable to find any TV Shows matching the text: *{}*\n Please try searching again with different keywords.".format(search_text)
if not downloadable_show:
del payload['blocks'][3]
del payload['blocks'][2]
payload['blocks'][0]['text']['text'] = text_header
payload['blocks'][1]['text']['text'] = text_body
return payload
def slack_webhook(url, payload):
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
print(json.dumps(payload))
rest_call = requests.post(url, headers=headers, data=json.dumps(payload))
return rest_call.text
def main():
sickchill_url = fetch_kube_data("secret", "default", "slackbot", "sickchill-url")
slack_url = fetch_kube_data("secret", "default", "slackbot", "slack-url")
request_body = json.loads(request.get_data().decode('utf-8'))
search_text = request_body['text']
user = request_body['user']
search_result = get_search_results(sickchill_url, search_text)
slack_message = build_message(search_result, search_text, user)
webhook_result = slack_webhook(slack_url, slack_message)
return webhook_result, 200
if __name__ == "__main__":
main()
| 42.234375 | 168 | 0.443951 |
6261c61cdca3bf4f9a4ff46a7fa1871ac3acbdb2
| 1,311 |
py
|
Python
|
appscan/admin.py
|
zx273983653/vulscan
|
787397e267c4e6469522ee0abe55b3e98f968d4a
|
[
"MIT"
] | 582 |
2019-02-23T09:23:33.000Z
|
2022-03-31T04:42:08.000Z
|
appscan/admin.py
|
git-wsf/vulscan
|
112f8d6104daecfaaad579f73029a26d56aaa9b3
|
[
"MIT"
] | 6 |
2019-03-20T10:37:48.000Z
|
2020-03-10T06:20:07.000Z
|
appscan/admin.py
|
git-wsf/vulscan
|
112f8d6104daecfaaad579f73029a26d56aaa9b3
|
[
"MIT"
] | 183 |
2019-02-23T06:00:18.000Z
|
2022-03-20T02:17:57.000Z
|
#coding=utf-8
from django.contrib import admin
# Register your models here.
from appscan.models import poc_list
from appscan.models import navigation,navigation_url,vul_scan, user_scan,vul_state
# POC data
class poc_listAdmin(admin.ModelAdmin):
    list_display = ('vulID','category','vulType','cvss','filename','appName','appPowerLink','appVersion','author','createDate','desc','install_requires','name','references','samples','updateDate','version',) # fields shown in the list view
admin.site.register(poc_list,poc_listAdmin)
# Navigation menu
class navigationAdmin(admin.ModelAdmin):
    list_display = ('id','nav_name',) # fields shown in the list view
admin.site.register(navigation,navigationAdmin)
# Navigation entries
class navigation_urlAdmin(admin.ModelAdmin):
    list_display = ('id','nav_name','nav_title','nav_url',) # fields shown in the list view
admin.site.register(navigation_url,navigation_urlAdmin)
# Vulnerability scans
class vul_scanAdmin(admin.ModelAdmin):
    list_display = ('id','username','appname','url', 'pocname', 'date', 'cvss') # fields shown in the list view
admin.site.register(vul_scan,vul_scanAdmin)
# User scans
class user_scanAdmin(admin.ModelAdmin):
    list_display = ('id', 'username', 'url', 'date')
admin.site.register(user_scan,user_scanAdmin)
# Vulnerability remediation status
class vul_stateAdmin(admin.ModelAdmin):
    list_display = ('id', 'url', 'vulname','cvss','state')
admin.site.register(vul_state, vul_stateAdmin)
| 33.615385 | 217 | 0.754386 |
24303fcbe11805798f91b7c3a6339f58d3ac546d
| 8,889 |
py
|
Python
|
src/oscar/test/factories/__init__.py
|
congnguyenthe/base-django-oscar
|
e328610026275ded385f837f20897af653563055
|
[
"BSD-3-Clause"
] | 1 |
2021-11-13T22:44:25.000Z
|
2021-11-13T22:44:25.000Z
|
src/oscar/test/factories/__init__.py
|
congnguyenthe/base-django-oscar
|
e328610026275ded385f837f20897af653563055
|
[
"BSD-3-Clause"
] | 7 |
2021-01-06T20:02:43.000Z
|
2022-03-12T00:42:59.000Z
|
src/oscar/test/factories/__init__.py
|
congnguyenthe/base-django-oscar
|
e328610026275ded385f837f20897af653563055
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
import datetime
import random
from decimal import Decimal as D
from django.conf import settings
from django.utils import timezone
from oscar.core.loading import get_class, get_model
from oscar.test.factories.address import * # noqa
from oscar.test.factories.basket import * # noqa
from oscar.test.factories.catalogue import * # noqa
from oscar.test.factories.contrib import * # noqa
from oscar.test.factories.customer import * # noqa
from oscar.test.factories.models import * # noqa
from oscar.test.factories.offer import * # noqa
from oscar.test.factories.order import * # noqa
from oscar.test.factories.partner import * # noqa
from oscar.test.factories.payment import * # noqa
from oscar.test.factories.voucher import * # noqa
from oscar.test.factories.wishlists import * # noqa
Basket = get_model('basket', 'Basket')
Free = get_class('shipping.methods', 'Free')
Voucher = get_model('voucher', 'Voucher')
OrderCreator = get_class('order.utils', 'OrderCreator')
OrderTotalCalculator = get_class('checkout.calculators',
'OrderTotalCalculator')
SurchargeApplicator = get_class('checkout.applicator', 'SurchargeApplicator')
Partner = get_model('partner', 'Partner')
StockRecord = get_model('partner', 'StockRecord')
PurchaseInfo = get_class('partner.strategy', 'PurchaseInfo')
Default = get_class('partner.strategy', 'Default')
StockRequired = get_class('partner.availability', 'StockRequired')
FixedPrice = get_class('partner.prices', 'FixedPrice')
Product = get_model('catalogue', 'Product')
ProductClass = get_model('catalogue', 'ProductClass')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
ProductImage = get_model('catalogue', 'ProductImage')
WeightBand = get_model('shipping', 'WeightBand')
WeightBased = get_model('shipping', 'WeightBased')
Range = get_model('offer', 'Range')
Condition = get_model('offer', 'Condition')
Benefit = get_model('offer', 'Benefit')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
def create_stockrecord(product=None, price_excl_tax=None, partner_sku=None,
num_in_stock=None, partner_name=None,
currency=settings.OSCAR_DEFAULT_CURRENCY,
partner_users=None):
if product is None:
product = create_product()
partner, __ = Partner.objects.get_or_create(name=partner_name or '')
if partner_users:
for user in partner_users:
partner.users.add(user)
if price_excl_tax is None:
price_excl_tax = D('9.99')
if partner_sku is None:
partner_sku = 'sku_%d_%d' % (product.id, random.randint(0, 10000))
return product.stockrecords.create(
partner=partner, partner_sku=partner_sku,
price_currency=currency,
price_excl_tax=price_excl_tax, num_in_stock=num_in_stock)
def create_purchase_info(record):
return PurchaseInfo(
price=FixedPrice(
record.price_currency,
record.price_excl_tax,
D('0.00') # Default to no tax
),
availability=StockRequired(
record.net_stock_level),
stockrecord=record
)
def create_product(upc=None, title="Dùmϻϒ title",
product_class="Dùmϻϒ item class",
partner_name=None, partner_sku=None, price=None,
num_in_stock=None, attributes=None,
partner_users=None, **kwargs):
"""
Helper method for creating products that are used in tests.
"""
product_class, __ = ProductClass._default_manager.get_or_create(
name=product_class)
product = product_class.products.model(
product_class=product_class,
title=title, upc=upc, **kwargs)
if kwargs.get('parent') and 'structure' not in kwargs:
product.structure = 'child'
if attributes:
for code, value in attributes.items():
# Ensure product attribute exists
product_class.attributes.get_or_create(name=code, code=code)
setattr(product.attr, code, value)
product.save()
# Shortcut for creating stockrecord
stockrecord_fields = [
price, partner_sku, partner_name, num_in_stock, partner_users]
if any([field is not None for field in stockrecord_fields]):
create_stockrecord(
product, price_excl_tax=price, num_in_stock=num_in_stock,
partner_users=partner_users, partner_sku=partner_sku,
partner_name=partner_name)
return product
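# Usage sketch (editor addition): a typical call from a test case, assuming a
# configured Django test database.
#   product = create_product(price=D('4.99'), num_in_stock=5)
#   basket = create_basket(empty=True)
#   basket.add_product(product)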
def create_product_image(product=None,
original=None,
caption='Dummy Caption',
display_order=None,
):
if not product:
product = create_product()
if not display_order:
if not product.images.all():
display_order = 0
else:
display_order = max(
[i.display_order for i in product.images.all()]) + 1
kwargs = {'product_id': product.id,
'original': original,
'display_order': display_order,
'caption': caption, }
return ProductImage.objects.create(**kwargs)
def create_basket(empty=False):
basket = Basket.objects.create()
basket.strategy = Default()
if not empty:
product = create_product()
create_stockrecord(product, num_in_stock=2)
basket.add_product(product)
return basket
def create_order(number=None, basket=None, user=None, shipping_address=None,
shipping_method=None, billing_address=None,
total=None, **kwargs):
"""
Helper method for creating an order for testing
"""
if not basket:
basket = Basket.objects.create()
basket.strategy = Default()
product = create_product()
create_stockrecord(
product, num_in_stock=10, price_excl_tax=D('10.00'))
basket.add_product(product)
if not basket.id:
basket.save()
if shipping_method is None:
shipping_method = Free()
shipping_charge = shipping_method.calculate(basket)
surcharges = SurchargeApplicator().get_applicable_surcharges(basket)
if total is None:
total = OrderTotalCalculator().calculate(basket, shipping_charge, surcharges)
kwargs['surcharges'] = surcharges
order = OrderCreator().place_order(
order_number=number,
user=user,
basket=basket,
shipping_address=shipping_address,
shipping_method=shipping_method,
shipping_charge=shipping_charge,
billing_address=billing_address,
total=total,
**kwargs)
basket.set_as_submitted()
return order
def create_offer(name="Dùmϻϒ offer", offer_type="Site",
max_basket_applications=None, range=None, condition=None,
benefit=None, priority=0, status=None, start=None, end=None):
"""
Helper method for creating an offer
"""
if range is None:
range, __ = Range.objects.get_or_create(
name="All products räñgë", includes_all_products=True)
if condition is None:
condition, __ = Condition.objects.get_or_create(
range=range, type=Condition.COUNT, value=1)
if benefit is None:
benefit, __ = Benefit.objects.get_or_create(
range=range, type=Benefit.PERCENTAGE, value=20)
if status is None:
status = ConditionalOffer.OPEN
# Create start and end date so offer is active
now = timezone.now()
if start is None:
start = now - datetime.timedelta(days=1)
if end is None:
end = now + datetime.timedelta(days=30)
return ConditionalOffer.objects.create(
name=name,
start_datetime=start,
end_datetime=end,
status=status,
offer_type=offer_type,
condition=condition,
benefit=benefit,
max_basket_applications=max_basket_applications,
priority=priority)
def create_voucher(**kwargs):
"""
Helper method for creating a voucher
"""
defaults = {
'name': "Dùmϻϒ voucher",
'code': "test",
'start_datetime': timezone.now(),
'end_datetime': timezone.now() + datetime.timedelta(days=12)
}
defaults.update(kwargs)
voucher = VoucherFactory(**defaults)
voucher.offers.add(create_offer(offer_type='Voucher'))
return voucher
def create_shipping_weight_based(default_weight=D(1)):
return WeightBased.objects.create(
default_weight=default_weight
)
def create_shipping_weight_band(upper_limit, charge, weight_based=None):
if not weight_based:
weight_based = create_shipping_weight_based()
return WeightBand.objects.create(
method=weight_based,
upper_limit=upper_limit,
charge=charge
)
| 35.134387 | 85 | 0.665879 |
b9262398f60d52217d4c038ab952524120b01872
| 430 |
py
|
Python
|
Recursion/Assignment-3/2 Simple Combination Sum LeetCode.py
|
techonair/Programming-Pathshala
|
155dce78484bb965f24289e241e02db4be16bd3b
|
[
"MIT"
] | null | null | null |
Recursion/Assignment-3/2 Simple Combination Sum LeetCode.py
|
techonair/Programming-Pathshala
|
155dce78484bb965f24289e241e02db4be16bd3b
|
[
"MIT"
] | null | null | null |
Recursion/Assignment-3/2 Simple Combination Sum LeetCode.py
|
techonair/Programming-Pathshala
|
155dce78484bb965f24289e241e02db4be16bd3b
|
[
"MIT"
] | null | null | null |
# Link: https://leetcode.com/problems/combination-sum-iii/
k, n = map(int, input().split(" "))
ans = []
tmp = []
size = 0
sum = 0
i = 1
def isCombSum(n, k, i , size, sum, tmp):
if size == k and sum == n:
ans.append(tmp)
if i < 10 and size < k and sum < n:
isCombSum(n, k, i+1, size, sum, tmp)
isCombSum(n, k, i+1, size+1, sum+i, tmp+[i])
return ans
print(isCombSum(n, k, i, size, sum, tmp))
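# Example (editor addition): for the input "3 7" (k=3, n=7) the only set of
# three distinct digits from 1-9 summing to 7 is {1, 2, 4}, so the script
# prints [[1, 2, 4]].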
| 22.631579 | 58 | 0.548837 |
91d56b85e1a48dbbcc2ce4401ae0fd8aaa0f955a
| 5,791 |
py
|
Python
|
laserfiche_api/api/simple_searches_api.py
|
Layer8Err/laserfiche_api
|
8c9030c8f5cc245b61858bd096a1ad3c58cdbfd2
|
[
"BSD-2-Clause"
] | 1 |
2021-06-17T23:51:25.000Z
|
2021-06-17T23:51:25.000Z
|
laserfiche_api/api/simple_searches_api.py
|
Layer8Err/laserfiche_api
|
8c9030c8f5cc245b61858bd096a1ad3c58cdbfd2
|
[
"BSD-2-Clause"
] | null | null | null |
laserfiche_api/api/simple_searches_api.py
|
Layer8Err/laserfiche_api
|
8c9030c8f5cc245b61858bd096a1ad3c58cdbfd2
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
"""
Laserfiche API
Welcome to the Laserfiche API Swagger Playground. You can try out any of our API calls against your live Laserfiche Cloud account. Visit the developer center for more details: <a href=\"https://developer.laserfiche.com\">https://developer.laserfiche.com</a><p><strong>Build# : </strong>650780</p> # noqa: E501
OpenAPI spec version: 1-alpha
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from laserfiche_api.api_client import ApiClient
class SimpleSearchesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_simple_search_operation(self, repo_id, **kwargs): # noqa: E501
"""create_simple_search_operation # noqa: E501
- Runs a \"simple\" search operation on the repository. - Returns a truncated search result listing. - Search result listing may be truncated, depending on number of results. Additionally, searches may time out if they take too long. Use the other search route to run full searches. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_simple_search_operation(repo_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repo_id: The requested repository ID. (required)
:param SimpleSearchRequest body: The Laserfiche search command to run.
:return: ODataValueOfIListOfEntry
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_simple_search_operation_with_http_info(repo_id, **kwargs) # noqa: E501
else:
(data) = self.create_simple_search_operation_with_http_info(repo_id, **kwargs) # noqa: E501
return data
def create_simple_search_operation_with_http_info(self, repo_id, **kwargs): # noqa: E501
"""create_simple_search_operation # noqa: E501
- Runs a \"simple\" search operation on the repository. - Returns a truncated search result listing. - Search result listing may be truncated, depending on number of results. Additionally, searches may time out if they take too long. Use the other search route to run full searches. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_simple_search_operation_with_http_info(repo_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repo_id: The requested repository ID. (required)
:param SimpleSearchRequest body: The Laserfiche search command to run.
:return: ODataValueOfIListOfEntry
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['repo_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_simple_search_operation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'repo_id' is set
if ('repo_id' not in params or
params['repo_id'] is None):
raise ValueError("Missing the required parameter `repo_id` when calling `create_simple_search_operation`") # noqa: E501
collection_formats = {}
path_params = {}
if 'repo_id' in params:
path_params['repoId'] = params['repo_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Authorization'] # noqa: E501
return self.api_client.call_api(
'/v1-alpha/Repositories/{repoId}/SimpleSearches', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ODataValueOfIListOfEntry', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 42.270073 | 314 | 0.653773 |
d9fd26436c5cab68cb0f06f15c082e6ca540bcf6
| 2,819 |
py
|
Python
|
dizoo/classic_control/cartpole/config/parallel/cartpole_dqn_config.py
|
uuid0000/DI-engine
|
cc2713fa01e5288bae21cfeb595729d665e092d1
|
[
"Apache-2.0"
] | 1 |
2021-07-13T02:56:34.000Z
|
2021-07-13T02:56:34.000Z
|
dizoo/classic_control/cartpole/config/parallel/cartpole_dqn_config.py
|
uuid0000/DI-engine
|
cc2713fa01e5288bae21cfeb595729d665e092d1
|
[
"Apache-2.0"
] | null | null | null |
dizoo/classic_control/cartpole/config/parallel/cartpole_dqn_config.py
|
uuid0000/DI-engine
|
cc2713fa01e5288bae21cfeb595729d665e092d1
|
[
"Apache-2.0"
] | null | null | null |
from easydict import EasyDict
cartpole_dqn_config = dict(
env=dict(
collector_env_num=8,
collector_episode_num=2,
evaluator_env_num=5,
evaluator_episode_num=1,
stop_value=195,
),
policy=dict(
cuda=False,
model=dict(
obs_shape=4,
action_shape=2,
encoder_hidden_size_list=[128, 128, 64],
dueling=True,
),
nstep=3,
discount_factor=0.97,
learn=dict(
batch_size=32,
learning_rate=0.001,
learner=dict(
learner_num=1,
send_policy_freq=1,
),
),
collect=dict(
n_sample=16,
collector=dict(
collector_num=2,
update_policy_second=3,
),
),
eval=dict(evaluator=dict(eval_freq=50, )),
other=dict(
eps=dict(
type='exp',
start=0.95,
end=0.1,
decay=100000,
),
replay_buffer=dict(
replay_buffer_size=100000,
enable_track_used_data=False,
),
commander=dict(
collector_task_space=2,
learner_task_space=1,
eval_interval=5,
),
),
),
)
cartpole_dqn_config = EasyDict(cartpole_dqn_config)
main_config = cartpole_dqn_config
cartpole_dqn_create_config = dict(
env=dict(
type='cartpole',
import_names=['dizoo.classic_control.cartpole.envs.cartpole_env'],
),
env_manager=dict(type='base'),
policy=dict(type='dqn_command'),
learner=dict(type='base', import_names=['ding.worker.learner.base_learner']),
collector=dict(
type='zergling',
import_names=['ding.worker.collector.zergling_collector'],
),
commander=dict(
type='solo',
import_names=['ding.worker.coordinator.solo_parallel_commander'],
),
comm_learner=dict(
type='flask_fs',
import_names=['ding.worker.learner.comm.flask_fs_learner'],
),
comm_collector=dict(
type='flask_fs',
import_names=['ding.worker.collector.comm.flask_fs_collector'],
),
)
cartpole_dqn_create_config = EasyDict(cartpole_dqn_create_config)
create_config = cartpole_dqn_create_config
cartpole_dqn_system_config = dict(
coordinator=dict(),
path_data='./data',
path_policy='./policy',
communication_mode='auto',
learner_gpu_num=1,
)
cartpole_dqn_system_config = EasyDict(cartpole_dqn_system_config)
system_config = cartpole_dqn_system_config
if __name__ == '__main__':
from ding.entry.parallel_entry import parallel_pipeline
parallel_pipeline([main_config, create_config, system_config], seed=9)
| 27.910891 | 81 | 0.59099 |
3b06a941092d435dd7d000258568ed0d9b9a76f0
| 1,609 |
py
|
Python
|
util/config/validators/validate_oidc.py
|
dongboyan77/quay
|
8018e5bd80f17e6d855b58b7d5f2792d92675905
|
[
"Apache-2.0"
] | 1 |
2020-10-16T19:30:41.000Z
|
2020-10-16T19:30:41.000Z
|
util/config/validators/validate_oidc.py
|
dongboyan77/quay
|
8018e5bd80f17e6d855b58b7d5f2792d92675905
|
[
"Apache-2.0"
] | 15 |
2020-06-18T15:32:06.000Z
|
2022-03-03T23:06:24.000Z
|
util/config/validators/validate_oidc.py
|
dongboyan77/quay
|
8018e5bd80f17e6d855b58b7d5f2792d92675905
|
[
"Apache-2.0"
] | null | null | null |
from oauth.loginmanager import OAuthLoginManager
from oauth.oidc import OIDCLoginService, DiscoveryFailureException
from util.config.validators import BaseValidator, ConfigValidationException
class OIDCLoginValidator(BaseValidator):
name = "oidc-login"
@classmethod
def validate(cls, validator_context):
config = validator_context.config
client = validator_context.http_client
login_manager = OAuthLoginManager(config, client=client)
for service in login_manager.services:
if not isinstance(service, OIDCLoginService):
continue
if service.config.get("OIDC_SERVER") is None:
msg = "Missing OIDC_SERVER on OIDC service %s" % service.service_id()
raise ConfigValidationException(msg)
if service.config.get("CLIENT_ID") is None:
msg = "Missing CLIENT_ID on OIDC service %s" % service.service_id()
raise ConfigValidationException(msg)
if service.config.get("CLIENT_SECRET") is None:
msg = "Missing CLIENT_SECRET on OIDC service %s" % service.service_id()
raise ConfigValidationException(msg)
try:
if not service.validate():
msg = "Could not validate OIDC service %s" % service.service_id()
raise ConfigValidationException(msg)
except DiscoveryFailureException as dfe:
                msg = "Could not validate OIDC service %s: %s" % (service.service_id(), dfe)
raise ConfigValidationException(msg)
| 42.342105 | 100 | 0.649472 |
e4905dc7121b8c390d27c40a4a7f2964612b00cb
| 26,265 |
py
|
Python
|
main.py
|
MorningStarOvO/Mdetr
|
048dd063cc0290a153b1754368be1f2e1a3e6825
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
MorningStarOvO/Mdetr
|
048dd063cc0290a153b1754368be1f2e1a3e6825
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
MorningStarOvO/Mdetr
|
048dd063cc0290a153b1754368be1f2e1a3e6825
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# -------------------- Import required packages -------------------- #
# ----- System and filesystem utilities ----- #
import datetime
import os
import time
from pathlib import Path
# ----- Data loading ----- #
import json
# ----- Data processing ----- #
import random
from collections import namedtuple
from copy import deepcopy
import numpy as np
# ----- Model definition ----- #
from functools import partial
import torch
import torch.utils
from torch.utils.data import ConcatDataset, DataLoader, DistributedSampler
# ----- Command-line argument parsing ----- #
import argparse
# ----- Project-specific packages ----- #
import util.dist as dist
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from datasets.clevrref import ClevrRefEvaluator
from datasets.coco_eval import CocoEvaluator
from datasets.flickr_eval import FlickrEvaluator
from datasets.phrasecut_eval import PhrasecutEvaluator
from datasets.refexp import RefExpEvaluator
from engine import evaluate, train_one_epoch
from models import build_model
from models.postprocessors import build_postprocessors
# ----- Setting this avoids a runtime error with deterministic CUDA ops ----- #
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
def get_args_parser():
parser = argparse.ArgumentParser("Set transformer detector", add_help=False)
parser.add_argument("--run_name", default="", type=str)
# Dataset specific
parser.add_argument("--dataset_config", default=None, required=True)
parser.add_argument("--do_qa", action="store_true", help="Whether to do question answering")
parser.add_argument(
"--predict_final",
action="store_true",
help="If true, will predict if a given box is in the actual referred set. Useful for CLEVR-Ref+ only currently.",
)
parser.add_argument("--no_detection", action="store_true", help="Whether to train the detector")
parser.add_argument(
"--split_qa_heads", action="store_true", help="Whether to use a separate head per question type in vqa"
)
parser.add_argument(
"--combine_datasets", nargs="+", help="List of datasets to combine for training", default=["flickr"]
)
parser.add_argument(
"--combine_datasets_val", nargs="+", help="List of datasets to combine for eval", default=["flickr"]
)
parser.add_argument("--coco_path", type=str, default="")
parser.add_argument("--vg_img_path", type=str, default="")
parser.add_argument("--vg_ann_path", type=str, default="")
parser.add_argument("--clevr_img_path", type=str, default="")
parser.add_argument("--clevr_ann_path", type=str, default="")
parser.add_argument("--phrasecut_ann_path", type=str, default="")
parser.add_argument(
"--phrasecut_orig_ann_path",
type=str,
default="",
)
parser.add_argument("--modulated_lvis_ann_path", type=str, default="")
# Training hyper-parameters
parser.add_argument("--lr", default=1e-4, type=float)
parser.add_argument("--lr_backbone", default=1e-5, type=float)
parser.add_argument("--text_encoder_lr", default=5e-5, type=float)
parser.add_argument("--batch_size", default=2, type=int)
parser.add_argument("--weight_decay", default=1e-4, type=float)
parser.add_argument("--epochs", default=40, type=int)
parser.add_argument("--lr_drop", default=35, type=int)
parser.add_argument(
"--epoch_chunks",
default=-1,
type=int,
help="If greater than 0, will split the training set into chunks and validate/checkpoint after each chunk",
)
parser.add_argument("--optimizer", default="adam", type=str)
parser.add_argument("--clip_max_norm", default=0.1, type=float, help="gradient clipping max norm")
parser.add_argument(
"--eval_skip",
default=1,
type=int,
help='do evaluation every "eval_skip" frames',
)
parser.add_argument(
"--schedule",
default="linear_with_warmup",
type=str,
choices=("step", "multistep", "linear_with_warmup", "all_linear_with_warmup"),
)
parser.add_argument("--ema", action="store_true")
parser.add_argument("--ema_decay", type=float, default=0.9998)
parser.add_argument("--fraction_warmup_steps", default=0.01, type=float, help="Fraction of total number of steps")
# Model parameters
parser.add_argument(
"--frozen_weights",
type=str,
default=None,
help="Path to the pretrained model. If set, only the mask head will be trained",
)
parser.add_argument(
"--freeze_text_encoder", action="store_true", help="Whether to freeze the weights of the text encoder"
)
parser.add_argument(
"--text_encoder_type",
default="roberta-base",
choices=("roberta-base", "distilroberta-base", "roberta-large"),
)
# Backbone
parser.add_argument(
"--backbone",
default="resnet101",
type=str,
help="Name of the convolutional backbone to use such as resnet50 resnet101 timm_tf_efficientnet_b3_ns",
)
parser.add_argument(
"--dilation",
action="store_true",
help="If true, we replace stride with dilation in the last convolutional block (DC5)",
)
parser.add_argument(
"--position_embedding",
default="sine",
type=str,
choices=("sine", "learned"),
help="Type of positional embedding to use on top of the image features",
)
# Transformer
parser.add_argument(
"--enc_layers",
default=6,
type=int,
help="Number of encoding layers in the transformer",
)
parser.add_argument(
"--dec_layers",
default=6,
type=int,
help="Number of decoding layers in the transformer",
)
parser.add_argument(
"--dim_feedforward",
default=2048,
type=int,
help="Intermediate size of the feedforward layers in the transformer blocks",
)
parser.add_argument(
"--hidden_dim",
default=256,
type=int,
help="Size of the embeddings (dimension of the transformer)",
)
parser.add_argument("--dropout", default=0.1, type=float, help="Dropout applied in the transformer")
parser.add_argument(
"--nheads",
default=8,
type=int,
help="Number of attention heads inside the transformer's attentions",
)
parser.add_argument("--num_queries", default=100, type=int, help="Number of query slots")
parser.add_argument("--pre_norm", action="store_true")
parser.add_argument(
"--no_pass_pos_and_query",
dest="pass_pos_and_query",
action="store_false",
help="Disables passing the positional encodings to each attention layers",
)
# Segmentation
parser.add_argument(
"--mask_model",
default="none",
type=str,
choices=("none", "smallconv", "v2"),
help="Segmentation head to be used (if None, segmentation will not be trained)",
)
parser.add_argument("--remove_difficult", action="store_true")
parser.add_argument("--masks", action="store_true")
# Loss
parser.add_argument(
"--no_aux_loss",
dest="aux_loss",
action="store_false",
help="Disables auxiliary decoding losses (loss at each layer)",
)
parser.add_argument(
"--set_loss",
default="hungarian",
type=str,
choices=("sequential", "hungarian", "lexicographical"),
help="Type of matching to perform in the loss",
)
parser.add_argument("--contrastive_loss", action="store_true", help="Whether to add contrastive loss")
parser.add_argument(
"--no_contrastive_align_loss",
dest="contrastive_align_loss",
action="store_false",
help="Whether to add contrastive alignment loss",
)
parser.add_argument(
"--contrastive_loss_hdim",
type=int,
default=64,
help="Projection head output size before computing normalized temperature-scaled cross entropy loss",
)
parser.add_argument(
"--temperature_NCE", type=float, default=0.07, help="Temperature in the temperature-scaled cross entropy loss"
)
# * Matcher
parser.add_argument(
"--set_cost_class",
default=1,
type=float,
help="Class coefficient in the matching cost",
)
parser.add_argument(
"--set_cost_bbox",
default=5,
type=float,
help="L1 box coefficient in the matching cost",
)
parser.add_argument(
"--set_cost_giou",
default=2,
type=float,
help="giou box coefficient in the matching cost",
)
# Loss coefficients
parser.add_argument("--ce_loss_coef", default=1, type=float)
parser.add_argument("--mask_loss_coef", default=1, type=float)
parser.add_argument("--dice_loss_coef", default=1, type=float)
parser.add_argument("--bbox_loss_coef", default=5, type=float)
parser.add_argument("--giou_loss_coef", default=2, type=float)
parser.add_argument("--qa_loss_coef", default=1, type=float)
parser.add_argument(
"--eos_coef",
default=0.1,
type=float,
help="Relative classification weight of the no-object class",
)
parser.add_argument("--contrastive_loss_coef", default=0.1, type=float)
parser.add_argument("--contrastive_align_loss_coef", default=1, type=float)
# Run specific
parser.add_argument("--test", action="store_true", help="Whether to run evaluation on val or test set")
parser.add_argument("--test_type", type=str, default="test", choices=("testA", "testB", "test"))
parser.add_argument("--output-dir", default="", help="path where to save, empty for no saving")
parser.add_argument("--device", default="cuda", help="device to use for training / testing")
parser.add_argument("--seed", default=42, type=int)
parser.add_argument("--resume", default="", help="resume from checkpoint")
parser.add_argument("--load", default="", help="resume from checkpoint")
parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
parser.add_argument("--eval", action="store_true", help="Only run evaluation")
parser.add_argument("--num_workers", default=5, type=int)
# Distributed training parameters
parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
return parser
def main(args):
# Init distributed mode
dist.init_distributed_mode(args)
# Update dataset specific configs
if args.dataset_config is not None:
# https://stackoverflow.com/a/16878364
d = vars(args)
with open(args.dataset_config, "r") as f:
cfg = json.load(f)
d.update(cfg)
print("git:\n {}\n".format(utils.get_sha()))
# Segmentation related
if args.mask_model != "none":
args.masks = True
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
output_dir = Path(args.output_dir)
# fix the seed for reproducibility
seed = args.seed + dist.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.set_deterministic(True)
# Build the model
model, criterion, contrastive_criterion, qa_criterion, weight_dict = build_model(args)
model.to(device)
assert (
criterion is not None or qa_criterion is not None
), "Error: should train either detection or question answering (or both)"
# Get a copy of the model for exponential moving averaged version of the model
model_ema = deepcopy(model) if args.ema else None
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("number of params:", n_parameters)
# Set up optimizers
param_dicts = [
{
"params": [
p
for n, p in model_without_ddp.named_parameters()
if "backbone" not in n and "text_encoder" not in n and p.requires_grad
]
},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "text_encoder" in n and p.requires_grad],
"lr": args.text_encoder_lr,
},
]
if args.optimizer == "sgd":
optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
elif args.optimizer in ["adam", "adamw"]:
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
else:
raise RuntimeError(f"Unsupported optimizer {args.optimizer}")
# Train dataset
if len(args.combine_datasets) == 0 and not args.eval:
raise RuntimeError("Please provide at least one training dataset")
dataset_train, sampler_train, data_loader_train = None, None, None
if not args.eval:
dataset_train = ConcatDataset(
[build_dataset(name, image_set="train", args=args) for name in args.combine_datasets]
)
# To handle very big datasets, we chunk it into smaller parts.
if args.epoch_chunks > 0:
            print(
                f"Splitting the training set into {args.epoch_chunks} chunks of size approximately "
                f"{len(dataset_train) // args.epoch_chunks}"
)
chunks = torch.chunk(torch.arange(len(dataset_train)), args.epoch_chunks)
datasets = [torch.utils.data.Subset(dataset_train, chunk.tolist()) for chunk in chunks]
if args.distributed:
samplers_train = [DistributedSampler(ds) for ds in datasets]
else:
samplers_train = [torch.utils.data.RandomSampler(ds) for ds in datasets]
batch_samplers_train = [
torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
for sampler_train in samplers_train
]
assert len(batch_samplers_train) == len(datasets)
data_loaders_train = [
DataLoader(
ds,
batch_sampler=batch_sampler_train,
collate_fn=partial(utils.collate_fn, False),
num_workers=args.num_workers,
)
for ds, batch_sampler_train in zip(datasets, batch_samplers_train)
]
else:
if args.distributed:
sampler_train = DistributedSampler(dataset_train)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(
dataset_train,
batch_sampler=batch_sampler_train,
collate_fn=partial(utils.collate_fn, False),
num_workers=args.num_workers,
)
# Val dataset
if len(args.combine_datasets_val) == 0:
        raise RuntimeError("Please provide at least one validation dataset")
Val_all = namedtuple(typename="val_data", field_names=["dataset_name", "dataloader", "base_ds", "evaluator_list"])
val_tuples = []
for dset_name in args.combine_datasets_val:
dset = build_dataset(dset_name, image_set="val", args=args)
sampler = (
DistributedSampler(dset, shuffle=False) if args.distributed else torch.utils.data.SequentialSampler(dset)
)
dataloader = DataLoader(
dset,
args.batch_size,
sampler=sampler,
drop_last=False,
collate_fn=partial(utils.collate_fn, False),
num_workers=args.num_workers,
)
base_ds = get_coco_api_from_dataset(dset)
val_tuples.append(Val_all(dataset_name=dset_name, dataloader=dataloader, base_ds=base_ds, evaluator_list=None))
if args.frozen_weights is not None:
if args.resume.startswith("https"):
checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location="cpu", check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location="cpu")
if "model_ema" in checkpoint and checkpoint["model_ema"] is not None:
model_without_ddp.detr.load_state_dict(checkpoint["model_ema"], strict=False)
else:
model_without_ddp.detr.load_state_dict(checkpoint["model"], strict=False)
if args.ema:
model_ema = deepcopy(model_without_ddp)
# Used for loading weights from another model and starting a training from scratch. Especially useful if
# loading into a model with different functionality.
if args.load:
print("loading from", args.load)
checkpoint = torch.load(args.load, map_location="cpu")
if "model_ema" in checkpoint:
model_without_ddp.load_state_dict(checkpoint["model_ema"], strict=False)
else:
model_without_ddp.load_state_dict(checkpoint["model"], strict=False)
if args.ema:
model_ema = deepcopy(model_without_ddp)
# Used for resuming training from the checkpoint of a model. Used when training times-out or is pre-empted.
if args.resume:
if args.resume.startswith("https"):
checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location="cpu", check_hash=True)
else:
            # Load the checkpoint the same way LXMERT does
checkpoint = torch.load(args.resume, map_location="cpu")
# for key in list(checkpoint.keys()):
# if '.module' in key:
# checkpoint[key.replace('.module', '')] = checkpoint.pop(key)
model_without_ddp.load_state_dict(checkpoint["model"])
if not args.eval and "optimizer" in checkpoint and "epoch" in checkpoint:
optimizer.load_state_dict(checkpoint["optimizer"])
args.start_epoch = checkpoint["epoch"] + 1
if args.ema:
if "model_ema" not in checkpoint:
print("WARNING: ema model not found in checkpoint, resetting to current model")
model_ema = deepcopy(model_without_ddp)
else:
model_ema.load_state_dict(checkpoint["model_ema"])
def build_evaluator_list(base_ds, dataset_name):
"""Helper function to build the list of evaluators for a given dataset"""
evaluator_list = []
if args.no_detection:
return evaluator_list
iou_types = ["bbox"]
if args.masks:
iou_types.append("segm")
evaluator_list.append(CocoEvaluator(base_ds, tuple(iou_types), useCats=False))
if "refexp" in dataset_name:
evaluator_list.append(RefExpEvaluator(base_ds, ("bbox")))
if "clevrref" in dataset_name:
evaluator_list.append(ClevrRefEvaluator(base_ds, ("bbox")))
if "flickr" in dataset_name:
evaluator_list.append(
FlickrEvaluator(
args.flickr_dataset_path,
subset="test" if args.test else "val",
merge_boxes=args.GT_type == "merged",
)
)
if "phrasecut" in dataset_name:
evaluator_list.append(
PhrasecutEvaluator(
"test" if args.test else "miniv",
ann_folder=args.phrasecut_orig_ann_path,
output_dir=os.path.join(output_dir, "phrasecut_eval"),
eval_mask=args.masks,
)
)
return evaluator_list
# Runs only evaluation, by default on the validation set unless --test is passed.
if args.eval:
test_stats = {}
test_model = model_ema if model_ema is not None else model
for i, item in enumerate(val_tuples):
evaluator_list = build_evaluator_list(item.base_ds, item.dataset_name)
postprocessors = build_postprocessors(args, item.dataset_name)
item = item._replace(evaluator_list=evaluator_list)
print(f"Evaluating {item.dataset_name}")
curr_test_stats = evaluate(
model=test_model,
criterion=criterion,
contrastive_criterion=contrastive_criterion,
qa_criterion=qa_criterion,
postprocessors=postprocessors,
weight_dict=weight_dict,
data_loader=item.dataloader,
evaluator_list=item.evaluator_list,
device=device,
args=args,
)
test_stats.update({item.dataset_name + "_" + k: v for k, v in curr_test_stats.items()})
log_stats = {
**{f"test_{k}": v for k, v in test_stats.items()},
"n_parameters": n_parameters,
}
print(log_stats)
return
# Runs training and evaluates after every --eval_skip epochs
print("Start training")
start_time = time.time()
best_metric = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.epoch_chunks > 0:
sampler_train = samplers_train[epoch % len(samplers_train)]
data_loader_train = data_loaders_train[epoch % len(data_loaders_train)]
print(f"Starting epoch {epoch // len(data_loaders_train)}, sub_epoch {epoch % len(data_loaders_train)}")
else:
print(f"Starting epoch {epoch}")
if args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(
model=model,
criterion=criterion,
contrastive_criterion=contrastive_criterion,
qa_criterion=qa_criterion,
data_loader=data_loader_train,
weight_dict=weight_dict,
optimizer=optimizer,
device=device,
epoch=epoch,
args=args,
max_norm=args.clip_max_norm,
model_ema=model_ema,
)
if args.output_dir:
checkpoint_paths = [output_dir / "checkpoint.pth"]
# extra checkpoint before LR drop and every 2 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 2 == 0:
checkpoint_paths.append(output_dir / f"checkpoint{epoch:04}.pth")
for checkpoint_path in checkpoint_paths:
dist.save_on_master(
{
"model": model_without_ddp.state_dict(),
"model_ema": model_ema.state_dict() if args.ema else None,
"optimizer": optimizer.state_dict(),
"epoch": epoch,
"args": args,
},
checkpoint_path,
)
if epoch % args.eval_skip == 0:
test_stats = {}
test_model = model_ema if model_ema is not None else model
for i, item in enumerate(val_tuples):
evaluator_list = build_evaluator_list(item.base_ds, item.dataset_name)
item = item._replace(evaluator_list=evaluator_list)
postprocessors = build_postprocessors(args, item.dataset_name)
print(f"Evaluating {item.dataset_name}")
curr_test_stats = evaluate(
model=test_model,
criterion=criterion,
contrastive_criterion=contrastive_criterion,
qa_criterion=qa_criterion,
postprocessors=postprocessors,
weight_dict=weight_dict,
data_loader=item.dataloader,
evaluator_list=item.evaluator_list,
device=device,
args=args,
)
test_stats.update({item.dataset_name + "_" + k: v for k, v in curr_test_stats.items()})
else:
test_stats = {}
log_stats = {
**{f"train_{k}": v for k, v in train_stats.items()},
**{f"test_{k}": v for k, v in test_stats.items()},
"epoch": epoch,
"n_parameters": n_parameters,
}
if args.output_dir and dist.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
if epoch % args.eval_skip == 0:
if args.do_qa:
metric = test_stats["gqa_accuracy_answer_total_unscaled"]
else:
metric = np.mean([v[1] for k, v in test_stats.items() if "coco_eval_bbox" in k])
if args.output_dir and metric > best_metric:
best_metric = metric
checkpoint_paths = [output_dir / "BEST_checkpoint.pth"]
# checkpoint holding the best validation metric seen so far
for checkpoint_path in checkpoint_paths:
dist.save_on_master(
{
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"epoch": epoch,
"args": args,
},
checkpoint_path,
)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("Training time {}".format(total_time_str))
if __name__ == "__main__":
parser = argparse.ArgumentParser("DETR training and evaluation script", parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| 39.675227 | 121 | 0.62193 |
83a0a9054cf38560a71c6f6f0182a6a0c0245c8d
| 13,134 |
py
|
Python
|
inferelator/utils/loader.py
|
meichenfang/inferelator
|
47f8ebcc5f303264a75814897c52026b47c57aef
|
[
"BSD-2-Clause"
] | null | null | null |
inferelator/utils/loader.py
|
meichenfang/inferelator
|
47f8ebcc5f303264a75814897c52026b47c57aef
|
[
"BSD-2-Clause"
] | null | null | null |
inferelator/utils/loader.py
|
meichenfang/inferelator
|
47f8ebcc5f303264a75814897c52026b47c57aef
|
[
"BSD-2-Clause"
] | null | null | null |
import pandas as pd
import pandas.api.types as pat
import numpy as np
import os
import copy as cp
import anndata
from inferelator.utils.data import InferelatorData
from inferelator.utils.debug import Debug
from inferelator.preprocessing.metadata_parser import MetadataHandler
DEFAULT_PANDAS_TSV_SETTINGS = dict(sep="\t", index_col=0, header=0)
DEFAULT_METADATA = "branching"
_TENX_MTX = ("matrix.mtx.gz", "matrix.mtx")
_TENX_BARCODES = ("barcodes.tsv.gz", "barcodes.tsv")
_TENX_FEATURES = ("features.tsv.gz", "genes.tsv")
class InferelatorDataLoader(object):
input_dir = None
_file_format_settings = None
def __init__(self, input_dir, file_format_settings=None):
self.input_dir = input_dir
self._file_format_settings = file_format_settings
def load_data_h5ad(self, h5ad_file, meta_data_file=None, meta_data_handler=DEFAULT_METADATA, gene_data_file=None,
gene_name_column=None, use_layer=None):
data = anndata.read_h5ad(self.input_path(h5ad_file))
if meta_data_file is None and data.obs.shape[1] > 0:
meta_data = None
else:
meta_data = self.load_metadata_tsv(meta_data_file, data.obs_names, meta_data_handler=meta_data_handler)
gene_metadata = self.load_gene_metadata_tsv(gene_data_file, gene_name_column)
if use_layer is not None and use_layer not in data.layers:
msg = "Layer {lay} is not in {f}".format(lay=use_layer, f=h5ad_file)
raise ValueError(msg)
# Build an InferelatorData object from a layer
elif use_layer is not None:
data = InferelatorData(data.layers[use_layer],
gene_names=data.var_names,
sample_names=data.obs_names,
meta_data=pd.concat((data.obs, meta_data), axis=1),
gene_data=pd.concat((data.var, gene_metadata), axis=1))
# Build an InferelatorData object from everything
else:
data = InferelatorData(data,
meta_data=meta_data,
gene_data=gene_metadata)
# Make sure bytestrings are decoded
_safe_dataframe_decoder(data.gene_data)
_safe_dataframe_decoder(data.meta_data)
self._check_loaded_data(data, filename=h5ad_file)
return data
def load_data_mtx(self, mtx_file, mtx_obs=None, mtx_feature=None, meta_data_file=None,
meta_data_handler=DEFAULT_METADATA, gene_data_file=None, gene_name_column=None):
data = anndata.read_mtx(self.input_path(mtx_file))
row_names = self._load_list_from_file(self.input_path(mtx_obs)) if mtx_obs is not None else None
col_names = self._load_list_from_file(self.input_path(mtx_feature)) if mtx_feature is not None else None
meta_data = self.load_metadata_tsv(meta_data_file, data.obs_names, meta_data_handler=meta_data_handler)
gene_metadata = self.load_gene_metadata_tsv(gene_data_file, gene_name_column)
data = InferelatorData(data,
meta_data=meta_data,
gene_data=gene_metadata,
sample_names=row_names,
gene_names=col_names)
return data
def load_data_hdf5(self, hdf5_file, use_layer=None, meta_data_file=None, meta_data_handler=DEFAULT_METADATA,
gene_data_file=None, gene_name_column=None, transpose_expression_data=False):
data = pd.HDFStore(self.input_path(hdf5_file), mode='r')
data = data[data.keys()[0]] if use_layer is None else data[use_layer]
meta_data = self.load_metadata_tsv(meta_data_file, data.index, meta_data_handler=meta_data_handler)
gene_metadata = self.load_gene_metadata_tsv(gene_data_file, gene_name_column)
data = data.transpose() if transpose_expression_data else data
data = InferelatorData(data,
meta_data=meta_data,
gene_data=gene_metadata)
# Make sure bytestrings are decoded
_safe_dataframe_decoder(data.gene_data)
_safe_dataframe_decoder(data.meta_data)
return data
def load_data_tenx(self, tenx_path, meta_data_file=None, meta_data_handler=DEFAULT_METADATA, gene_data_file=None,
gene_name_column=None):
mtx_file, mtx_obs, mtx_feature = None, None, None
for datafile in _TENX_MTX:
if self._file_exists(self.filename_path_join(tenx_path, datafile)):
mtx_file = self.filename_path_join(tenx_path, datafile)
for datafile in _TENX_BARCODES:
if self._file_exists(self.filename_path_join(tenx_path, datafile)):
mtx_obs = self.filename_path_join(tenx_path, datafile)
for datafile in _TENX_FEATURES:
if self._file_exists(self.filename_path_join(tenx_path, datafile)):
mtx_feature = self.filename_path_join(tenx_path, datafile)
if mtx_file is None:
msg = "Cannot find 10x files ({f}) in path ({p})".format(f=" or ".join(_TENX_MTX), p=tenx_path)
raise FileNotFoundError(msg)
else:
return self.load_data_mtx(mtx_file, mtx_obs=mtx_obs, mtx_feature=mtx_feature, meta_data_file=meta_data_file,
meta_data_handler=meta_data_handler, gene_data_file=gene_data_file,
gene_name_column=gene_name_column)
def load_data_tsv(self, expression_matrix_file, transpose_expression_data=False, meta_data_file=None,
meta_data_handler=DEFAULT_METADATA, expression_matrix_metadata=None, gene_data_file=None,
gene_name_column=None):
Debug.vprint("Loading expression data file {file}".format(file=expression_matrix_file), level=0)
# Load expression data
data = self.input_dataframe(expression_matrix_file)
if expression_matrix_metadata is not None:
meta_cols = data.columns.intersection(expression_matrix_metadata)
slice_meta_data = data.loc[:, meta_cols].copy()
data = data.drop(meta_cols, axis=1)
else:
slice_meta_data = None
if meta_data_file is None and slice_meta_data is not None:
meta_data = None
else:
sample_labels = data.columns if transpose_expression_data else data.index
meta_data = self.load_metadata_tsv(meta_data_file, sample_labels, meta_data_handler=meta_data_handler)
meta_data = pd.concat((meta_data, slice_meta_data), axis=1)
gene_metadata = self.load_gene_metadata_tsv(gene_data_file, gene_name_column)
# Pack all data structures into an InferelatorData object
data = InferelatorData(data,
transpose_expression=transpose_expression_data,
meta_data=meta_data,
gene_data=gene_metadata)
self._check_loaded_data(data, filename=expression_matrix_file)
return data
def load_metadata_tsv(self, meta_data_file, sample_labels, meta_data_handler=None):
# Load metadata
meta_data_handler = MetadataHandler.get_handler(meta_data_handler)
if meta_data_file is not None:
Debug.vprint("Loading metadata file {file}".format(file=meta_data_file), level=0)
meta_data = meta_data_handler.check_loaded_meta_data(self.input_dataframe(meta_data_file, index_col=None))
else:
Debug.vprint("No metadata provided. Creating a generic metadata", level=0)
meta_data = meta_data_handler.create_default_meta_data(sample_labels)
return meta_data
def load_gene_metadata_tsv(self, gene_data_file, gene_name_column):
# Load gene metadata
if gene_data_file is None and gene_name_column is None:
return None
elif gene_data_file is None or gene_name_column is None:
raise ValueError("Gene_metadata_file and gene_list_index must both be set if either is")
Debug.vprint("Loading gene metadata from file {file}".format(file=gene_data_file), level=0)
gene_metadata = self.input_dataframe(gene_data_file, index_col=None)
# Validate that the gene_metadata can be properly read, if loaded
if gene_name_column in gene_metadata:
gene_metadata.index = gene_metadata[gene_name_column]
else:
msg = "Column {c} not found in gene data file [{h}]".format(c=gene_name_column,
h=" ".join(gene_metadata.columns))
raise ValueError(msg)
return gene_metadata
def input_dataframe(self, filename, **kwargs):
"""
Read a file in as a pandas dataframe
"""
Debug.vprint("Loading data file: {a}".format(a=self.input_path(filename)), level=2)
# Use any kwargs for this function and any file settings from default
if self._file_format_settings is not None and filename in self._file_format_settings:
file_settings = self._file_format_settings[filename]
else:
file_settings = cp.copy(DEFAULT_PANDAS_TSV_SETTINGS)
file_settings.update(kwargs)
# Load a dataframe
return pd.read_csv(self.input_path(filename), **file_settings)
def input_path(self, filename):
"""
Join filename to input_dir
:param filename: Path to some file that needs to be attached to the input path
:type filename: str
:return: File joined to input_dir instance variable
:rtype: str
"""
return self.filename_path_join(self.input_dir, filename)
@staticmethod
def _file_exists(filename):
return filename is not None and os.path.isfile(filename)
@staticmethod
def _load_list_from_file(filename):
return pd.read_csv(filename, sep="\t", header=None)[0].tolist() if filename is not None else None
@staticmethod
def _check_loaded_data(data, filename=None):
msg = "Loaded {f}:\n".format(f=filename) if filename is not None else ""
nnf, non_finite_genes = data.non_finite
if nnf > 0:
msg += "\t{n} genes with non-finite expression ({g})\n".format(n=nnf, g=" ".join(non_finite_genes))
msg += "Data loaded: {dt}".format(dt=str(data))
Debug.vprint(msg, level=0)
@staticmethod
def filename_path_join(path, filename):
"""
Join filename to path
"""
# Raise an error if filename is None
if filename is None:
raise ValueError("Cannot create a path to a filename set as None")
# Return an absolute path unchanged
elif os.path.isabs(filename):
return filename
# If path is set, join filename to it and return that
elif path is not None:
return InferelatorDataLoader.make_path_safe(os.path.join(path, filename))
# If path is not set, convert the filename to absolute and return it
else:
return InferelatorDataLoader.make_path_safe(filename)
@staticmethod
def make_path_safe(path):
"""
Expand relative paths to absolute paths. Pass None through.
:param path: str
:return: str
"""
if path is not None:
return os.path.abspath(os.path.expanduser(path))
else:
return None
def _safe_dataframe_decoder(data_frame, encoding='utf-8'):
"""
Decode dataframe bytestrings
:param data_frame: pd.DataFrame
"""
if _is_dtype_object(data_frame.index.dtype):
data_frame.index = _decode_series(data_frame.index, encoding=encoding)
if _is_dtype_object(data_frame.columns.dtype):
data_frame.columns = _decode_series(data_frame.columns, encoding=encoding)
for col in data_frame.columns:
if _is_dtype_object(data_frame[col].dtype):
data_frame[col] = _decode_series(data_frame[col], encoding=encoding)
def _is_dtype_object(dtype):
if pat.is_object_dtype(dtype):
return True
elif pat.is_categorical_dtype(dtype):
return pat.is_object_dtype(dtype.categories.dtype)
else:
return False
def _decode_series(series, encoding):
"""
Decode and return a series or index object from pandas
:param series: pd.Series, pd.Index
:param encoding: str
:return: pd.Series, pd.Index
"""
if pat.is_categorical_dtype(series):
series.cat.categories = _decode_series(series.dtype.categories, encoding=encoding)
return series
_new_series = series.str.decode(encoding).values
_no_decode = pd.isna(_new_series)
if np.all(_no_decode):
return series
_new_series[_no_decode] = series.values[_no_decode]
try:
new_series = pd.Series(_new_series, index=series.index)
except AttributeError:
new_series = pd.Index(_new_series)
return new_series
| 39.679758 | 120 | 0.661185 |
e4794a6a504717bff8e3ebc7adf10c59f710b304
| 4,675 |
py
|
Python
|
bin/Python27/Lib/site-packages/scipy/weave/_dumbdbm_patched.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/scipy/weave/_dumbdbm_patched.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/scipy/weave/_dumbdbm_patched.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
"""A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
from __future__ import division, print_function, absolute_import
_os = __import__('os')
from scipy._lib.six import builtins
from scipy._lib.six import string_types
_open = builtins.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database(object):
def __init__(self, file):
self._dirfile = file + '.dir'
self._datfile = file + '.dat'
self._bakfile = file + '.bak'
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
f.close()
self._update()
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
while 1:
line = f.readline().rstrip()
if not line:
break
key, (pos, siz) = eval(line)
self._index[key] = (pos, siz)
f.close()
def _commit(self):
try:
_os.unlink(self._bakfile)
except _os.error:
pass
try:
_os.rename(self._dirfile, self._bakfile)
except _os.error:
pass
f = _open(self._dirfile, 'w')
for key, (pos, siz) in self._index.items():
f.write("%s, (%s, %s)\n" % (repr(key), repr(pos), repr(siz)))
f.close()
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
def __contains__(self, key):
return key in self._index
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = f.tell()
## Does not work under MW compiler
## pos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
## f.seek(pos)
npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
def _addkey(self, key, pos_and_siz):
(pos, siz) = pos_and_siz
self._index[key] = (pos, siz)
f = _open(self._dirfile, 'a')
f.write("%s, (%s, %s)\n" % (repr(key), repr(pos), repr(siz)))
f.close()
def __setitem__(self, key, val):
if not isinstance(key, string_types) or not isinstance(val, string_types):
raise TypeError("keys and values must be strings")
if key not in self._index:
(pos, siz) = self._addval(val)
self._addkey(key, (pos, siz))
else:
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
if newblocks <= oldblocks:
pos, siz = self._setval(pos, val)
self._index[key] = pos, siz
else:
pos, siz = self._addval(val)
self._index[key] = pos, siz
self._addkey(key, (pos, siz))
def __delitem__(self, key):
del self._index[key]
self._commit()
def keys(self):
return list(self._index.keys())
def has_key(self, key):
return key in self._index
def __len__(self):
return len(self._index)
def close(self):
self._index = None
self._datfile = self._dirfile = self._bakfile = None
def open(file, flag=None, mode=None):
# flag, mode arguments are currently ignored
return _Database(file)
| 29.21875 | 83 | 0.541176 |
4e1891cd1749c4ee0b22794fe71cb53e6164319d
| 3,411 |
py
|
Python
|
highway_env/envs/graphics.py
|
sebastopol06/highway-env
|
3b946e42a635e224b3ad5b20817e68d2fc6787d7
|
[
"MIT"
] | null | null | null |
highway_env/envs/graphics.py
|
sebastopol06/highway-env
|
3b946e42a635e224b3ad5b20817e68d2fc6787d7
|
[
"MIT"
] | null | null | null |
highway_env/envs/graphics.py
|
sebastopol06/highway-env
|
3b946e42a635e224b3ad5b20817e68d2fc6787d7
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function, absolute_import
import os
import numpy as np
import pygame
from highway_env.road.graphics import WorldSurface, RoadGraphics
from highway_env.vehicle.graphics import VehicleGraphics
class EnvViewer(object):
"""
A viewer to render a highway driving environment.
"""
SCREEN_WIDTH = 1500  # 600
SCREEN_HEIGHT = 150
def __init__(self, env):
self.env = env
pygame.init()
pygame.display.set_caption("Highway-env")
panel_size = (self.SCREEN_WIDTH, self.SCREEN_HEIGHT)
self.screen = pygame.display.set_mode([self.SCREEN_WIDTH, self.SCREEN_HEIGHT])
self.sim_surface = WorldSurface(panel_size, 0, pygame.Surface(panel_size))
self.sim_surface.centering_position = env.config.get("centering_position", self.sim_surface.INITIAL_CENTERING)
self.clock = pygame.time.Clock()
self.enabled = True
if "SDL_VIDEODRIVER" in os.environ and os.environ["SDL_VIDEODRIVER"] == "dummy":
self.enabled = False
self.agent_display = None
self.agent_surface = None
def set_agent_display(self, agent_display):
if self.agent_display is None:
if self.SCREEN_WIDTH > self.SCREEN_HEIGHT:
self.screen = pygame.display.set_mode((self.SCREEN_WIDTH, 2 * self.SCREEN_HEIGHT))
else:
self.screen = pygame.display.set_mode((2 * self.SCREEN_WIDTH, self.SCREEN_HEIGHT))
self.agent_surface = pygame.Surface((self.SCREEN_WIDTH, self.SCREEN_HEIGHT))
self.agent_display = agent_display
def handle_events(self):
"""
Handle pygame events by forwarding them to the display and environment vehicle.
"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.env.close()
self.sim_surface.handle_event(event)
if self.env.vehicle:
VehicleGraphics.handle_event(self.env.vehicle, event)
def display(self):
"""
Display the road and vehicles on a pygame window.
"""
if not self.enabled:
return
self.sim_surface.move_display_window_to(self.window_position())
RoadGraphics.display(self.env.road, self.sim_surface)
RoadGraphics.display_traffic(self.env.road, self.sim_surface)
if self.agent_display:
self.agent_display(self.agent_surface, self.sim_surface)
if self.SCREEN_WIDTH > self.SCREEN_HEIGHT:
self.screen.blit(self.agent_surface, (0, self.SCREEN_HEIGHT))
else:
self.screen.blit(self.agent_surface, (self.SCREEN_WIDTH, 0))
self.screen.blit(self.sim_surface, (0, 0))
self.clock.tick(self.env.SIMULATION_FREQUENCY)
pygame.display.flip()
def get_image(self):
"""
:return: the rendered image as an RGB array
"""
data = pygame.surfarray.array3d(self.screen)
return np.moveaxis(data, 0, 1)
def window_position(self):
"""
:return: the world position of the center of the displayed window.
"""
if self.env.vehicle:
return self.env.vehicle.position
else:
return np.array([0, 0])
def close(self):
"""
Close the pygame window.
"""
pygame.quit()
| 33.772277 | 118 | 0.632952 |
d9c2080bd01455a8eb0a44b12305fbb3bac9baca
| 4,187 |
py
|
Python
|
share/seeds/generate-seeds.py
|
thorn79/Ember
|
88d1c0301002eaae559646963b769b2a03a3842c
|
[
"MIT"
] | 29 |
2017-08-18T02:53:57.000Z
|
2018-11-04T17:30:25.000Z
|
share/seeds/generate-seeds.py
|
thorn79/Ember
|
88d1c0301002eaae559646963b769b2a03a3842c
|
[
"MIT"
] | 63 |
2017-08-06T04:30:16.000Z
|
2021-01-16T16:07:35.000Z
|
share/seeds/generate-seeds.py
|
thorn79/Ember
|
88d1c0301002eaae559646963b769b2a03a3842c
|
[
"MIT"
] | 41 |
2017-08-30T13:18:21.000Z
|
2018-12-27T06:18:29.000Z
|
#!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
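# Illustrative conversion (hypothetical seed entry): a nodes_main.txt line such as
#   1.2.3.4:8333
# becomes the following pnSeed6_main entry (the IPv4 address is mapped into the IPv6 prefix):
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8333}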
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef H_CHAINPARAMSSEEDS\n')
g.write('#define H_CHAINPARAMSSEEDS\n')
g.write('// List of fixed seed nodes for the bitcoin network\n')
g.write('// AUTOGENERATED by contrib/devtools/generate-seeds.py\n\n')
g.write('// Each line contains a 16-byte IPv6 address and a port.\n')
g.write('// IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 10024)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 10012)
g.write('#endif\n')
if __name__ == '__main__':
main()
| 31.719697 | 98 | 0.588488 |
bd7e39eddf98f0a117c367a7c248b3e453bbca25
| 418 |
py
|
Python
|
Products/CMFDefault/skins/zpt_content/source_html.py
|
zopefoundation/Products.CMFDefault
|
a176d9aac5a7e04725dbd0f7b76c6ac357062139
|
[
"ZPL-2.1"
] | null | null | null |
Products/CMFDefault/skins/zpt_content/source_html.py
|
zopefoundation/Products.CMFDefault
|
a176d9aac5a7e04725dbd0f7b76c6ac357062139
|
[
"ZPL-2.1"
] | 5 |
2017-07-13T00:51:25.000Z
|
2021-02-04T15:08:39.000Z
|
Products/CMFDefault/skins/zpt_content/source_html.py
|
zopefoundation/Products.CMFDefault
|
a176d9aac5a7e04725dbd0f7b76c6ac357062139
|
[
"ZPL-2.1"
] | 3 |
2017-07-08T03:22:35.000Z
|
2018-05-20T06:42:03.000Z
|
##parameters=
##
from Products.CMFDefault.utils import decode
options = {}
metadata = [ {'name': field[0], 'body': field[1]}
for field in context.getMetadataHeaders()
if field[0].lower() != 'title' ]
options['title'] = context.Title()
options['listMetadataFields'] = metadata
options['editable_body'] = context.EditableBody()
return context.source_html_template(**decode(options, script))
| 27.866667 | 62 | 0.681818 |
8761c4b33d464b46a16c10ce8753421d685010bc
| 2,387 |
py
|
Python
|
SSDBM_figures/document_vs_storage/plot_document_vs_storage.py
|
dennlinger/hypergraph-document-store
|
72b90119b163b92254c73442bee52cde55e58517
|
[
"MIT"
] | null | null | null |
SSDBM_figures/document_vs_storage/plot_document_vs_storage.py
|
dennlinger/hypergraph-document-store
|
72b90119b163b92254c73442bee52cde55e58517
|
[
"MIT"
] | 1 |
2019-12-12T09:20:00.000Z
|
2019-12-12T09:20:00.000Z
|
SSDBM_figures/document_vs_storage/plot_document_vs_storage.py
|
dennlinger/hypergraph-document-store
|
72b90119b163b92254c73442bee52cde55e58517
|
[
"MIT"
] | 1 |
2021-07-22T14:16:47.000Z
|
2021-07-22T14:16:47.000Z
|
"""
Simple test plot for now
"""
import matplotlib.pyplot as plt
import matplotlib
import json
def y_getter(key, scaler=1024**2):
return [0] + [v[key]/scaler for _, v in data.items()]
if __name__ == "__main__":
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.rc('axes', titlesize=16)
plt.rc('axes', labelsize=16)
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
with open("document_vs_storage.json", "r") as f:
data = json.load(f)
# prepend 0, 0 to all values for "visual niceness"
x = [0] + [int(key) for key in data.keys()]
scaler = 1024**3
y_implicit = y_getter("implicit", scaler)
y_explicit = y_getter("explicit", scaler)
y_explicit_entity = y_getter("explicit_entity", scaler)
y_dyadic_entity = y_getter("dyadic_entity", scaler)
y_implicit_neo4j = y_getter("implicit_neo4j", scaler)
y_explicit_neo4j = y_getter("explicit_neo4j", scaler)
y_explicit_entity_neo4j = y_getter("explicit_entity_neo4j", scaler)
y_dyadic_entity_neo4j = y_getter("dyadic_entity_neo4j", scaler)
# Three plot dimensions:
# PSQL vs Neo4j: Marker
# Dataset: line dotted or not
# Model: Color
plt.figure()
plt.xlim([0, 120000])
plt.xlabel("Number of documents")
plt.ylabel("Storage size in GB")
plt.ylim([0, 12.5])
plt.plot(x, y_implicit, marker="o", color="#00996a", label="Implicit Full PSQL")
plt.plot(x, y_explicit, marker="o", color="#460d80", label="Explicit Full PSQL")
plt.plot(x, y_explicit_entity, marker="o", color="#460d80", linestyle="dashed", label="Explicit Entity PSQL")
plt.plot(x, y_dyadic_entity, marker="o", color="#ffa82d", linestyle="dashed", label="Dyadic Entity PSQL")
plt.plot(x, y_implicit_neo4j, marker="^", color="#00996a", label="Implicit Full Neo4j")
plt.plot(x, y_explicit_neo4j, marker="^", color="#460d80", label="Explicit Full Neo4j")
plt.plot(x, y_explicit_entity_neo4j, marker="^", color="#460d80", linestyle="dashed", label="Explicit Entity Neo4j")
plt.plot(x, y_dyadic_entity_neo4j, marker="^", color="#ffa82d", linestyle="dashed", label="Dyadic Entity Neo4j")
plt.xticks([0, 20000, 40000, 60000, 80000, 100000, 120000], ["0", "20k", "40k", "60k", "80k", "100k", "120k"])
plt.legend()
plt.savefig("document_vs_storage.pdf", dpi=300)
plt.show()
| 37.888889 | 120 | 0.670716 |
9d72168652a4c04048c9bc4ba3815b9e5953cc11
| 1,247 |
py
|
Python
|
callbacks.py
|
rgiglio/clt-app
|
28caf1c916c2b6f216357c446d5f1cd0582a9aff
|
[
"MIT"
] | null | null | null |
callbacks.py
|
rgiglio/clt-app
|
28caf1c916c2b6f216357c446d5f1cd0582a9aff
|
[
"MIT"
] | null | null | null |
callbacks.py
|
rgiglio/clt-app
|
28caf1c916c2b6f216357c446d5f1cd0582a9aff
|
[
"MIT"
] | null | null | null |
from dash import callback_context
from dash.dependencies import Input, Output, State
from dash_table import FormatTemplate
from dash_table.Format import Format
import dash
import backend as be
def register_callbacks(app):
@app.callback(
[Output('clt-output', 'figure'),
Output('table-output', 'columns'),
Output('table-output', 'data'),
Output('session', 'data')],
[Input('distribution', 'value'),
Input('number-of-balls', 'value'),
Input('sample-size', 'value'),
Input('addup', 'n_clicks'),
Input('addup1000', 'n_clicks')],
[State('session', 'data')],
)
def update_figure(distribution, number_of_balls, sample_size,
n_clicks_addup, n_clicks_addup1000, session_data):
if dash.callback_context.triggered[0]['prop_id'] == 'addup.n_clicks':
return be.update_figure(distribution, number_of_balls, 1, sample_size, session_data)
if dash.callback_context.triggered[0]['prop_id'] == 'addup1000.n_clicks':
return be.update_figure(distribution, number_of_balls, 1000, sample_size, session_data)
return be.update_figure(distribution, number_of_balls, 0, sample_size, session_data)
| 40.225806 | 99 | 0.664796 |
652fae89a989bb5e257b7d6c4a208eb35e4caa20
| 8,344 |
py
|
Python
|
nova/tests/fixtures.py
|
venusource/nova
|
0c6e6f180eebe71a3431abf726a0fd0c66578162
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/fixtures.py
|
venusource/nova
|
0c6e6f180eebe71a3431abf726a0fd0c66578162
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/fixtures.py
|
venusource/nova
|
0c6e6f180eebe71a3431abf726a0fd0c66578162
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Nova tests."""
from __future__ import absolute_import
import gettext
import logging
import os
import uuid
import warnings
import fixtures
from oslo.config import cfg
from oslo.messaging import conffixture as messaging_conffixture
from nova.db import migration
from nova import rpc
from nova.db.sqlalchemy import api as session
from nova import service
_TRUE_VALUES = ('True', 'true', '1', 'yes')
CONF = cfg.CONF
DB_SCHEMA = ""
class ServiceFixture(fixtures.Fixture):
"""Run a service as a test fixture."""
def __init__(self, name, host=None, **kwargs):
name = name
host = host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'nova-%s' % name)
self.kwargs = kwargs
def setUp(self):
super(ServiceFixture, self).setUp()
self.service = service.Service.create(**self.kwargs)
self.service.start()
self.addCleanup(self.service.kill)
class TranslationFixture(fixtures.Fixture):
"""Use gettext NullTranslation objects in tests."""
def setUp(self):
super(TranslationFixture, self).setUp()
nulltrans = gettext.NullTranslations()
gettext_fixture = fixtures.MonkeyPatch('gettext.translation',
lambda *x, **y: nulltrans)
self.gettext_patcher = self.useFixture(gettext_fixture)
class NullHandler(logging.Handler):
"""custom default NullHandler to attempt to format the record.
Used in conjunction with
log_fixture.get_logging_handle_error_fixture to detect formatting errors in
debug level logs without saving the logs.
"""
def handle(self, record):
self.format(record)
def emit(self, record):
pass
def createLock(self):
self.lock = None
class StandardLogging(fixtures.Fixture):
"""Setup Logging redirection for tests.
There are a number of things we want to handle with logging in tests:
* Redirect the logging to somewhere that we can test or dump it later.
* Ensure that as many DEBUG messages as possible are actually
executed, to ensure they are actually syntactically valid (they
often have not been).
* Ensure that we create useful output for tests that doesn't
overwhelm the testing system (which means we can't capture the
100 MB of debug logging on every run).
To do this we create a logger fixture at the root level, which
defaults to INFO and create a Null Logger at DEBUG which lets
us execute log messages at DEBUG but not keep the output.
To support local debugging OS_DEBUG=True can be set in the
environment, which will print out the full debug logging.
There are also a set of overrides for particularly verbose
modules to be even less than INFO.
"""
def setUp(self):
super(StandardLogging, self).setUp()
# set root logger to debug
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# supports collecting debug level for local runs
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
level = logging.DEBUG
else:
level = logging.INFO
# Collect logs
fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
self.logger = self.useFixture(
fixtures.FakeLogger(format=fs, level=None))
# TODO(sdague): why can't we send level through the fake
# logger? Tests prove that it breaks, but it's worth getting
# to the bottom of.
root.handlers[0].setLevel(level)
if level > logging.DEBUG:
# Just attempt to format debug level logs, but don't save them
handler = NullHandler()
self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
handler.setLevel(logging.DEBUG)
# Don't log every single DB migration step
logging.getLogger(
'migrate.versioning.api').setLevel(logging.WARNING)
class OutputStreamCapture(fixtures.Fixture):
"""Capture output streams during tests.
This fixture captures errant printing to stderr / stdout during
the tests and lets us see those streams at the end of the test
runs instead. Useful to see what was happening during failed
tests.
"""
def setUp(self):
super(OutputStreamCapture, self).setUp()
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
self.out = self.useFixture(fixtures.StringStream('stdout'))
self.useFixture(
fixtures.MonkeyPatch('sys.stdout', self.out.stream))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
self.err = self.useFixture(fixtures.StringStream('stderr'))
self.useFixture(
fixtures.MonkeyPatch('sys.stderr', self.err.stream))
@property
def stderr(self):
return self.err._details["stderr"].as_text()
@property
def stdout(self):
return self.out._details["stdout"].as_text()
class Timeout(fixtures.Fixture):
"""Setup per test timeouts.
In order to avoid test deadlocks we support setting up a test
timeout parameter read from the environment. In almost all
cases where the timeout is reached this means a deadlock.
A class level TIMEOUT_SCALING_FACTOR also exists, which allows
extremely long tests to specify they need more time.
"""
def __init__(self, timeout, scaling=1):
super(Timeout, self).__init__()
try:
self.test_timeout = int(timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
self.test_timeout = 0
if scaling >= 1:
self.test_timeout *= scaling
else:
raise ValueError('scaling value must be >= 1')
def setUp(self):
super(Timeout, self).setUp()
if self.test_timeout > 0:
self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True))
class Database(fixtures.Fixture):
def _cache_schema(self):
global DB_SCHEMA
if not DB_SCHEMA:
engine = session.get_engine()
conn = engine.connect()
migration.db_sync()
DB_SCHEMA = "".join(line for line in conn.connection.iterdump())
engine.dispose()
def reset(self):
self._cache_schema()
engine = session.get_engine()
engine.dispose()
conn = engine.connect()
conn.connection.executescript(DB_SCHEMA)
def setUp(self):
super(Database, self).setUp()
self.reset()
class RPCFixture(fixtures.Fixture):
def __init__(self, *exmods):
super(RPCFixture, self).__init__()
self.exmods = []
self.exmods.extend(exmods)
def setUp(self):
super(RPCFixture, self).setUp()
self.addCleanup(rpc.cleanup)
rpc.add_extra_exmods(*self.exmods)
self.addCleanup(rpc.clear_extra_exmods)
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
self.useFixture(self.messaging_conf)
rpc.init(CONF)
class WarningsFixture(fixtures.Fixture):
"""Filters out warnings during test runs."""
def setUp(self):
super(WarningsFixture, self).setUp()
# NOTE(sdague): Make deprecation warnings only happen once. Otherwise
# this gets kind of crazy given the way that upstream python libs use
# this.
warnings.simplefilter("once", DeprecationWarning)
self.addCleanup(warnings.resetwarnings)
| 33.243028 | 79 | 0.664549 |
056506ac8841d294192ff0e934b47ce303d317c1
| 1,082 |
py
|
Python
|
tests/test_foursquare.py
|
chrislawlor/classify-server
|
8730cd5e345149129fe565d3f65bc67e4cd51afd
|
[
"Apache-2.0"
] | null | null | null |
tests/test_foursquare.py
|
chrislawlor/classify-server
|
8730cd5e345149129fe565d3f65bc67e4cd51afd
|
[
"Apache-2.0"
] | 7 |
2017-10-13T16:01:13.000Z
|
2021-06-01T21:36:03.000Z
|
tests/test_foursquare.py
|
chrislawlor/classify-server
|
8730cd5e345149129fe565d3f65bc67e4cd51afd
|
[
"Apache-2.0"
] | 4 |
2017-10-18T18:10:52.000Z
|
2019-02-01T08:39:18.000Z
|
from classify_server import foursquare
from .mocks import MockClient
async def test_match_venue_to_query_exact_match():
venues = [{'name': 'test'}]
query = 'test'
result = foursquare.match_venue_to_query(query, venues)
assert result == venues[0]
async def test_match_venue_to_query_small_difference():
venues = [{'name': 'Manhattan'}]
query = 'Manhatan'
result = foursquare.match_venue_to_query(query, venues)
assert result == venues[0]
async def test_match_venue_to_query_no_match():
venues = [{'name': 'foo'}]
query = 'bar'
result = foursquare.match_venue_to_query(query, venues)
assert result is None
async def test_make_request():
test_data = {'venues': []}
client = MockClient(test_data)
resp = await foursquare.make_request(client, 'url?intent=match')
assert resp == test_data
async def test_make_request_null_response():
test_data = {'error': 'testing error'}
client = MockClient(test_data, response_status=400)
resp = await foursquare.make_request(client, 'url')
assert resp is None
| 27.74359 | 68 | 0.710721 |
a727f826e1f7c2262c5c9543a5159515706a036b
| 129 |
py
|
Python
|
data/__init__.py
|
neelabh17/MAVI-Face
|
5dbf105b51a8b90203cd144f2fe671770d38eb81
|
[
"MIT"
] | 6 |
2020-05-04T08:21:05.000Z
|
2020-07-03T13:32:56.000Z
|
data/__init__.py
|
neelabh17/MAVI-Face
|
5dbf105b51a8b90203cd144f2fe671770d38eb81
|
[
"MIT"
] | 4 |
2020-04-30T00:57:54.000Z
|
2020-06-06T09:46:03.000Z
|
data/__init__.py
|
neelabh17/MAVI-Face
|
5dbf105b51a8b90203cd144f2fe671770d38eb81
|
[
"MIT"
] | 3 |
2020-05-04T08:21:10.000Z
|
2020-07-12T13:36:45.000Z
|
from .wider_face import WiderFaceDetection, detection_collate, ohemDataSampler
from .data_augment import *
from .config import *
| 32.25 | 78 | 0.837209 |
f396e36f2cbbf4ccc274c917fb7111ef852ff68f
| 10,212 |
py
|
Python
|
arc/web_functionality.py
|
zzpwahaha/ARC-Alkali-Rydberg-Calculator
|
8f15f5f0c5247c5d2254efdfd01894fcd3e5d586
|
[
"BSD-3-Clause"
] | null | null | null |
arc/web_functionality.py
|
zzpwahaha/ARC-Alkali-Rydberg-Calculator
|
8f15f5f0c5247c5d2254efdfd01894fcd3e5d586
|
[
"BSD-3-Clause"
] | null | null | null |
arc/web_functionality.py
|
zzpwahaha/ARC-Alkali-Rydberg-Calculator
|
8f15f5f0c5247c5d2254efdfd01894fcd3e5d586
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function, absolute_import
import numpy as np
from .alkali_atom_functions import printStateString, C_e, C_h, pi
def htmlLiteratureOutput(v, ref):
print("<div class='lit'><p>Literature values<p>Radial part of dipole matrix element: %.3f</p>" % v)
typeOfSource = "experimental value"
if ref[0] == 1:
typeOfSource = "theoretical value"
print("<p>Source: <a class='link' target='_blank' href='http://dx.doi.org/%s'>%s</a>, %s (%s) </p>" %
(ref[4], ref[3], typeOfSource, ref[2]))
print("</div>")
def rabiFrequencyWidget(atom, n1, l1, j1, n2, l2, j2, laserPower, laserWaist):
sol = []
inputMj = '<p>Rabi frequency $=$ <span id="rabival">0</span><p><form id="polarization" onchange="myFunction()">'
inputMj += '<p>for driving from <select id="mj" onchange="myFunction()">'
index = 0
for mj1 in np.linspace(-j1, j1, int(round(2 * j1 + 1))):
inputMj += ('<option value="%d">m_j = %d/2 ' %
(index, int(round(2. * mj1))))
arr = []
for q in [-1, 0, 1]:
if abs(mj1 + q) - 0.1 < j2:
rabiFreq = atom.getRabiFrequency(n1, l1, j1, mj1, n2, l2, j2,
q, laserPower,
laserWaist) / (2 * pi)
arr.append("$2 \\pi \\times$" +
printValueString(rabiFreq, "Hz", decimalPlaces=2))
else:
arr.append("not coupled")
sol.append(arr)
index += 1
inputMj += '</select>\
<input type="radio" name="colors" id="sigma-" value="0" >$\sigma^-$ | \
<input type="radio" name="colors" id="pi" value="1" checked>$\pi$ |\
<input type="radio" name="colors" id="sigma+" value="2" >$\sigma^+$\
transition</p></form>'
script = "<script id='returnscript' type='text/javascript'>"
script = script + "var rabiFreq =" + str(sol) + "; "
script += 'function myFunction() {\
var mj = document.getElementById("mj").value;\
var p = 0;\
if (document.getElementById("sigma-").checked){\
p=0;\
}\
if (document.getElementById("pi").checked){\
p=1; \
}\
if (document.getElementById("sigma+").checked){\
p=2; \
}\
document.getElementById("rabival").innerHTML = rabiFreq[mj][p] ;\
MathJax.Hub.Queue(["Typeset",MathJax.Hub,"rabival"]);\
}\
document.getElementById("polarization").addEventListener("click", myFunction);\
myFunction();\
</script>'
return inputMj + script
def printValueString(value, unit, decimalPlaces=3):
prefix = ["f", "p", "n", "$\\mu$", "m", "", "k", "M", "G", "T"]
i = 5
sg = 1.
if value < 0:
sg = -1.
value = abs(value)
formatString = "%%.%df %%s%%s" % decimalPlaces
if value > 1000:
while (value > 1000)and(i < 9):
value = value * 1.e-3
i += 1
return formatString % (sg * value, prefix[i], unit)
elif value < 1:
while (value < 1) and (i > 0):
value = value * 1.e3
i -= 1
return formatString % (sg * value, prefix[i], unit)
else:
return formatString % (sg * value, "", unit)
def plotStarkMap(calc, units=1, xlim=[], ylim=[], filename=""):
originalState = calc.basisStates[calc.indexOfCoupledState]
n = originalState[0]
l = originalState[1]
j = originalState[2]
ax = webPlot()
x = []
y = []
yState = []
ax.xlabel = "E field (V/cm)"
coeff = 1.0
ax.ylabel = "Energy/h (GHz)"
if (units == 1):
# in cm^{-1}
coeff = 0.03336 # conversion factor from GHz to cm^{-1}
ax.ylabel = "Energy/(h c) (cm^{-1})"
if (ylim == []):
ylim = [calc.atom.getEnergy(n, l, j) * C_e / C_h * 1e-9 * coeff - 10,
calc.atom.getEnergy(n, l, j) * C_e / C_h * 1e-9 * coeff + 10]
for br in xrange(len(calc.y)):
for i in xrange(len(calc.y[br])):
yt = calc.y[br][i] * coeff
if (yt < ylim[1] and ylim[0] < yt):
x.append(calc.eFieldList[i])
y.append(yt)
yState.append(calc.highlight[br][i])
yState = np.array(yState)
sortOrder = yState.argsort(kind='heapsort')
x = np.array(x)
y = np.array(y)
x = x[sortOrder]
y = y[sortOrder]
yState = yState[sortOrder]
ct = "|< %s | \mu > |^2" % printStateString(n, l, j)
ax.scatter(x / 100., y, c=yState, cmin=0, cmax=1, ctitle=ct)
if (xlim == []):
xlim = [min(x) / 100., max(x) / 100.]
ax.printPlot(xlim=xlim, ylim=ylim, filename=filename, name="starkdiv1",
height=600)
return 0
def plotInteractionLevels(calc, xlim=[], ylim=[], filename=""):
ax = webPlot()
ax.xlabel = "R (\mu m)"
ax.ylabel = "\Delta E (GHz)"
if (calc.drivingFromState[0] == 0):
# colouring is based on the contribution of the original pair state here
ct = r"|< %s %.1f , %s %.1f | \mu > |^2$" % \
(printStateString(calc.n, calc.l, calc.j),
calc.m1,
printStateString(calc.nn, calc.ll, calc.jj),
calc.m1)
else:
# colouring is based on the coupling to different states
ct = "\Omega_\mu/\Omega"
x = []
y = []
yState = []
for br in xrange(len(calc.y)):
for i in xrange(len(calc.y[br])):
x.append(calc.r[i])
y.append(calc.y[br][i])
yState.append(calc.highlight[br][i])
yState = np.array(yState)
sortOrder = yState.argsort(kind='heapsort')
x = np.array(x)
y = np.array(y)
x = x[sortOrder]
y = y[sortOrder]
yState = yState[sortOrder]
ax.scatter(x, y, c=yState, cmin=0, cmax=1, ctitle=ct)
ax.printPlot(xlim=xlim, ylim=ylim, filename=filename, name="levelintdiv")
return
class webPlot:
def __init__(self):
self.traces = []
self.layout = []
self.traceNo = 0
self.xlabel = ""
self.ylabel = ""
self.layoutx = ""
self.layouty = ""
self.title = ""
def plot(self, x, y, type, name=""):
np.set_printoptions(threshold=1e10)
self.traceNo += 1
temp = "{ x:" + np.array2string(x, separator=',') + ",\n"
temp = temp + "y: " + np.array2string(y, separator=',') + ",\n"
if (type == "."):
temp += "mode: 'markers',\n marker: {size:5},\n"
elif (type == "-"):
temp += "mode: 'lines',\n"
temp += "name: '%s'" % name
temp += "}"
self.traces.append(temp)
def semilogx(self, x, y, type, name=""):
self.layoutx = "type:'log' ,\n\
tickformat :'.1e',\n "
self.plot(x, y, type, name)
def semilogy(self, x, y, type, name=""):
self.layouty = "type:'log' ,\n\
tickformat :'.1e',\n "
self.plot(x, y, type, name)
def scatter(self, x, y, c=[], cmin=0, cmax=1, ctitle="", name=""):
np.set_printoptions(threshold=1e10)
self.traceNo += 1
temp = "{ x:" + np.array2string(x, separator=',',) + ",\n"
temp = temp + "y: " + np.array2string(y, separator=',') + ",\n"
temp += "name: '%s',\n" % name
if (c != []):
temp = temp + " text: " + np.array2string(c, separator=',') + ",\n"
temp += "mode: 'markers',\n"
if (c != []):
temp = temp + "marker:{\n\
color:" + np.array2string(c, separator=',') + ",\n\
cmin:%f,\n\
cmax:%f,\n\
showscale: true,\n\
colorbar:{\n\
title:'" % (cmin, cmax) + str(ctitle) + "',\n\
},\n\
size:5\n\
},\n"
else:
temp = temp + "marker:{\n\
size:5\n\
},\n"
temp += "}"
self.traces.append(temp)
def printPlot(self, name="", width=600, height=363, xlim=[], ylim=[], filename="",
scriptName="returnscript"):
d = ""
i = 0
while i < self.traceNo:
if i != 0:
d += ","
d += self.traces[i]
i += 1
d = "data=[" + d + "];\n"
xLimData = ""
if (not xlim == []):
xLimData = "range: [%.2E,%.2E],\n" % (xlim[0], xlim[1])
yLimData = ""
if (not ylim == []):
yLimData = "range: [%.2E,%.2E],\n" % (ylim[0], ylim[1])
# now layout
l = "layout = {\n\
hovermode: 'closest',\n\
xaxis:{\n\
zeroline:false,\n\
" + self.layoutx + "\
" + xLimData + "\
title: '" + self.xlabel + "',\n\
ticks: 'inside',\n\
showline: true\n\
},\n\
yaxis:{\n\
zeroline:false,\n\
" + self.layouty + "\
" + yLimData + "\
title: '" + self.ylabel + "',\n\
ticks: 'inside' ,\n\
showline: true \n\
}\n\
};\n"
if filename == "":
if name == "":
name = 'plotdiv'
if (self.title != ""):
print("<p>" + self.title + "</p>")
print("<div id='" + name +
"' style='width:%dpx;height:%dpx;'></div>\n" % (width, height))
print("<script id='" + scriptName + "' type='text/javascript'>\n")
print("plotarea = document.getElementById('" + name + "');\n")
print(d)
print(l)
print("Plotly.plot(plotarea, data, layout);\n")
print("</script>\n")
else:
f = open(filename, "w")
if name == "":
name = 'plotdiv'
if (self.title != ""):
f.write("<p>" + self.title + "</p>")
f.write("<div id='" + name +
"' style='width:%dpx;height:%dpx;'></div>\n" % (width, height))
f.write("<script id='" + scriptName +
"' type='text/javascript'>\n")
f.write("plotarea = document.getElementById('" + name + "')\n")
f.write(d)
f.write(l)
f.write("Plotly.plot(plotarea, data, layout);\n")
f.write("</script>\n")
f.close()
| 32.113208 | 116 | 0.479828 |
3a95db8d1a6315f0725725741243263b22ca6c2c
| 1,724 |
py
|
Python
|
netwho-collector.py
|
TheProjecter/netwho
|
89be92d90ddf2b54ecc7e05087359b26ce92ecce
|
[
"Apache-2.0"
] | 1 |
2015-09-27T01:13:59.000Z
|
2015-09-27T01:13:59.000Z
|
netwho-collector.py
|
TheProjecter/netwho
|
89be92d90ddf2b54ecc7e05087359b26ce92ecce
|
[
"Apache-2.0"
] | null | null | null |
netwho-collector.py
|
TheProjecter/netwho
|
89be92d90ddf2b54ecc7e05087359b26ce92ecce
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2010 The NetWho Project. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Daemon to sniff traffic and dump device information to disk."""
__author__ = 'thomas%stromberg.org (Thomas Stromberg)'
import optparse
from lib import sniffer
if __name__ == '__main__':
# For the time-being, we only accept pcap data as an argument.
parser = optparse.OptionParser()
parser.add_option('-r', '--file', dest='pcap_filename', default=None,
type='str', help='Path to pcap file to parse')
parser.add_option('-i', '--interface', dest='interface', default=None,
type='str', help='Ethernet interface to use')
parser.add_option('-k', '--keywords', dest='keywords', default=None,
type='str', help='Keywords to notify on if unmatched packet appears.')
(options, args) = parser.parse_args()
if args:
filter = args[0]
else:
filter = None
if options.keywords:
keywords = options.keywords.split(',')
else:
keywords = None
ids = sniffer.Sniffer(pcap_filename=options.pcap_filename, interface=options.interface, filter=filter,
keywords=keywords)
ids.process_input()
| 37.478261 | 104 | 0.694896 |
9ce6d5b5458beffea8f254f22989fd7fa487af9e
| 2,966 |
py
|
Python
|
tempest/services/identity/v3/json/policy_client.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/identity/v3/json/policy_client.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/identity/v3/json/policy_client.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from urlparse import urlparse
from tempest.common.rest_client import RestClient
class PolicyClientJSON(RestClient):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(PolicyClientJSON, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.identity.catalog_type
self.endpoint_url = 'adminURL'
def request(self, method, url, headers=None, body=None, wait=None):
"""Overriding the existing HTTP request in super class rest_client."""
self._set_auth()
self.base_url = self.base_url.replace(urlparse(self.base_url).path,
"/v3")
return super(PolicyClientJSON, self).request(method, url,
headers=headers,
body=body)
def create_policy(self, blob, type):
"""Creates a Policy."""
post_body = {
"blob": blob,
"type": type
}
post_body = json.dumps({'policy': post_body})
resp, body = self.post('policies', post_body, self.headers)
body = json.loads(body)
return resp, body['policy']
def list_policies(self):
"""Lists the policies."""
resp, body = self.get('policies')
body = json.loads(body)
return resp, body['policies']
def get_policy(self, policy_id):
"""Lists out the given policy."""
url = 'policies/%s' % policy_id
resp, body = self.get(url)
body = json.loads(body)
return resp, body['policy']
def update_policy(self, policy_id, **kwargs):
"""Updates a policy."""
resp, body = self.get_policy(policy_id)
type = kwargs.get('type')
post_body = {
'type': type
}
post_body = json.dumps({'policy': post_body})
url = 'policies/%s' % policy_id
resp, body = self.patch(url, post_body,
self.headers)
body = json.loads(body)
return resp, body['policy']
def delete_policy(self, policy_id):
"""Deletes the policy."""
url = "policies/%s" % policy_id
return self.delete(url)
| 36.617284 | 79 | 0.590695 |
42b3938660f5c8596816a1fd1b5560116c8c3f7f
| 2,276 |
py
|
Python
|
main.py
|
nutszebra/squeeze_net
|
85a0f99fce87f542b8c19c27ed3c80f0e2d867c2
|
[
"MIT"
] | 5 |
2016-12-24T13:47:06.000Z
|
2020-03-20T16:26:04.000Z
|
main.py
|
nutszebra/squeeze_net
|
85a0f99fce87f542b8c19c27ed3c80f0e2d867c2
|
[
"MIT"
] | null | null | null |
main.py
|
nutszebra/squeeze_net
|
85a0f99fce87f542b8c19c27ed3c80f0e2d867c2
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('./trainer')
import nutszebra_cifar10
import nutszebra_optimizer
import squeeze_net
import argparse
import trainer.nutszebra_data_augmentation as da
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='cifar10')
parser.add_argument('--load_model', '-m',
default=None,
help='trained model')
parser.add_argument('--load_optimizer', '-o',
default=None,
help='optimizer for trained model')
parser.add_argument('--load_log', '-l',
default=None,
help='optimizer for trained model')
parser.add_argument('--save_path', '-p',
default='./',
help='model and optimizer will be saved at every epoch')
parser.add_argument('--epoch', '-e', type=int,
default=300,
help='maximum epoch')
parser.add_argument('--batch', '-b', type=int,
default=64,
help='mini batch number')
parser.add_argument('--gpu', '-g', type=int,
default=-1,
help='-1 means cpu, put gpu id here')
parser.add_argument('--start_epoch', '-s', type=int,
default=1,
help='start from this epoch')
parser.add_argument('--train_batch_divide', '-trb', type=int,
default=1,
help='divid train batch number by this')
parser.add_argument('--test_batch_divide', '-teb', type=int,
default=1,
                        help='divide test batch number by this')
parser.add_argument('--lr', '-lr', type=float,
default=0.1,
                        help='learning rate')
args = parser.parse_args().__dict__
lr = args.pop('lr')
print('generating model')
model = squeeze_net.SqueezeNet(10)
print('Done')
optimizer = nutszebra_optimizer.OptimizerDense(model, lr=lr)
args['model'] = model
args['optimizer'] = optimizer
args['da'] = da.DataAugmentationCifar10NormalizeBig
main = nutszebra_cifar10.TrainCifar10(**args)
main.run()
| 39.241379 | 80 | 0.539543 |
d94efd62613c72088b07ff88dc398a639bc09c37
| 895 |
py
|
Python
|
src/z3c/xmlhttp/browser.py
|
zopefoundation/z3c.xmlhttp
|
370e2c5c7562f2d401563fe1dbe11ea3583854db
|
[
"ZPL-2.1"
] | null | null | null |
src/z3c/xmlhttp/browser.py
|
zopefoundation/z3c.xmlhttp
|
370e2c5c7562f2d401563fe1dbe11ea3583854db
|
[
"ZPL-2.1"
] | null | null | null |
src/z3c/xmlhttp/browser.py
|
zopefoundation/z3c.xmlhttp
|
370e2c5c7562f2d401563fe1dbe11ea3583854db
|
[
"ZPL-2.1"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id:$
"""
from zope.viewlet.viewlet import JavaScriptViewlet
from zope.viewlet.interfaces import IViewletManager
class IJavaScript(IViewletManager):
"""JavaScript viewlet manager."""
XMLHTTPJavaScriptViewlet = JavaScriptViewlet('z3c.xmlhttp.js')
| 33.148148 | 78 | 0.634637 |
4e10305e7e413c50fb48005de6239a03241a3cf2
| 278 |
py
|
Python
|
rameniaapp/migrations/0018_merge_20201029_0154.py
|
awlane/ramenia
|
6bf8e75a1f279ac584daa4ee19927ffccaa67551
|
[
"MIT"
] | null | null | null |
rameniaapp/migrations/0018_merge_20201029_0154.py
|
awlane/ramenia
|
6bf8e75a1f279ac584daa4ee19927ffccaa67551
|
[
"MIT"
] | null | null | null |
rameniaapp/migrations/0018_merge_20201029_0154.py
|
awlane/ramenia
|
6bf8e75a1f279ac584daa4ee19927ffccaa67551
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-10-29 01:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rameniaapp', '0017_profile_following'),
('rameniaapp', '0016_auto_20201015_0207'),
]
operations = [
]
| 18.533333 | 50 | 0.654676 |
13d81f89704055fe9bfa69b09b3745b9785d461a
| 8,042 |
py
|
Python
|
WriteRecord.py
|
Larz60p/Python-Record-Structure
|
e1af2b22845413e06e07bf214fd677b75ab5f7ae
|
[
"MIT"
] | 1 |
2016-12-06T18:49:10.000Z
|
2016-12-06T18:49:10.000Z
|
WriteRecord.py
|
Larz60p/Python-Record-Structure
|
e1af2b22845413e06e07bf214fd677b75ab5f7ae
|
[
"MIT"
] | null | null | null |
WriteRecord.py
|
Larz60p/Python-Record-Structure
|
e1af2b22845413e06e07bf214fd677b75ab5f7ae
|
[
"MIT"
] | null | null | null |
"""
Example usage of Record class
The MIT License (MIT)
Copyright (c) <2016> <Larry McCaig (aka: Larz60+ aka: Larz60p)>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
class WriteRecord:
def create_data_file(self):
self.jsonrec = {
"ASXListedCompanies": {
"filename": "ASXListedCompanies.csv",
"source": "ASX",
"url": "http://www.asx.com.au/asx/research/ASXListedCompanies.csv",
"local_file": "/stock_market/data/DailyFiles/USA/ASXListedCompanies.csv",
"notes":
"Timestamp first format: 'ASX listed companies as at Sat Aug 13"
" 21:00:02 EST 2016' followed by several empty lines, followed"
" by header format: 'Company name,ASX code,GICS industry group'",
"delimiter": ",",
"numfields": 3,
"dbtablename": "ASXListed",
"dbtabledesc": "ASX - one of the world\u2019s leading financial market exchanges",
"columns": [
{
"field_name": "Company name",
"db_column_name": "CompanyName",
"db_column_desc": "The company name",
"db_column_type": "VARCHAR"
},
{
"field_name": "ASX code",
"db_column_name": "AsxSymbol",
"db_column_desc": "The ASX Code (symbol)",
"db_column_type": "VARCHAR"
},
{
"field_name": "GICS industry group",
"db_column_name": "GicsIndustryGroup",
"db_column_desc": "Name of Industry Group",
"db_column_type": "VARCHAR"
}
]
},
"nasdaqlisted": {
"filename": "nasdaqlisted.txt",
"source": "NASDAQ",
"url": "ftp://ftp.nasdaqtrader.com/symboldirectory/nasdaqlisted.txt",
"local_file": "\stock_market\data\DailyFiles\\USA\\nasdaqlisted.txt",
"notes":
"Order Must be maintained in header - Use Number key to access"
"The last row of each Symbol Directory text file contains a"
" timestamp that reports the File Creation Time. The file"
" creation time is based on when NASDAQ Trader generates the"
" file and can be used to determine the timeliness of the"
" associated data. The row contains the words File Creation Time"
" followed by mmddyyyyhhmm as the first field, followed by all"
" delimiters to round out the row. An example: File Creation"
" Time: 1217200717:03|||||"
"CreatedDate - 'File Creation Time: MMDDYYYYHR:MN']",
"delimiter": "|",
"numfields": 8,
"dbtablename": "NasdaqListed",
"dbtabledesc":
"ASX is one of the world’s leading financial market"
" exchanges, offering a full suite of services,"
" including listings, trading, clearing and settlement,"
" across a comprehensive range of asset classes. As the"
" first major financial market open every day, ASX is a"
" world leader in raising capital, consistently ranking"
" among the top five exchanges globally. With a total"
" market capitalisation of around $1.5 trillion, ASX is"
" home to some of the world’s leading resource, finance"
" and technology companies. Our $47 trillion interest rate"
" derivatives market is the largest in Asia and among the"
" biggest in the world.",
"columns": [
{
"field_name": "Symbol",
"db_column_name": "Symbol",
"db_column_desc":
"The one to four or five character identifier for each"
" NASDAQ-listed security.",
"db_column_type": "VARCHAR"
},
{
"field_name": "Security Name",
"db_column_name": "SecurityName",
"db_column_desc": "Company issuing the security.",
"db_column_type": "VARCHAR"
},
{
"field_name": "Market Category",
"db_column_name": "MarketCategory",
"db_column_desc": "The category assigned to the issue by NASDAQ based on Listing Requirements. Values",
"db_column_type": "VARCHAR"
},
{
"field_name": "Test Issue",
"db_column_name": "TestIssue",
"db_column_desc": "Indicates whether or not the security is a test security.",
"db_column_type": "VARCHAR"
},
{
"field_name": "Financial Status",
"db_column_name": "FinancialStatus",
"db_column_desc": "Indicates when an issuer has failed to submit its regulatory filings on a timely basis, has failed to meet NASDAQ's continuing listing standards, and/or has filed for bankruptcy.",
"db_column_type": "VARCHAR"
},
{
"field_name": "Round Lot Size",
"db_column_name": "RoundLotSize",
"db_column_desc": "Indicates the number of shares that make"
" up a round lot for the given security.",
"db_column_type": "NUMERIC"
},
{
"field_name": "ETF",
"db_column_name": "ETF",
"db_column_desc": "Identifies whether the security is an"
" exchange traded fund",
"db_column_type": "VARCHAR"
},
{
"field_name": "Next Shares",
"db_column_name": "NextSghares",
"db_column_desc": "",
"db_column_type": "VARCHAR"
}
]
}
}
with open('StockData.json', 'w') as f:
j = json.dumps(self.jsonrec)
f.write(j)
if __name__ == '__main__':
wd = WriteRecord()
wd.create_data_file()
print(wd.jsonrec)
| 49.036585 | 223 | 0.505844 |
20faa100016e96d5ff3397aa8bd253e9e0f9920d
| 4,300 |
py
|
Python
|
tests/accelerators/test_multi_nodes_gpu.py
|
prajakta0111/pytorch-lightning
|
3df02b880a6d145ff0aca24ea429c12c0d8f1181
|
[
"Apache-2.0"
] | 1 |
2021-08-05T01:45:26.000Z
|
2021-08-05T01:45:26.000Z
|
tests/accelerators/test_multi_nodes_gpu.py
|
prajakta0111/pytorch-lightning
|
3df02b880a6d145ff0aca24ea429c12c0d8f1181
|
[
"Apache-2.0"
] | 1 |
2021-03-01T17:32:12.000Z
|
2021-03-01T17:32:12.000Z
|
tests/accelerators/test_multi_nodes_gpu.py
|
prajakta0111/pytorch-lightning
|
3df02b880a6d145ff0aca24ea429c12c0d8f1181
|
[
"Apache-2.0"
] | 1 |
2021-02-16T00:47:46.000Z
|
2021-02-16T00:47:46.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from unittest import mock
import pytest
import torch
ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
sys.path.insert(0, ROOT)
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
from pytorch_lightning import LightningModule # noqa: E402
from pytorch_lightning import Trainer # noqa: E402
from tests.helpers.boring_model import BoringModel # noqa: E402
@pytest.mark.skipif(
not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
)
def test_logging_sync_dist_true_ddp(tmpdir):
"""
Tests to ensure that the sync_dist flag works with CPU (should just return the original value)
"""
fake_result = 1
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch[0])
self.log('foo', torch.tensor(fake_result), on_step=False, on_epoch=True)
return acc
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log('bar', torch.tensor(fake_result), on_step=False, on_epoch=True)
return {"x": loss}
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=2,
weights_summary=None,
accelerator="ddp",
gpus=1,
num_nodes=2,
)
trainer.fit(model)
assert trainer.logged_metrics['foo'] == fake_result
assert trainer.logged_metrics['bar'] == fake_result
@pytest.mark.skipif(
not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
)
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
def test__validation_step__log(tmpdir):
"""
Tests that validation_step can log
"""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch)
acc = acc + batch_idx
self.log('a', acc, on_step=True, on_epoch=True)
self.log('a2', 2)
self.training_step_called = True
return acc
def validation_step(self, batch, batch_idx):
acc = self.step(batch)
acc = acc + batch_idx
self.log('b', acc, on_step=True, on_epoch=True)
self.training_step_called = True
def backward(self, loss, optimizer, optimizer_idx):
return LightningModule.backward(self, loss, optimizer, optimizer_idx)
model = TestModel()
model.validation_step_end = None
model.validation_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
weights_summary=None,
accelerator="ddp",
gpus=1,
num_nodes=2,
)
trainer.fit(model)
# make sure all the metrics are available for callbacks
expected_logged_metrics = {
'a2',
'a_step',
'a_epoch',
'b_step/epoch_0',
'b_step/epoch_1',
'b_epoch',
'epoch',
}
logged_metrics = set(trainer.logged_metrics.keys())
assert expected_logged_metrics == logged_metrics
# we don't want to enable val metrics during steps because it is not something that users should do
# on purpose DO NOT allow step_b... it's silly to monitor val step metrics
callback_metrics = set(trainer.callback_metrics.keys())
callback_metrics.remove('debug_epoch')
expected_cb_metrics = {'a', 'a2', 'b', 'a_epoch', 'b_epoch', 'a_step'}
assert expected_cb_metrics == callback_metrics
| 32.089552 | 104 | 0.661395 |
4b3ae14882c9827343ee2613e8a45d3d07b08241
| 5,391 |
py
|
Python
|
lyssa/feature_extract/spatial_pyramid.py
|
himanshukgp/Lyssandra
|
7c6967e01548d00ace20faf6415f8df00a2578e8
|
[
"BSD-3-Clause"
] | 65 |
2016-07-07T11:16:58.000Z
|
2022-03-28T08:38:57.000Z
|
lyssa/feature_extract/spatial_pyramid.py
|
kevinyu1949/Lyssandra
|
994da67a8bf4a63561cf4d6c58c7e58722e192f7
|
[
"BSD-3-Clause"
] | 2 |
2017-11-12T04:49:06.000Z
|
2018-05-15T21:56:25.000Z
|
lyssa/feature_extract/spatial_pyramid.py
|
kevinyu1949/Lyssandra
|
994da67a8bf4a63561cf4d6c58c7e58722e192f7
|
[
"BSD-3-Clause"
] | 25 |
2017-01-12T11:25:48.000Z
|
2022-03-28T08:42:20.000Z
|
import numpy as np
from lyssa.utils import get_mmap
from lyssa.feature_extract.dsift import DsiftExtractor
from lyssa.utils.img import grid_patches
from lyssa.utils import run_parallel
class dsift_extractor():
def __init__(self, step_size=None, patch_size=None):
# self.step_size = step_size
self.patch_size = patch_size
self.extractor = DsiftExtractor(grid_spacing=step_size, patch_size=patch_size)
def extract(self, img):
dsift_patches, pos = self.extractor.process_image(img, positionNormalize=False)
dsift_patches = dsift_patches.T
pos = pos.T
return dsift_patches, pos
class patch_extractor():
def __init__(self, step_size=None, patch_size=None):
self.step_size = step_size
self.patch_size = patch_size
def extract(self, img):
patches, loc_h, loc_w = grid_patches(img, patch_size=self.patch_size,
step_size=self.step_size, scale=True, return_loc=True)
pos = np.array([loc_h, loc_w]).T
return patches, pos
class sc_spm_extractor():
def __init__(self, feature_extractor=None, levels=(1, 2, 4),
sparse_coder=None, pooling_operator=None, normalizer=None):
self.feature_extractor = feature_extractor
self.levels = levels
self.sparse_coder = sparse_coder
# self.dictionary = dictionary
self.pooling_operator = pooling_operator
self.normalizer = normalizer
def encode(self, imgs, dictionary):
psize = self.feature_extractor.patch_size
n_imgs = len(imgs)
n_atoms = dictionary.shape[1]
cells = np.array(self.levels) ** 2
n_features = np.sum(cells) * n_atoms
Z = np.zeros((n_features, n_imgs))
for k in range(n_imgs):
img = imgs[k]
desc, pos = self.feature_extractor.extract(img)
            # px, py contain the locations of the top-left pixels
            # cx, cy contain the locations of the center pixels of each patch
py = pos[:, 0]
px = pos[:, 1]
cy = py + float(psize) / 2 - 0.5
cx = px + float(psize) / 2 - 0.5
# sparsely encode the patch
coded_patches = self.sparse_coder.encode(desc, dictionary)
n_atoms = coded_patches.shape[0]
n_total_cells = np.sum(cells)
imsize = img.shape
# pre-allocate
# i.e (n_total_cells,n_atoms)
poolpatches = np.zeros((n_total_cells, n_atoms))
cnt = 0
# iterate over all the cells in the pyramid
for (i, lev) in enumerate(self.levels):
# find width and height
# the cell in current level
wunit = float(imsize[1]) / lev
hunit = float(imsize[0]) / lev
# Find patch-cell memberships
binidx = np.floor(cy / hunit) * lev + np.floor(cx / wunit)
for j in range(cells[i]):
# get the patch indices of the patches
# in the j-th cell of the i-th layer
pidx = np.nonzero(binidx == j)[0]
if len(pidx) > 0:
# pool and then normalize
# all the patches in the same cell
poolpatches[cnt, :] = self.pooling_operator(coded_patches[:, pidx])
if self.normalizer is not None:
poolpatches[cnt, :] = self.normalizer(poolpatches[cnt, :])
cnt += 1
Z[:, k] = poolpatches.flatten()
return Z
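# Worked illustration of the cell-index formula in encode() above (numbers are
# made up): for a 100x100 image at pyramid level lev = 2, hunit = wunit = 50,
# so a patch centred at (cy, cx) = (60, 20) gets
# binidx = floor(60 / 50) * 2 + floor(20 / 50) = 2, i.e. the first cell of the
# second row of the 2x2 grid.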
def pyramid_feat_extract(imgs, extractor, D):
return extractor.encode(imgs, D)
class spatial_pyramid():
"""
A class used to extract ScSPM features from a dataset
"""
def __init__(self, mmap=False, workspace=None, metadata=None):
self.workspace = workspace
self.metadata = metadata
self.D = None
self.mmap = mmap
def extract(self, imgs, pyramid_feat_extractor=None, save=True, n_jobs=4):
if self.D is None:
self.D = self.workspace.load("dict.npy")
n_imgs = len(imgs)
levels = (1, 2, 4)
n_atoms = self.D.shape[1]
n_features = np.sum(np.array(levels) ** 2) * n_atoms
Z = run_parallel(func=pyramid_feat_extract, data=imgs, args=(pyramid_feat_extractor, self.D),
result_shape=(n_features, n_imgs), n_batches=100, mmap=self.mmap,
msg="building ScSPM features", n_jobs=n_jobs)
if save:
self.workspace.save("features.npy", Z)
return Z
def dict_learn(self, imgs, feature_extractor=None, dict_learner=None):
if not self.workspace.contains("descriptors.npy"):
self.descriptors = feature_extractor(imgs)
self.workspace.save("descriptors.npy", self.descriptors)
else:
self.descriptors = self.workspace.load("descriptors.npy")
if self.mmap:
self.descriptors = get_mmap(self.descriptors)
print "descriptors extracted"
if not self.workspace.contains("dict.npy"):
dict_learner.fit(self.descriptors)
self.D = dict_learner.D
self.workspace.save("dict.npy", self.D)
else:
self.D = self.workspace.load("dict.npy")
| 35.94 | 101 | 0.584678 |
478456393d05211d5ca9a45d70d31f232e6ade13
| 3,038 |
py
|
Python
|
lpot/experimental/metric/f1.py
|
intelkevinputnam/lpot-docs
|
1ff32b4d89074a6bd133ba531f7c0cea3b73152f
|
[
"Apache-2.0"
] | null | null | null |
lpot/experimental/metric/f1.py
|
intelkevinputnam/lpot-docs
|
1ff32b4d89074a6bd133ba531f7c0cea3b73152f
|
[
"Apache-2.0"
] | null | null | null |
lpot/experimental/metric/f1.py
|
intelkevinputnam/lpot-docs
|
1ff32b4d89074a6bd133ba531f7c0cea3b73152f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Official evaluation script for v1.1 of the SQuAD dataset.
https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py """
from collections import Counter, abc
import string
import re
from lpot.utils import logger
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
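# For example, normalize_answer("The  Cat!") returns "cat": the text is
# lower-cased, punctuation and articles are stripped, and whitespace collapsed.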
def f1_score(prediction, ground_truth):
assert isinstance(prediction, abc.Sequence) and isinstance(ground_truth, abc.Sequence),\
'prediction and ground_truth should be Sequence'
common = Counter(prediction) & Counter(ground_truth)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction)
recall = 1.0 * num_same / len(ground_truth)
f1 = (2 * precision * recall) / (precision + recall)
return f1
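# Illustrative check (token lists are made up): for prediction
# ['a', 'cat', 'sat'] and ground truth ['the', 'cat', 'sat'], two tokens
# overlap, so precision = recall = 2/3 and f1_score returns 2/3.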
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
score = metric_fn(prediction_tokens, ground_truth_tokens)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(predictions, dataset):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
logger.warning(message)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
f1 = 100.0 * f1 / total
return f1
| 36.166667 | 93 | 0.663594 |
3bb59236530bfa6778943d4955964e6b4a85b758
| 3,342 |
py
|
Python
|
examples/visualization_examples.py
|
joannetruong/habitat-api
|
aad2fd7b8545dce44daefd4b7b3941672eb96ee3
|
[
"MIT"
] | 489 |
2019-02-21T21:47:40.000Z
|
2020-08-10T06:43:24.000Z
|
examples/visualization_examples.py
|
joannetruong/habitat-api
|
aad2fd7b8545dce44daefd4b7b3941672eb96ee3
|
[
"MIT"
] | 380 |
2019-02-26T00:50:48.000Z
|
2020-08-11T14:57:07.000Z
|
examples/visualization_examples.py
|
joannetruong/habitat-api
|
aad2fd7b8545dce44daefd4b7b3941672eb96ee3
|
[
"MIT"
] | 167 |
2019-02-26T00:38:30.000Z
|
2020-08-09T23:07:10.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import imageio
import numpy as np
import habitat
from habitat.tasks.nav.nav import NavigationEpisode, NavigationGoal
from habitat.utils.visualizations import maps
IMAGE_DIR = os.path.join("examples", "images")
if not os.path.exists(IMAGE_DIR):
os.makedirs(IMAGE_DIR)
def example_pointnav_draw_target_birdseye_view():
goal_radius = 0.5
goal = NavigationGoal(position=[10, 0.25, 10], radius=goal_radius)
agent_position = np.array([0, 0.25, 0])
agent_rotation = -np.pi / 4
dummy_episode = NavigationEpisode(
goals=[goal],
episode_id="dummy_id",
scene_id="dummy_scene",
start_position=agent_position,
start_rotation=agent_rotation,
)
target_image = maps.pointnav_draw_target_birdseye_view(
agent_position,
agent_rotation,
np.asarray(dummy_episode.goals[0].position),
goal_radius=dummy_episode.goals[0].radius,
agent_radius_px=25,
)
imageio.imsave(
os.path.join(IMAGE_DIR, "pointnav_target_image.png"), target_image
)
def example_pointnav_draw_target_birdseye_view_agent_on_border():
goal_radius = 0.5
goal = NavigationGoal(position=[0, 0.25, 0], radius=goal_radius)
ii = 0
for x_edge in [-1, 0, 1]:
for y_edge in [-1, 0, 1]:
if not np.bitwise_xor(x_edge == 0, y_edge == 0):
continue
ii += 1
agent_position = np.array([7.8 * x_edge, 0.25, 7.8 * y_edge])
agent_rotation = np.pi / 2
dummy_episode = NavigationEpisode(
goals=[goal],
episode_id="dummy_id",
scene_id="dummy_scene",
start_position=agent_position,
start_rotation=agent_rotation,
)
target_image = maps.pointnav_draw_target_birdseye_view(
agent_position,
agent_rotation,
np.asarray(dummy_episode.goals[0].position),
goal_radius=dummy_episode.goals[0].radius,
agent_radius_px=25,
)
imageio.imsave(
os.path.join(
IMAGE_DIR, "pointnav_target_image_edge_%d.png" % ii
),
target_image,
)
def example_get_topdown_map():
config = habitat.get_config(config_paths="configs/tasks/pointnav.yaml")
dataset = habitat.make_dataset(
id_dataset=config.DATASET.TYPE, config=config.DATASET
)
with habitat.Env(config=config, dataset=dataset) as env:
env.reset()
top_down_map = maps.get_topdown_map_from_sim(
env.sim, map_resolution=1024
)
recolor_map = np.array(
[[255, 255, 255], [128, 128, 128], [0, 0, 0]], dtype=np.uint8
)
top_down_map = recolor_map[top_down_map]
imageio.imsave(
os.path.join(IMAGE_DIR, "top_down_map.png"), top_down_map
)
def main():
example_pointnav_draw_target_birdseye_view()
example_get_topdown_map()
example_pointnav_draw_target_birdseye_view_agent_on_border()
if __name__ == "__main__":
main()
| 30.66055 | 75 | 0.627768 |
665b4e5a7c24b0aa6ce16236628ec5e6eb53d19e
| 3,328 |
py
|
Python
|
core/__init__.py
|
xezzz/Twitch-Bot
|
5370cb6918c417a88e8ce73401d562de29a580ce
|
[
"MIT"
] | 1 |
2021-12-07T22:17:38.000Z
|
2021-12-07T22:17:38.000Z
|
core/__init__.py
|
xezzz/Twitch-Bot
|
5370cb6918c417a88e8ce73401d562de29a580ce
|
[
"MIT"
] | null | null | null |
core/__init__.py
|
xezzz/Twitch-Bot
|
5370cb6918c417a88e8ce73401d562de29a580ce
|
[
"MIT"
] | null | null | null |
import asyncio
import platform
import time
import traceback
from tabulate import tabulate
import discord
from discord.ext.commands import AutoShardedBot
from utils import TwitchHTTP, Mongo, handle_notifications
plugins = [
"meta",
"twitch",
"notifications"
#"audio"
]
_ascii = r"""
_ _ _
| | (_) | |
| | ___ _____ ___ ___ _ __ __| |
| | | \ \ / / _ \/ __/ _ \| '__/ _` |
| |____| |\ V / __/ (_| (_) | | | (_| |
|______|_| \_/ \___|\___\___/|_| \__,_|
"""
class Livecord(AutoShardedBot):
READY = False
def __init__(self, config):
super().__init__(
command_prefix="!!", case_insensitive=True, max_messages=1000,
intents=discord.Intents.all(), chunk_guilds_at_startup=True
)
self.notif_cache = {}
self.sent_notification = []
self.loop.create_task(handle_notifications(self))
self.config = config
self.color = 0x6441a5
self.emotes = {
"arrow": "<:arrow:836558825481568296>",
"twitch": "<:twitch:836726608332193884>"
}
self.uptime = None
self.twitch_http = TwitchHTTP(self)
self.db = Mongo(self)
async def _run_event(self, coro, event_name, *args, **kwargs):
while not self.READY and event_name != "on_ready":
await asyncio.sleep(0.3)
await super()._run_event(coro, event_name, *args, **kwargs)
async def on_ready(self):
if not self.READY:
for plugin in plugins:
try:
self.load_extension("plugins.{}".format(plugin))
except Exception as e:
print("Failed to load plugin {}: \n{}".format(plugin, e))
print(_ascii)
table_rows = [
["discord.py", f"v{discord.__version__}"],
["python", f"v{platform.python_version()}"],
["system", f"{platform.system()} v{platform.version()}"],
["discord user", f"{self.user} (id: {self.user.id})"],
["guilds", len(self.guilds)],
["users", len(self.users)],
["shard ids", getattr(self, "shard_ids", "None")]
]
print("\n" + tabulate(table_rows))
self.uptime = time.time()
await self.change_presence(activity=discord.Streaming(name="!!help", url="https://twitch.tv/ezzztv"))
self.READY = True
else:
pass
async def on_guild_join(self, guild: discord.Guild):
if len([x for x in self.db.notifications.find({"id": f"{guild.id}"})]) == 0:
self.db.notifications.insert_one({
"id": f"{guild.id}",
"notifications": []
})
async def on_guild_remove(self, guild: discord.Guild):
if len([x for x in self.db.notifications.find({"id": f"{guild.id}"})]) == 1:
self.db.notifications.delete_one({"id": f"{guild.id}"})
def run(self):
try:
self.remove_command("help")
super().run(self.config['token'], reconnect=True)
except Exception:
e = traceback.format_exc()
print("Error in run() method, aborting! \n{}".format(e))
| 31.396226 | 113 | 0.528846 |
b75f4e6409d4d3f167cfa339d55bd0b8dfed32f1
| 572 |
py
|
Python
|
2565.py
|
WaiNaat/BOJ-Python
|
3365ef090c7dcf6e6a598fea0b25c416a5a3e01b
|
[
"MIT"
] | null | null | null |
2565.py
|
WaiNaat/BOJ-Python
|
3365ef090c7dcf6e6a598fea0b25c416a5a3e01b
|
[
"MIT"
] | null | null | null |
2565.py
|
WaiNaat/BOJ-Python
|
3365ef090c7dcf6e6a598fea0b25c416a5a3e01b
|
[
"MIT"
] | null | null | null |
import sys
input = sys.stdin.readline
# input
n = int(input())
wire = [tuple(map(int, input().split())) for _ in range(n)]
# process
'''
(A, B)쌍을 A 기준 오름차순 정렬,
B 전봇대의 위치들을 수열이라 치고
DP로 그 수열에서 감소하지 않는 부분 수열의 최대 길이를 구하는 문제!
위의 '그 수열'을 B라 하고
opt(i)를 B[0]~B[i]에서 B[i]를 마지막으로 하고 감소하지 않는 부분수열의 최대 길이라 하면
opt(i) =
1
opt(j) + 1 (j는 j<i, B[j] < B[i]인 j들 중 opt(j)가 최대인 j)
둘 중 큰 값
'''
wire.sort()
opt = [0 for _ in range(n)]
for i in range(n):
for j in range(i):
if wire[j][1] < wire[i][1] and opt[i] < opt[j]:
opt[i] = opt[j]
opt[i] += 1
# output
print(n - max(opt))
| 17.875 | 59 | 0.582168 |
5bcf3dd7dd17e8499232424dcfed6bfeafa02bd4
| 1,132 |
py
|
Python
|
scheduler/scheduler/urls.py
|
Awinja-j/Social-Media-post-Scheduler
|
4f95b4bb2ca3f890d3e22bcda859b94ebc483b87
|
[
"MIT"
] | 1 |
2021-05-08T08:21:06.000Z
|
2021-05-08T08:21:06.000Z
|
scheduler/scheduler/urls.py
|
Awinja-j/Social-Media-post-Scheduler
|
4f95b4bb2ca3f890d3e22bcda859b94ebc483b87
|
[
"MIT"
] | null | null | null |
scheduler/scheduler/urls.py
|
Awinja-j/Social-Media-post-Scheduler
|
4f95b4bb2ca3f890d3e22bcda859b94ebc483b87
|
[
"MIT"
] | null | null | null |
"""scheduler URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path(r'', include('post_scheduler.urls')),
path(r'', include('policy.urls')),
# path(r'', include('services.facebook.urls')),
# path(r'', include('services.linkedIn.urls')),
# path(r'', include('services.twitter.urls')),
path(r'', include('user_auth.urls')),
path('accounts/', include('django.contrib.auth.urls')),
]
| 36.516129 | 77 | 0.685512 |
05acdecd3c2a6857b5df6df6d31ef7f35d481c82
| 405 |
py
|
Python
|
twitter_killer_api/asgi.py
|
vadikam100500/Twitter_killer_API
|
f58dfe5edd4860942d87299d7bdedadc9fd33ffe
|
[
"MIT"
] | 1 |
2021-08-19T16:27:03.000Z
|
2021-08-19T16:27:03.000Z
|
twitter_killer_api/asgi.py
|
vadikam100500/Twitter_killer_API
|
f58dfe5edd4860942d87299d7bdedadc9fd33ffe
|
[
"MIT"
] | null | null | null |
twitter_killer_api/asgi.py
|
vadikam100500/Twitter_killer_API
|
f58dfe5edd4860942d87299d7bdedadc9fd33ffe
|
[
"MIT"
] | null | null | null |
"""
ASGI config for yatube_api project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'twitter_killer_api.settings')
application = get_asgi_application()
| 23.823529 | 78 | 0.792593 |
ff65b39c8c362b754cba401272a5dcfb7f2b54c0
| 18,986 |
py
|
Python
|
predicting_movie_reviews_with_bert_on_tf_hub.py
|
hupidong/bert
|
16816eb43ae4246ea1563d708b4878a18a4c2e11
|
[
"Apache-2.0"
] | null | null | null |
predicting_movie_reviews_with_bert_on_tf_hub.py
|
hupidong/bert
|
16816eb43ae4246ea1563d708b4878a18a4c2e11
|
[
"Apache-2.0"
] | null | null | null |
predicting_movie_reviews_with_bert_on_tf_hub.py
|
hupidong/bert
|
16816eb43ae4246ea1563d708b4878a18a4c2e11
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Copyright 2019 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #Predicting Movie Review Sentiment with BERT on TF Hub
# If you’ve been following Natural Language Processing over the past year, you’ve probably heard of BERT: Bidirectional Encoder Representations from Transformers. It’s a neural network architecture designed by Google researchers that’s totally transformed what’s state-of-the-art for NLP tasks, like text classification, translation, summarization, and question answering.
#
# Now that BERT's been added to [TF Hub](https://www.tensorflow.org/hub) as a loadable module, it's easy(ish) to add into existing Tensorflow text pipelines. In an existing pipeline, BERT can replace text embedding layers like ELMO and GloVE. Alternatively, [finetuning](http://wiki.fast.ai/index.php/Fine_tuning) BERT can provide both an accuracy boost and faster training time in many cases.
#
# Here, we'll train a model to predict whether an IMDB movie review is positive or negative using BERT in Tensorflow with tf hub. Some code was adapted from [this colab notebook](https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb). Let's get started!
# In[ ]:
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
# In addition to the standard libraries we imported above, we'll need to install BERT's python package.
# In[ ]:
#get_ipython().system('pip install bert-tensorflow')
# In[ ]:
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
# Below, we'll set an output directory location to store our model output and checkpoints. This can be a local directory, in which case you'd set OUTPUT_DIR to the name of the directory you'd like to create. If you're running this code in Google's hosted Colab, the directory won't persist after the Colab session ends.
#
# Alternatively, if you're a GCP user, you can store output in a GCP bucket. To do that, set a directory name in OUTPUT_DIR and the name of the GCP bucket in the BUCKET field.
#
# Set DO_DELETE to rewrite the OUTPUT_DIR if it exists. Otherwise, Tensorflow will load existing model checkpoints from that directory (if they exist).
# In[ ]:
# Set the output directory for saving model file
# Optionally, set a GCP bucket location
OUTPUT_DIR = 'aclImdb'#@param {type:"string"}
#@markdown Whether or not to clear/delete the directory and create a new one
DO_DELETE = False #@param {type:"boolean"}
#@markdown Set USE_BUCKET and BUCKET if you want to (optionally) store model output on GCP bucket.
USE_BUCKET = False #@param {type:"boolean"}
BUCKET = 'BUCKET_NAME' #@param {type:"string"}
if USE_BUCKET:
OUTPUT_DIR = 'gs://{}/{}'.format(BUCKET, OUTPUT_DIR)
from google.colab import auth
auth.authenticate_user()
if DO_DELETE:
try:
tf.gfile.DeleteRecursively(OUTPUT_DIR)
except:
# Doesn't matter if the directory didn't exist
pass
tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
# #Data
# First, let's download the dataset, hosted by Stanford. The code below, which downloads, extracts, and imports the IMDB Large Movie Review Dataset, is borrowed from [this Tensorflow tutorial](https://www.tensorflow.org/hub/tutorials/text_classification_with_tf_hub).
# In[ ]:
from tensorflow import keras
import os
import re
# Load all files from a directory in a DataFrame.
def load_directory_data(directory):
data = {}
data["sentence"] = []
data["sentiment"] = []
for file_path in os.listdir(directory):
with tf.gfile.GFile(os.path.join(directory, file_path), "r") as f:
data["sentence"].append(f.read())
data["sentiment"].append(re.match("\d+_(\d+)\.txt", file_path).group(1))
return pd.DataFrame.from_dict(data)
# Merge positive and negative examples, add a polarity column and shuffle.
def load_dataset(directory):
pos_df = load_directory_data(os.path.join(directory, "pos"))
neg_df = load_directory_data(os.path.join(directory, "neg"))
pos_df["polarity"] = 1
neg_df["polarity"] = 0
return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)
# Download and process the dataset files.
def download_and_load_datasets(force_download=False):
dataset = tf.keras.utils.get_file(
fname="aclImdb.tar.gz",
origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
extract=True)
train_df = load_dataset(os.path.join(os.path.dirname(dataset),
"aclImdb", "train"))
test_df = load_dataset(os.path.join(os.path.dirname(dataset),
"aclImdb", "test"))
return train_df, test_df
# In[ ]:
train, test = download_and_load_datasets()
# To keep training fast, we'll take a sample of 5000 train and test examples, respectively.
# In[ ]:
train = train.sample(5000)
test = test.sample(5000)
# In[ ]:
train.columns
# For us, our input data is the 'sentence' column and our label is the 'polarity' column (0, 1 for negative and positive, respecitvely)
# In[ ]:
DATA_COLUMN = 'sentence'
LABEL_COLUMN = 'polarity'
# label_list is the list of labels, i.e. True, False or 0, 1 or 'dog', 'cat'
label_list = [0, 1]
# #Data Preprocessing
# We'll need to transform our data into a format BERT understands. This involves two steps. First, we create `InputExample`'s using the constructor provided in the BERT library.
#
# - `text_a` is the text we want to classify, which in this case, is the `Request` field in our Dataframe.
# - `text_b` is used if we're training a model to understand the relationship between sentences (i.e. is `text_b` a translation of `text_a`? Is `text_b` an answer to the question asked by `text_a`?). This doesn't apply to our task, so we can leave `text_b` blank.
# - `label` is the label for our example, i.e. True, False
# In[ ]:
# Use the InputExample class from BERT's run_classifier code to create examples from the data
train_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this example
text_a = x[DATA_COLUMN],
text_b = None,
label = x[LABEL_COLUMN]), axis = 1)
test_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None,
text_a = x[DATA_COLUMN],
text_b = None,
label = x[LABEL_COLUMN]), axis = 1)
# Next, we need to preprocess our data so that it matches the data BERT was trained on. For this, we'll need to do a couple of things (but don't worry--this is also included in the Python library):
#
#
# 1. Lowercase our text (if we're using a BERT lowercase model)
# 2. Tokenize it (i.e. "sally says hi" -> ["sally", "says", "hi"])
# 3. Break words into WordPieces (i.e. "calling" -> ["call", "##ing"])
# 4. Map our words to indexes using a vocab file that BERT provides
# 5. Add special "CLS" and "SEP" tokens (see the [readme](https://github.com/google-research/bert))
# 6. Append "index" and "segment" tokens to each input (see the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf))
#
# Happily, we don't have to worry about most of these details.
#
#
#
# To start, we'll need to load a vocabulary file and lowercasing information directly from the BERT tf hub module:
# In[ ]:
# This is a path to an uncased (all lowercase) version of BERT
BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"
#BERT_MODEL_HUB = r"F:\Learning\bert\bert_uncased_L-12_H-768_A-12\bert_uncased_L-12_H-768_A-12_1"
def create_tokenizer_from_hub_module():
"""Get the vocab file and casing info from the Hub module."""
with tf.Graph().as_default():
bert_module = hub.Module(BERT_MODEL_HUB)
tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
with tf.Session() as sess:
vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
tokenization_info["do_lower_case"]])
return bert.tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
tokenizer = create_tokenizer_from_hub_module()
# Great--we just learned that the BERT model we're using expects lowercase data (that's what stored in tokenization_info["do_lower_case"]) and we also loaded BERT's vocab file. We also created a tokenizer, which breaks words into word pieces:
# In[ ]:
tokenizer.tokenize("This here's an example of using the BERT tokenizer")
# Using our tokenizer, we'll call `run_classifier.convert_examples_to_features` on our InputExamples to convert them into features BERT understands.
# In[ ]:
# We'll set sequences to be at most 128 tokens long.
MAX_SEQ_LENGTH = 128
# Convert our train and test features to InputFeatures that BERT understands.
train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)
test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)
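# For illustration only (exact ids depend on the vocabulary; 101 and 102 are
# the usual [CLS]/[SEP] ids in the uncased BERT vocab), each resulting
# InputFeatures holds aligned lists of length MAX_SEQ_LENGTH, roughly:
#   input_ids:   [101, <wordpiece ids...>, 102, 0, 0, ...]
#   input_mask:  [1, 1, ..., 1, 0, 0, ...]
#   segment_ids: [0, 0, ..., 0]
# plus the integer label_id.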
# #Creating a model
#
# Now that we've prepared our data, let's focus on building a model. `create_model` does just this below. First, it loads the BERT tf hub module again (this time to extract the computation graph). Next, it creates a single new layer that will be trained to adapt BERT to our sentiment task (i.e. classifying whether a movie review is positive or negative). This strategy of using a mostly trained model is called [fine-tuning](http://wiki.fast.ai/index.php/Fine_tuning).
# In[ ]:
def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,
num_labels):
"""Creates a classification model."""
bert_module = hub.Module(
BERT_MODEL_HUB,
trainable=True)
bert_inputs = dict(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids)
bert_outputs = bert_module(
inputs=bert_inputs,
signature="tokens",
as_dict=True)
# Use "pooled_output" for classification tasks on an entire sentence.
# Use "sequence_outputs" for token-level output.
output_layer = bert_outputs["pooled_output"]
hidden_size = output_layer.shape[-1].value
# Create our own layer to tune for politeness data.
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
# Dropout helps prevent overfitting
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
# Convert labels into one-hot encoding
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))
# If we're predicting, we want predicted labels and the probabiltiies.
if is_predicting:
return (predicted_labels, log_probs)
# If we're train/eval, compute loss between predicted and actual label
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, predicted_labels, log_probs)
# Next we'll wrap our model function in a `model_fn_builder` function that adapts our model to work for training, evaluation, and prediction.
# In[ ]:
# model_fn_builder actually creates our model function
# using the passed parameters for num_labels, learning_rate, etc.
def model_fn_builder(num_labels, learning_rate, num_train_steps,
num_warmup_steps):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)
# TRAIN and EVAL
if not is_predicting:
(loss, predicted_labels, log_probs) = create_model(
is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
train_op = bert.optimization.create_optimizer(
loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)
# Calculate evaluation metrics.
def metric_fn(label_ids, predicted_labels):
accuracy = tf.metrics.accuracy(label_ids, predicted_labels)
f1_score = tf.contrib.metrics.f1_score(
label_ids,
predicted_labels)
auc = tf.metrics.auc(
label_ids,
predicted_labels)
recall = tf.metrics.recall(
label_ids,
predicted_labels)
precision = tf.metrics.precision(
label_ids,
predicted_labels)
true_pos = tf.metrics.true_positives(
label_ids,
predicted_labels)
true_neg = tf.metrics.true_negatives(
label_ids,
predicted_labels)
false_pos = tf.metrics.false_positives(
label_ids,
predicted_labels)
false_neg = tf.metrics.false_negatives(
label_ids,
predicted_labels)
return {
"eval_accuracy": accuracy,
"f1_score": f1_score,
"auc": auc,
"precision": precision,
"recall": recall,
"true_positives": true_pos,
"true_negatives": true_neg,
"false_positives": false_pos,
"false_negatives": false_neg
}
eval_metrics = metric_fn(label_ids, predicted_labels)
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
eval_metric_ops=eval_metrics)
else:
(predicted_labels, log_probs) = create_model(
is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
predictions = {
'probabilities': log_probs,
'labels': predicted_labels
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Return the actual model function in the closure
return model_fn
# In[ ]:
# Compute train and warmup steps from batch size
# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)
BATCH_SIZE = 32
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 3.0
# Warmup is a period of time where the learning rate
# is small and gradually increases--usually helps training.
WARMUP_PROPORTION = 0.1
# Model configs
SAVE_CHECKPOINTS_STEPS = 500
SAVE_SUMMARY_STEPS = 100
# In[ ]:
# Compute # train and warmup steps from batch size
num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
# In[ ]:
# Specify output directory and number of checkpoint steps to save
run_config = tf.estimator.RunConfig(
model_dir=OUTPUT_DIR,
save_summary_steps=SAVE_SUMMARY_STEPS,
save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)
# In[ ]:
model_fn = model_fn_builder(
num_labels=len(label_list),
learning_rate=LEARNING_RATE,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config,
params={"batch_size": BATCH_SIZE})
# Next we create an input builder function that takes our training feature set (`train_features`) and produces a generator. This is a pretty standard design pattern for working with Tensorflow [Estimators](https://www.tensorflow.org/guide/estimators).
# In[ ]:
# Create an input function for training. drop_remainder = True for using TPUs.
train_input_fn = bert.run_classifier.input_fn_builder(
features=train_features,
seq_length=MAX_SEQ_LENGTH,
is_training=True,
drop_remainder=False)
# Now we train our model! For me, using a Colab notebook running on Google's GPUs, my training time was about 14 minutes.
# In[ ]:
print(f'Beginning Training!')
current_time = datetime.now()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print("Training took time ", datetime.now() - current_time)
# Now let's use our test data to see how well our model did:
# In[ ]:
test_input_fn = run_classifier.input_fn_builder(
features=test_features,
seq_length=MAX_SEQ_LENGTH,
is_training=False,
drop_remainder=False)
# In[ ]:
estimator.evaluate(input_fn=test_input_fn, steps=None)
# Now let's write code to make predictions on new sentences:
# In[ ]:
def getPrediction(in_sentences):
labels = ["Negative", "Positive"]
  input_examples = [run_classifier.InputExample(guid="", text_a = x, text_b = None, label = 0) for x in in_sentences]  # guid is unused and 0 is just a dummy label here
input_features = run_classifier.convert_examples_to_features(input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)
predictions = estimator.predict(predict_input_fn)
return [(sentence, prediction['probabilities'], labels[prediction['labels']]) for sentence, prediction in zip(in_sentences, predictions)]
# In[ ]:
pred_sentences = [
"That movie was absolutely awful",
"The acting was a bit lacking",
"The film was creative and surprising",
"Absolutely fantastic!"
]
# In[ ]:
predictions = getPrediction(pred_sentences)
# Voila! We have a sentiment classifier!
# In[ ]:
predictions
| 35.822642 | 470 | 0.709049 |
03ff1a599a64266599447107992fcb8683c8cecd
| 4,151 |
py
|
Python
|
viewer.py
|
mbforbes/rndjam1
|
3cb8f75a4ad7b2efb98a426caebb0641ddf773c2
|
[
"MIT"
] | 4 |
2018-06-01T15:59:17.000Z
|
2021-03-16T13:40:52.000Z
|
viewer.py
|
mbforbes/rndjam1
|
3cb8f75a4ad7b2efb98a426caebb0641ddf773c2
|
[
"MIT"
] | 10 |
2017-10-05T18:19:36.000Z
|
2019-06-26T18:11:35.000Z
|
viewer.py
|
mbforbes/rndjam1
|
3cb8f75a4ad7b2efb98a426caebb0641ddf773c2
|
[
"MIT"
] | null | null | null |
"""
For looking at data. Currently using 'visdom' web dashboard.
"""
import code
import csv
import random
from typing import List, Dict
import torch
import visdom
import constants
# globals
vis = visdom.Visdom()
# functions
def scale(orig: torch.Tensor, scale: int) -> torch.Tensor:
"""
Pixel-perfect upscaling
Arguments:
t: orig, should be 2D tensor n x m
scale: must be >= 1
Returns:
scale*n x scale*m
"""
# stupid implementation that doesn't utilize any special torch functions
# for each input point, copy, to all output points
n, m = orig.size()
new_n, new_m = scale * n, scale * m
new = torch.Tensor(new_n, new_m)
for i in range(n):
for j in range(m):
for new_i in range(i * scale, (i + 1) * scale):
for new_j in range(j * scale, (j + 1) * scale):
new[new_i, new_j] = orig[i, j]
return new
def jitter(mag: float = 0.1) -> float:
"""
TODO: elsewhere, guess jitter amt based on data.
"""
return random.uniform(-mag, mag)
def plot_jitter(data: Dict[str, List[float]], win: str = "my-scatter") -> None:
"""
data is a map from named values to the list of their data points
win is the visdom window name to plot into
"""
n = sum(len(v) for v in data.values())
t = torch.FloatTensor(n, 2)
idx = 0
keys = sorted(data.keys())
for x, k in enumerate(keys):
for y in data[k]:
t[idx, 0] = x + jitter()
t[idx, 1] = y
idx += 1
vis.scatter(
t,
win=win,
env=constants.VISDOM_ENV,
opts={
"title": win,
"xtickvals": list(range(len(keys))),
"xticklabels": keys, # {i: k for i, k in enumerate(keys)}
},
)
def plot_bar(
x: torch.Tensor, legend: List[str] = [], win: str = "my-bar", opts={}
) -> None:
"""
Arguments:
TODO
"""
baseopts = dict(title=win, legend=legend)
vis.bar(x, win=win, env=constants.VISDOM_ENV, opts={**baseopts, **opts})
def plot_line(
x: torch.Tensor,
ys: torch.Tensor,
legend: List[str] = [],
win: str = "my-line",
opts={},
) -> None:
"""
Arguments:
x: 1d (N) x values
ys: 1d (N) y values, or
2d (M x N) y values for M lines, one row per line
"""
if len(ys.size()) > 1:
ys = ys.t()
baseopts = dict(title=win, legend=legend)
vis.line(ys, x, win=win, env=constants.VISDOM_ENV, opts={**baseopts, **opts})
def view_train_datum(n: int = 0):
"""
Arguments:
n: which datum to view (0-based indexing)
"""
# read the desired row from the csv
img_list = None
with open(constants.TRAIN_RESPLIT, "r") as f:
for i, row in enumerate(csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)):
if i == n:
img_list = row
break
if img_list is None:
print("ERROR: n ({}) was too large. should be <= {}".format(n, i))
return
# transform it to view it in its normal size
# visdom takes C x H x W
# - we only have 1 channel (b/w) so this is fine
# - H = how many rows, W = how many columns
# - the data is laid out as row0, then row1, ..., and that seems to be how
# view(...) creates the tensor, so this works.
# - unsqueeze just creates a new dimension
label = int(img_list[0])
img_vector = torch.Tensor(img_list[1:])
img_matrix = img_vector.view(28, 28)
img_tensor = img_matrix.unsqueeze(0)
vis.image(
img_tensor,
win="demo image",
env=constants.VISDOM_ENV,
opts={"caption": "this should be a {}".format(label)},
)
# NOTE: could use vis.images.(...) to view 10 of them in a row. would use
# torch.stack(...).
# view it bigger
bigger = scale(img_matrix, 10).unsqueeze(0)
vis.image(
bigger,
win="demo image expanded",
env=constants.VISDOM_ENV,
opts={"caption": "this should be a bigger {}".format(label)},
)
def main():
view_train_datum(0)
if __name__ == "__main__":
main()
| 25.157576 | 81 | 0.564442 |
65e8f162e35ffd4111c90c4923c8754665df43ad
| 1,431 |
py
|
Python
|
am/legislative/management/body_membership_copy.py
|
access-missouri/am-django-project
|
2457b8089900c61c73000c1d7479b7a72f6d1855
|
[
"BSD-2-Clause"
] | 4 |
2018-05-01T20:31:49.000Z
|
2021-12-20T19:30:40.000Z
|
am/legislative/management/body_membership_copy.py
|
access-missouri/am-django-project
|
2457b8089900c61c73000c1d7479b7a72f6d1855
|
[
"BSD-2-Clause"
] | 22 |
2017-04-13T15:02:09.000Z
|
2021-02-02T21:48:41.000Z
|
am/legislative/management/body_membership_copy.py
|
access-missouri/am-django-project
|
2457b8089900c61c73000c1d7479b7a72f6d1855
|
[
"BSD-2-Clause"
] | 1 |
2018-07-02T20:08:43.000Z
|
2018-07-02T20:08:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility functions for copying BodyMembership rows.
"""
from legislative.models import BodyMembership
def get_or_copy_body_membership_to_session(membership, session):
"""
Like ModelManager.get_or_create() only for copying BodyMembership.
:param membership: The BodyMembership to copy.
:param session: The session to copy into.
:return: A tuple of the copied(or not) membership and whether it was created.
"""
membership_copy, created = BodyMembership.objects.get_or_create(
person=membership.person,
body=membership.body,
session=session,
district=membership.district
)
return (membership_copy, created)
def replicate_all_body_memberships_into_session(source, destination):
"""
Take all the memberships from a given session and make sure they're in the new session.
:param source: Session to copy BodyMemberships from.
:param destination: Session to copy BodyMemberships to.
:return: The number of fresh BodyMemberships created.
"""
memberships_created = 0
for membership_old in source.body_memberships.all():
membership_new, created = get_or_copy_body_membership_to_session(membership_old,
destination)
if created:
memberships_created += 1
return memberships_created
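# Illustrative call (the session objects are placeholders, not fixtures from
# this project):
#   n_created = replicate_all_body_memberships_into_session(old_session,
#                                                           new_session)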
| 32.522727 | 91 | 0.689727 |
1514c0868be0bc39f475fd991939a34e88e872df
| 236 |
py
|
Python
|
website/addons/dataverse/settings/defaults.py
|
DanielSBrown/osf.io
|
98dda2ac237377197acacce78274bc0a4ce8f303
|
[
"Apache-2.0"
] | 1 |
2015-10-02T18:35:53.000Z
|
2015-10-02T18:35:53.000Z
|
website/addons/dataverse/settings/defaults.py
|
DanielSBrown/osf.io
|
98dda2ac237377197acacce78274bc0a4ce8f303
|
[
"Apache-2.0"
] | 4 |
2016-05-13T14:24:16.000Z
|
2017-03-30T15:28:31.000Z
|
website/addons/dataverse/settings/defaults.py
|
DanielSBrown/osf.io
|
98dda2ac237377197acacce78274bc0a4ce8f303
|
[
"Apache-2.0"
] | null | null | null |
DEFAULT_HOSTS = [
'dataverse.harvard.edu', # Harvard PRODUCTION server
'demo.dataverse.org', # Harvard DEMO server
'apitest.dataverse.org', # Dataverse TEST server
]
REQUEST_TIMEOUT = 15
| 29.5 | 67 | 0.605932 |
ac82c59c5653917357c87555134ebfefd24daada
| 4,680 |
py
|
Python
|
examples/chatbot/chatbot_example.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
examples/chatbot/chatbot_example.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
examples/chatbot/chatbot_example.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from termcolor import colored
import torch
from fortex.nltk import NLTKSentenceSegmenter, NLTKWordTokenizer, NLTKPOSTagger
from forte.common.configuration import Config
from forte.data.multi_pack import MultiPack
from forte.data.readers import MultiPackTerminalReader
from forte.common.resources import Resources
from forte.pipeline import Pipeline
from forte.processors.third_party import MicrosoftBingTranslator
from forte.processors.nlp.srl_predictor import SRLPredictor
from forte.processors.ir import SearchProcessor, BertBasedQueryCreator
from forte.data.selector import NameMatchSelector
from ft.onto.base_ontology import PredicateLink, Sentence
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def setup(config: Config) -> Pipeline:
resource = Resources()
query_pipeline = Pipeline[MultiPack](resource=resource)
query_pipeline.set_reader(
reader=MultiPackTerminalReader(), config=config.reader
)
query_pipeline.add(
component=MicrosoftBingTranslator(), config=config.translator
)
query_pipeline.add(
component=BertBasedQueryCreator(), config=config.query_creator
)
query_pipeline.add(component=SearchProcessor(), config=config.searcher)
top_response_pack_name = config.indexer.response_pack_name_prefix + "_0"
query_pipeline.add(
component=NLTKSentenceSegmenter(),
selector=NameMatchSelector(select_name=top_response_pack_name),
)
query_pipeline.add(
component=NLTKWordTokenizer(),
selector=NameMatchSelector(select_name=top_response_pack_name),
)
query_pipeline.add(
component=NLTKPOSTagger(),
selector=NameMatchSelector(select_name=top_response_pack_name),
)
query_pipeline.add(
component=SRLPredictor(),
config=config.SRL,
selector=NameMatchSelector(select_name=top_response_pack_name),
)
query_pipeline.add(
component=MicrosoftBingTranslator(), config=config.back_translator
)
query_pipeline.initialize()
return query_pipeline
def main(config: Config):
query_pipeline = setup(config)
resource = query_pipeline.resource
m_pack: MultiPack
for m_pack in query_pipeline.process_dataset():
# update resource to be used in the next conversation
query_pack = m_pack.get_pack(config.translator.in_pack_name)
if resource.get("user_utterance"):
resource.get("user_utterance").append(query_pack)
else:
resource.update(user_utterance=[query_pack])
response_pack = m_pack.get_pack(config.back_translator.in_pack_name)
if resource.get("bot_utterance"):
resource.get("bot_utterance").append(response_pack)
else:
resource.update(bot_utterance=[response_pack])
english_pack = m_pack.get_pack("pack")
print(
colored("English Translation of the query: ", "green"),
english_pack.text,
"\n",
)
# Just take the first pack.
pack = m_pack.get_pack(config.indexer.response_pack_name_prefix + "_0")
print(colored("Retrieved Document", "green"), pack.text, "\n")
print(
colored("German Translation", "green"),
m_pack.get_pack("response").text,
"\n",
)
for sentence in pack.get(Sentence):
sent_text = sentence.text
print(colored("Sentence:", "red"), sent_text, "\n")
print(colored("Semantic role labels:", "red"))
for link in pack.get(PredicateLink, sentence):
parent = link.get_parent()
child = link.get_child()
print(
f' - "{child.text}" is role '
f"{link.arg_type} of "
f'predicate "{parent.text}"'
)
print()
input(colored("Press ENTER to continue...\n", "green"))
if __name__ == "__main__":
all_config = Config(yaml.safe_load(open("config.yml", "r")), None)
main(all_config)
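# A rough sketch of the configuration sections this example dereferences from
# `config.yml`; the values below are placeholders, not the project's real defaults.
expected_config_sections = {
    "reader": {},                                       # MultiPackTerminalReader options
    "translator": {"in_pack_name": "query"},            # placeholder pack name
    "query_creator": {},                                # BertBasedQueryCreator options
    "searcher": {},                                     # SearchProcessor options
    "indexer": {"response_pack_name_prefix": "doc"},    # placeholder prefix
    "SRL": {},                                          # SRLPredictor options
    "back_translator": {"in_pack_name": "response"},    # placeholder pack name
}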
| 35.725191 | 79 | 0.68547 |
0edf9e8befe3275eff95720da44bf00a92d494d7
| 3,875 |
py
|
Python
|
install_op.py
|
parkside-securities/1password-client
|
c890b3b1f647fa8620e95d0aecf85c44851ce8aa
|
[
"MIT"
] | null | null | null |
install_op.py
|
parkside-securities/1password-client
|
c890b3b1f647fa8620e95d0aecf85c44851ce8aa
|
[
"MIT"
] | null | null | null |
install_op.py
|
parkside-securities/1password-client
|
c890b3b1f647fa8620e95d0aecf85c44851ce8aa
|
[
"MIT"
] | null | null | null |
import os
import wget
import zipfile
import platform
from subprocess import Popen, PIPE
platform_links = {
"Darwin": {
"x86_64": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_darwin_amd64_v1.8.0.pkg",
"download_loc": "/usr/local/bin/"
},
"FreeBSD": {
"i386": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_freebsd_386_v1.8.0.zip",
"i686": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_freebsd_386_v1.8.0.zip",
"x86_64": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_freebsd_amd64_v1.8.0.zip",
"arm": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_freebsd_arm_v1.8.0.zip",
"aarch64_be": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_freebsd_arm_v1.8.0.zip",
"aarch64": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_freebsd_arm_v1.8.0.zip",
"armv8b": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_freebsd_arm_v1.8.0.zip",
"armv8l": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_freebsd_arm_v1.8.0.zip",
"download_loc": "/usr/local/bin/"
},
"Linux": {
"i386": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_linux_386_v1.8.0.zip",
"i686": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_linux_386_v1.8.0.zip",
"x86_64": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_linux_amd64_v1.8.0.zip",
"aarch64_be": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_linux_arm_v1.8.0.zip",
"aarch64": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_linux_arm_v1.8.0.zip",
"armv8b": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_linux_arm_v1.8.0.zip",
"armv8l": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_linux_arm_v1.8.0.zip",
"arm": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_linux_arm_v1.8.0.zip",
"download_loc": "/usr/local/bin/"
},
"OpenBSD": {
"i386": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_openbsd_386_v1.8.0.zip",
"i686": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_openbsd_386_v1.8.0.zip",
"x86_64": "https://cache.agilebits.com/dist/1P/op/pkg/v1.8.0/op_openbsd_amd64_v1.8.0.zip",
"download_loc": "/usr/local/bin/"
}
}
def read_bash_return(cmd, single=True):
process = os.popen(cmd)
preprocessed = process.read()
process.close()
if single:
return str(preprocessed.split("\n")[0])
else:
return str(preprocessed)
def check_install_required(): # pragma: no cover
"""
Helper function to check if op cli is already installed
:returns: :obj:`bool`: True or False
"""
op = read_bash_return("op --version")
if op == "":
return True
else:
return False
def install_op(): # pragma: no cover
"""
Helper function to download, unzip, install and chmod op cli files
"""
system = str(platform.system())
machine = str(platform.machine())
link = platform_links[system][machine]
local_bin = platform_links[system]["download_loc"]
os.chmod(local_bin, 0o755)
op_file = link.split("/")[-1]
download_path = os.path.join(local_bin, op_file)
print('Downloading the 1Password CLI: {}'.format(op_file))
wget.download(link, download_path)
if link[-4:] != ".pkg":
zip_ref = zipfile.ZipFile(download_path, 'r')
zip_ref.extractall(local_bin)
zip_ref.close()
os.chmod(os.path.join(local_bin, 'op'), 0o755)
else:
if check_install_required():
Popen(["open", os.path.join(local_bin, op_file)], stdin=PIPE, stdout=PIPE) # pragma: no cover
else:
pass
def install_chocolatey():
"""
    Helper function for installing the Windows package manager (Chocolatey); requires that installation is performed with an admin role.
"""
pass
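# A minimal usage sketch combining the helpers above: install the `op` CLI only when
# it is not already on the PATH.
if __name__ == "__main__":
    if check_install_required():
        install_op()
    else:
        print("1Password CLI already installed: " + read_bash_return("op --version"))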
| 39.540816 | 112 | 0.641806 |
2219bf5399730b5dceb96a30371abf51b0affb36
| 1,082 |
py
|
Python
|
backend/users/models/blockage.py
|
appheap/social-media-analyzer
|
0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c
|
[
"Apache-2.0"
] | 5 |
2021-09-11T22:01:15.000Z
|
2022-03-16T21:33:42.000Z
|
backend/users/models/blockage.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | null | null | null |
backend/users/models/blockage.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | 3 |
2022-01-18T11:06:22.000Z
|
2022-02-26T13:39:28.000Z
|
from django.db import models
from db import models as db_models
class TimeZones(models.TextChoices):
UTC = 'UTC'
ASIA_TEHRAN = 'Asia/Tehran'
class BlockageTypes(models.TextChoices):
OTHER = 'OTHER'
FOREVER = 'FOREVER'
TEMPORARY = 'TEMPORARY'
SPAM = 'SPAM'
class Blockage(db_models.BaseModel):
blocked = models.BooleanField(default=False)
blocked_ts = models.BigIntegerField(null=True, blank=True)
blocked_reason = models.CharField(
max_length=256,
null=True, blank=True,
)
blocked_type = models.CharField(
        choices=BlockageTypes.choices,
max_length=255,
null=True, blank=True,
)
blocked_until_ts = models.BigIntegerField(
null=True, blank=True,
default=0,
)
##################################################
# `user` : the user who this blockage belongs to
# `telegram_account` : the telegram account this blockage belongs to
# `telegram_channel` : the telegram channel this blockage belongs to
def __str__(self):
return self.blocked_reason
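# A minimal usage sketch: recording a temporary blockage. `block_temporarily` is a
# hypothetical helper; it assumes BaseModel adds no extra required fields and that
# `now_ts` is a Unix timestamp supplied by the caller.
def block_temporarily(now_ts):
    return Blockage.objects.create(
        blocked=True,
        blocked_ts=now_ts,
        blocked_reason="flood wait",
        blocked_type=BlockageTypes.TEMPORARY,
        blocked_until_ts=now_ts + 3600,
    )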
| 25.761905 | 72 | 0.640481 |
5e15d0512273d3b6bd08e6f36bf0e78198ebfb18
| 18,080 |
py
|
Python
|
uvicorn/protocols/http/h11_impl.py
|
grouchoboy/uvicorn
|
ae0fd316f03dbef926e40216024dfb934417d48d
|
[
"BSD-3-Clause"
] | 2 |
2021-04-01T08:46:05.000Z
|
2021-04-01T08:46:07.000Z
|
uvicorn/protocols/http/h11_impl.py
|
grouchoboy/uvicorn
|
ae0fd316f03dbef926e40216024dfb934417d48d
|
[
"BSD-3-Clause"
] | 9 |
2021-04-12T13:44:34.000Z
|
2021-04-13T16:50:08.000Z
|
uvicorn/protocols/http/h11_impl.py
|
grouchoboy/uvicorn
|
ae0fd316f03dbef926e40216024dfb934417d48d
|
[
"BSD-3-Clause"
] | 2 |
2020-04-03T09:49:18.000Z
|
2020-04-03T10:01:50.000Z
|
import asyncio
import http
import logging
from urllib.parse import unquote
import h11
from uvicorn.protocols.utils import (
get_client_addr,
get_local_addr,
get_path_with_query_string,
get_remote_addr,
is_ssl,
)
def _get_status_phrase(status_code):
try:
return http.HTTPStatus(status_code).phrase.encode()
except ValueError:
return b""
STATUS_PHRASES = {
status_code: _get_status_phrase(status_code) for status_code in range(100, 600)
}
HIGH_WATER_LIMIT = 65536
TRACE_LOG_LEVEL = 5
class FlowControl:
def __init__(self, transport):
self._transport = transport
self.read_paused = False
self.write_paused = False
self._is_writable_event = asyncio.Event()
self._is_writable_event.set()
async def drain(self):
await self._is_writable_event.wait()
def pause_reading(self):
if not self.read_paused:
self.read_paused = True
self._transport.pause_reading()
def resume_reading(self):
if self.read_paused:
self.read_paused = False
self._transport.resume_reading()
def pause_writing(self):
if not self.write_paused:
self.write_paused = True
self._is_writable_event.clear()
def resume_writing(self):
if self.write_paused:
self.write_paused = False
self._is_writable_event.set()
async def service_unavailable(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 503,
"headers": [
(b"content-type", b"text/plain; charset=utf-8"),
(b"connection", b"close"),
],
}
)
await send({"type": "http.response.body", "body": b"Service Unavailable"})
class H11Protocol(asyncio.Protocol):
def __init__(self, config, server_state, _loop=None):
if not config.loaded:
config.load()
self.config = config
self.app = config.loaded_app
self.loop = _loop or asyncio.get_event_loop()
self.logger = logging.getLogger("uvicorn.error")
self.access_logger = logging.getLogger("uvicorn.access")
self.access_log = self.access_logger.hasHandlers()
self.conn = h11.Connection(h11.SERVER)
self.ws_protocol_class = config.ws_protocol_class
self.root_path = config.root_path
self.limit_concurrency = config.limit_concurrency
# Timeouts
self.timeout_keep_alive_task = None
self.timeout_keep_alive = config.timeout_keep_alive
# Shared server state
self.server_state = server_state
self.connections = server_state.connections
self.tasks = server_state.tasks
self.default_headers = server_state.default_headers
# Per-connection state
self.transport = None
self.flow = None
self.server = None
self.client = None
self.scheme = None
# Per-request state
self.scope = None
self.headers = None
self.cycle = None
self.message_event = asyncio.Event()
# Protocol interface
def connection_made(self, transport):
self.connections.add(self)
self.transport = transport
self.flow = FlowControl(transport)
self.server = get_local_addr(transport)
self.client = get_remote_addr(transport)
self.scheme = "https" if is_ssl(transport) else "http"
if self.logger.level <= TRACE_LOG_LEVEL:
prefix = "%s:%d - " % tuple(self.client) if self.client else ""
self.logger.log(TRACE_LOG_LEVEL, "%sConnection made", prefix)
def connection_lost(self, exc):
self.connections.discard(self)
if self.logger.level <= TRACE_LOG_LEVEL:
prefix = "%s:%d - " % tuple(self.client) if self.client else ""
self.logger.log(TRACE_LOG_LEVEL, "%sConnection lost", prefix)
if self.cycle and not self.cycle.response_complete:
self.cycle.disconnected = True
if self.conn.our_state != h11.ERROR:
event = h11.ConnectionClosed()
try:
self.conn.send(event)
except h11.LocalProtocolError:
# Premature client disconnect
pass
self.message_event.set()
if self.flow is not None:
self.flow.resume_writing()
def eof_received(self):
pass
def data_received(self, data):
if self.timeout_keep_alive_task is not None:
self.timeout_keep_alive_task.cancel()
self.timeout_keep_alive_task = None
self.conn.receive_data(data)
self.handle_events()
def handle_events(self):
while True:
try:
event = self.conn.next_event()
except h11.RemoteProtocolError:
msg = "Invalid HTTP request received."
self.logger.warning(msg)
self.transport.close()
return
event_type = type(event)
if event_type is h11.NEED_DATA:
break
elif event_type is h11.PAUSED:
# This case can occur in HTTP pipelining, so we need to
# stop reading any more data, and ensure that at the end
# of the active request/response cycle we handle any
# events that have been buffered up.
self.flow.pause_reading()
break
elif event_type is h11.Request:
self.headers = [(key.lower(), value) for key, value in event.headers]
raw_path, _, query_string = event.target.partition(b"?")
self.scope = {
"type": "http",
"asgi": {
"version": self.config.asgi_version,
"spec_version": "2.1",
},
"http_version": event.http_version.decode("ascii"),
"server": self.server,
"client": self.client,
"scheme": self.scheme,
"method": event.method.decode("ascii"),
"root_path": self.root_path,
"path": unquote(raw_path.decode("ascii")),
"raw_path": raw_path,
"query_string": query_string,
"headers": self.headers,
}
for name, value in self.headers:
if name == b"connection":
tokens = [token.lower().strip() for token in value.split(b",")]
if b"upgrade" in tokens:
self.handle_upgrade(event)
return
# Handle 503 responses when 'limit_concurrency' is exceeded.
if self.limit_concurrency is not None and (
len(self.connections) >= self.limit_concurrency
or len(self.tasks) >= self.limit_concurrency
):
app = service_unavailable
message = "Exceeded concurrency limit."
self.logger.warning(message)
else:
app = self.app
self.cycle = RequestResponseCycle(
scope=self.scope,
conn=self.conn,
transport=self.transport,
flow=self.flow,
logger=self.logger,
access_logger=self.access_logger,
access_log=self.access_log,
default_headers=self.default_headers,
message_event=self.message_event,
on_response=self.on_response_complete,
)
task = self.loop.create_task(self.cycle.run_asgi(app))
task.add_done_callback(self.tasks.discard)
self.tasks.add(task)
elif event_type is h11.Data:
if self.conn.our_state is h11.DONE:
continue
self.cycle.body += event.data
if len(self.cycle.body) > HIGH_WATER_LIMIT:
self.flow.pause_reading()
self.message_event.set()
elif event_type is h11.EndOfMessage:
if self.conn.our_state is h11.DONE:
self.transport.resume_reading()
self.conn.start_next_cycle()
continue
self.cycle.more_body = False
self.message_event.set()
def handle_upgrade(self, event):
upgrade_value = None
for name, value in self.headers:
if name == b"upgrade":
upgrade_value = value.lower()
if upgrade_value != b"websocket" or self.ws_protocol_class is None:
msg = "Unsupported upgrade request."
self.logger.warning(msg)
reason = STATUS_PHRASES[400]
headers = [
(b"content-type", b"text/plain; charset=utf-8"),
(b"connection", b"close"),
]
event = h11.Response(status_code=400, headers=headers, reason=reason)
output = self.conn.send(event)
self.transport.write(output)
event = h11.Data(data=b"Unsupported upgrade request.")
output = self.conn.send(event)
self.transport.write(output)
event = h11.EndOfMessage()
output = self.conn.send(event)
self.transport.write(output)
self.transport.close()
return
self.connections.discard(self)
output = [event.method, b" ", event.target, b" HTTP/1.1\r\n"]
for name, value in self.headers:
output += [name, b": ", value, b"\r\n"]
output.append(b"\r\n")
protocol = self.ws_protocol_class(
config=self.config, server_state=self.server_state
)
protocol.connection_made(self.transport)
protocol.data_received(b"".join(output))
self.transport.set_protocol(protocol)
def on_response_complete(self):
self.server_state.total_requests += 1
if self.transport.is_closing():
return
# Set a short Keep-Alive timeout.
self.timeout_keep_alive_task = self.loop.call_later(
self.timeout_keep_alive, self.timeout_keep_alive_handler
)
# Unpause data reads if needed.
self.flow.resume_reading()
# Unblock any pipelined events.
if self.conn.our_state is h11.DONE and self.conn.their_state is h11.DONE:
self.conn.start_next_cycle()
self.handle_events()
def shutdown(self):
"""
Called by the server to commence a graceful shutdown.
"""
if self.cycle is None or self.cycle.response_complete:
event = h11.ConnectionClosed()
self.conn.send(event)
self.transport.close()
else:
self.cycle.keep_alive = False
def pause_writing(self):
"""
Called by the transport when the write buffer exceeds the high water mark.
"""
self.flow.pause_writing()
def resume_writing(self):
"""
Called by the transport when the write buffer drops below the low water mark.
"""
self.flow.resume_writing()
def timeout_keep_alive_handler(self):
"""
Called on a keep-alive connection if no new data is received after a short delay.
"""
if not self.transport.is_closing():
event = h11.ConnectionClosed()
self.conn.send(event)
self.transport.close()
class RequestResponseCycle:
def __init__(
self,
scope,
conn,
transport,
flow,
logger,
access_logger,
access_log,
default_headers,
message_event,
on_response,
):
self.scope = scope
self.conn = conn
self.transport = transport
self.flow = flow
self.logger = logger
self.access_logger = access_logger
self.access_log = access_log
self.default_headers = default_headers
self.message_event = message_event
self.on_response = on_response
# Connection state
self.disconnected = False
self.keep_alive = True
self.waiting_for_100_continue = conn.they_are_waiting_for_100_continue
# Request state
self.body = b""
self.more_body = True
# Response state
self.response_started = False
self.response_complete = False
# ASGI exception wrapper
async def run_asgi(self, app):
try:
result = await app(self.scope, self.receive, self.send)
except BaseException as exc:
msg = "Exception in ASGI application\n"
self.logger.error(msg, exc_info=exc)
if not self.response_started:
await self.send_500_response()
else:
self.transport.close()
else:
if result is not None:
msg = "ASGI callable should return None, but returned '%s'."
self.logger.error(msg, result)
self.transport.close()
elif not self.response_started and not self.disconnected:
msg = "ASGI callable returned without starting response."
self.logger.error(msg)
await self.send_500_response()
elif not self.response_complete and not self.disconnected:
msg = "ASGI callable returned without completing response."
self.logger.error(msg)
self.transport.close()
finally:
self.on_response = None
async def send_500_response(self):
await self.send(
{
"type": "http.response.start",
"status": 500,
"headers": [
(b"content-type", b"text/plain; charset=utf-8"),
(b"connection", b"close"),
],
}
)
await self.send(
{"type": "http.response.body", "body": b"Internal Server Error"}
)
# ASGI interface
async def send(self, message):
message_type = message["type"]
if self.flow.write_paused and not self.disconnected:
await self.flow.drain()
if self.disconnected:
return
if not self.response_started:
# Sending response status line and headers
if message_type != "http.response.start":
msg = "Expected ASGI message 'http.response.start', but got '%s'."
raise RuntimeError(msg % message_type)
self.response_started = True
self.waiting_for_100_continue = False
status_code = message["status"]
headers = self.default_headers + message.get("headers", [])
if self.access_log:
self.access_logger.info(
'%s - "%s %s HTTP/%s" %d',
get_client_addr(self.scope),
self.scope["method"],
get_path_with_query_string(self.scope),
self.scope["http_version"],
status_code,
extra={"status_code": status_code, "scope": self.scope},
)
# Write response status line and headers
reason = STATUS_PHRASES[status_code]
event = h11.Response(
status_code=status_code, headers=headers, reason=reason
)
output = self.conn.send(event)
self.transport.write(output)
elif not self.response_complete:
# Sending response body
if message_type != "http.response.body":
msg = "Expected ASGI message 'http.response.body', but got '%s'."
raise RuntimeError(msg % message_type)
body = message.get("body", b"")
more_body = message.get("more_body", False)
# Write response body
if self.scope["method"] == "HEAD":
event = h11.Data(data=b"")
else:
event = h11.Data(data=body)
output = self.conn.send(event)
self.transport.write(output)
# Handle response completion
if not more_body:
self.response_complete = True
event = h11.EndOfMessage()
output = self.conn.send(event)
self.transport.write(output)
else:
# Response already sent
msg = "Unexpected ASGI message '%s' sent, after response already completed."
raise RuntimeError(msg % message_type)
if self.response_complete:
if self.conn.our_state is h11.MUST_CLOSE or not self.keep_alive:
event = h11.ConnectionClosed()
self.conn.send(event)
self.transport.close()
self.on_response()
async def receive(self):
if self.waiting_for_100_continue and not self.transport.is_closing():
event = h11.InformationalResponse(
status_code=100, headers=[], reason="Continue"
)
output = self.conn.send(event)
self.transport.write(output)
self.waiting_for_100_continue = False
if not self.disconnected and not self.response_complete:
self.flow.resume_reading()
await self.message_event.wait()
self.message_event.clear()
if self.disconnected or self.response_complete:
message = {"type": "http.disconnect"}
else:
message = {
"type": "http.request",
"body": self.body,
"more_body": self.more_body,
}
self.body = b""
return message
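# A minimal ASGI application sketch (the name `minimal_app` is hypothetical)
# illustrating the message shapes RequestResponseCycle exchanges above:
# "http.request" events are consumed via receive(), then "http.response.start"
# and "http.response.body" are emitted via send().
async def minimal_app(scope, receive, send):
    assert scope["type"] == "http"
    body = b""
    more_body = True
    while more_body:  # drain the request body, chunk by chunk
        message = await receive()
        if message["type"] == "http.disconnect":
            return
        body += message.get("body", b"")
        more_body = message.get("more_body", False)
    await send(
        {
            "type": "http.response.start",
            "status": 200,
            "headers": [(b"content-type", b"text/plain")],
        }
    )
    await send({"type": "http.response.body", "body": b"Hello, world!"})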
| 34.177694 | 89 | 0.558573 |
9778f1964d0f312479e92cb60ba32d5d79c5b22e
| 2,770 |
py
|
Python
|
python_modules/libraries/dagster-pagerduty/dagster_pagerduty/hooks.py
|
joshuataylor/dagster
|
7ed9c52eb1d30d0aea99e4e9339de3d0bc5c3035
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-pagerduty/dagster_pagerduty/hooks.py
|
joshuataylor/dagster
|
7ed9c52eb1d30d0aea99e4e9339de3d0bc5c3035
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-pagerduty/dagster_pagerduty/hooks.py
|
joshuataylor/dagster
|
7ed9c52eb1d30d0aea99e4e9339de3d0bc5c3035
|
[
"Apache-2.0"
] | null | null | null |
from typing import Callable, Optional
from dagster.core.definitions import failure_hook
from dagster.core.execution.context.system import HookContext
def _default_summary_fn(context: HookContext) -> str:
return "Solid {solid_name} on pipeline {pipeline_name} failed!".format(
solid_name=context.solid.name, pipeline_name=context.pipeline_name,
)
def _dedup_key_fn(context: HookContext) -> str:
return "{pipeline_name}|{solid_name}".format(
pipeline_name=context.pipeline_name, solid_name=context.solid.name,
)
def _source_fn(context: HookContext):
return "{pipeline_name}".format(pipeline_name=context.pipeline_name)
def pagerduty_on_failure(
severity: str,
summary_fn: Callable[[HookContext], str] = _default_summary_fn,
dagit_base_url: Optional[str] = None,
):
"""Create a hook on step failure events that will trigger a PagerDuty alert.
Args:
severity (str): How impacted the affected system is. Displayed to users in lists and
influences the priority of any created incidents. Must be one of {info, warning, error, critical}
        summary_fn (Optional[Callable[[HookContext], str]]): Function which takes in the HookContext
            and outputs a summary of the issue.
        dagit_base_url (Optional[str]): The base URL of your Dagit instance. Specify this to allow
alerts to include deeplinks to the specific pipeline run that triggered the hook.
Examples:
.. code-block:: python
@pagerduty_on_failure("info", dagit_base_url="http://localhost:3000")
@pipeline(...)
def my_pipeline():
pass
.. code-block:: python
def my_summary_fn(context: HookContext) -> str:
return "Solid {solid_name} failed!".format(
solid_name=context.solid
)
@solid
def a_solid(context):
pass
@pipeline(...)
def my_pipeline():
a_solid.with_hooks(hook_defs={pagerduty_on_failure(severity="critical", summary_fn=my_summary_fn)})
"""
@failure_hook(required_resource_keys={"pagerduty"})
def _hook(context: HookContext):
custom_details = {}
if dagit_base_url:
custom_details = {
"dagit url": "{base_url}/instance/runs/{run_id}".format(
base_url=dagit_base_url, run_id=context.run_id
)
}
context.resources.pagerduty.EventV2_create(
summary=summary_fn(context),
source=_source_fn(context),
severity=severity,
dedup_key=_dedup_key_fn(context),
custom_details=custom_details,
)
return _hook
| 34.197531 | 115 | 0.641516 |
78a696ab9d55599cd5719f34bc92b424fba9a6e2
| 2,804 |
py
|
Python
|
Python/example_controllers/flex_soft_body.py
|
felixbinder/tdw
|
eb2b00b74b9fcf8ef2dcba1baa62424640c520b1
|
[
"BSD-2-Clause"
] | 307 |
2020-05-20T18:08:49.000Z
|
2022-03-21T19:55:08.000Z
|
Python/example_controllers/flex_soft_body.py
|
felixbinder/tdw
|
eb2b00b74b9fcf8ef2dcba1baa62424640c520b1
|
[
"BSD-2-Clause"
] | 92 |
2020-07-21T18:29:13.000Z
|
2022-03-28T07:25:54.000Z
|
Python/example_controllers/flex_soft_body.py
|
felixbinder/tdw
|
eb2b00b74b9fcf8ef2dcba1baa62424640c520b1
|
[
"BSD-2-Clause"
] | 53 |
2020-07-14T15:55:17.000Z
|
2022-03-20T16:20:01.000Z
|
from tdw.controller import Controller
from tdw.output_data import FlexParticles
from tdw.tdw_utils import TDWUtils
"""
Create a soft-body object with the NVIDIA Flex physics engine.
"""
class FlexSoftBody(Controller):
def run(self):
self.load_streamed_scene(scene="tdw_room")
self.communicate({"$type": "set_time_step", "time_step": 0.02})
# Create the container.
self.communicate({"$type": "create_flex_container",
"particle_size": 0.1,
"collision_distance": 0.025,
"solid_rest": 0.1})
# Create the avatar.
self.communicate(TDWUtils.create_avatar(position={"x": -1.5, "y": 0.85, "z": -0.5}))
# Add the object.
object_id = self.add_object("linbrazil_diz_armchair",
position={"x": 0.0, "y": 2.0, "z": 0.0},
rotation={"x": 25.0, "y": 45.0, "z": -40.0},
library="models_core.json")
# Set the object to kinematic.
# Set the soft actor.
# Assign the actor's container.
self.communicate([{"$type": "set_kinematic_state",
"id": object_id},
{"$type": "set_flex_soft_actor",
"id": object_id,
"skinning_falloff": 0.5,
"volume_sampling": 1.0,
"mass_scale": 1.0,
"cluster_stiffness": 0.2,
"cluster_spacing": 0.2,
"cluster_radius": 0.2,
"link_radius": 0,
"link_stiffness": 1.0,
"particle_spacing": 0.025},
{"$type": "assign_flex_container",
"id": object_id,
"container_id": 0}
])
# Send particles data.
resp = self.communicate([{"$type": "send_flex_particles",
"frequency": "always"}])
# Output example data.
particles = FlexParticles(resp[0])
for j in range(particles.get_num_objects()):
print(particles.get_id(j))
print(particles.get_velocities(j))
print(particles.get_particles(j))
for i in range(1000):
# Look at the object.
self.communicate({"$type": "look_at",
"avatar_id": "a",
"object_id": object_id,
"use_centroid": True})
if __name__ == "__main__":
FlexSoftBody().run()
| 37.891892 | 93 | 0.451141 |
28af6c700f52529f96ed910e66929d7ee389e4c5
| 3,866 |
py
|
Python
|
commons/tgraph.py
|
oeg-upm/tada-entity
|
6e538129229bed49bf1aa960fcd97a8468eca765
|
[
"Apache-2.0"
] | 3 |
2019-06-11T10:19:25.000Z
|
2022-02-28T22:58:29.000Z
|
commons/tgraph.py
|
oeg-upm/tada-entity
|
6e538129229bed49bf1aa960fcd97a8468eca765
|
[
"Apache-2.0"
] | 7 |
2019-02-04T08:57:54.000Z
|
2021-11-01T12:42:03.000Z
|
commons/tgraph.py
|
oeg-upm/tada-entity
|
6e538129229bed49bf1aa960fcd97a8468eca765
|
[
"Apache-2.0"
] | null | null | null |
class Node:
def __init__(self):
self.class_uri = ""
self.Ic = None
self.Lc = None
self.fc = None
self.Is = None
self.Ls = None
self.fs = dict()
self.f = dict()
# self.fs = None
# self.fs1 = None
# self.fs2 = None
# self.fs3 = None
# self.fs4 = None
# self.fs5 = None
# self.f = None
# self.f1 = None
# self.f2 = None
# self.f3 = None
# self.f4 = None
# self.f5 = None
self.parents = dict() # to nodes
self.childs = dict() # to nodes
def clear(self):
self.Ic = None
self.Lc = None
self.fc = None
self.Is = None
self.Ls = None
self.fs = dict()
self.f = dict()
# self.fs = None
# self.fs1 = None
# self.fs2 = None
# self.fs3 = None
# self.fs4 = None
# self.fs5 = None
# # self.f = None
# self.f1 = None
# self.f2 = None
# self.f3 = None
# self.f4 = None
# self.f5 = None
def print(self):
print("fc: %s" % str(self.fc))
for i in range(1, 6):
if i in self.fs:
print("fs%d: %s" % (i, str(self.fs[i])))
else:
print("fs%d: does not exist" % i)
print("f: %s" % str(self.f))
class TGraph:
def __init__(self):
self.nodes = dict() # to nodes
self.roots = dict() # to nodes
self.m = 0
def clear_for_reuse(self):
self.m = 0
for class_uri in self.nodes:
node = self.nodes[class_uri]
node.Ic = node.Lc = node.fc = None
node.f = dict()
def add_class(self, class_uri):
if class_uri in self.nodes:
return False
else:
# print("add class: "+class_uri)
self.nodes[class_uri] = Node()
self.nodes[class_uri].class_uri = class_uri
return True
def get_parents(self, class_uri):
if class_uri in self.nodes:
return list(self.nodes[class_uri].parents.keys())
else:
return None
def get_childs(self, class_uri):
if class_uri in self.nodes:
return list(self.nodes[class_uri].childs.keys())
else:
print("get_childs: <%s> does not exists in the nodes " % class_uri)
return None
def add_parent(self, class_uri, parent_uri):
# print("add to %s %s as a parent" % (class_uri, parent_uri))
if class_uri in self.nodes and parent_uri in self.nodes:
if parent_uri not in self.nodes[class_uri].parents:
self.nodes[class_uri].parents[parent_uri] = self.nodes[parent_uri]
self.nodes[parent_uri].childs[class_uri] = self.nodes[class_uri]
return True
return False
else:
print("parent uri: <%s> does not exists in the nodes " % parent_uri)
return None
def get_ancestors(self, class_uri):
ancestors = []
if class_uri in self.nodes:
# print("get_ancestors> parents of %s" % class_uri)
for p in self.get_parents(class_uri):
# print("get_ancestors>: "+p)
ancestors.append(p)
# ancestors += self.get_ancestors(p)
p_ancestors = self.get_ancestors(p)
ancestors += p_ancestors
return ancestors
# return list(set(ancestors))
else:
print("get_ancestors: <%s> is not added" % class_uri)
return None
    def clear_scores(self):
        # self.nodes maps class URIs to Node objects, so look each node up before clearing.
        for class_uri in self.nodes:
            self.nodes[class_uri].clear()
def print_scores(self):
for class_uri in self.nodes:
node = self.nodes[class_uri]
node.print()
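# A minimal usage sketch of the TGraph API above; the "dbo:" class URIs are
# illustrative examples only.
g = TGraph()
for uri in ("dbo:Agent", "dbo:Person", "dbo:Athlete"):
    g.add_class(uri)
g.add_parent("dbo:Person", "dbo:Agent")    # Person's parent is Agent
g.add_parent("dbo:Athlete", "dbo:Person")  # Athlete's parent is Person
print(g.get_ancestors("dbo:Athlete"))      # ['dbo:Person', 'dbo:Agent']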
| 28.850746 | 82 | 0.509053 |
19478a8e58edff191bfcf838e26d18d123c0a55a
| 4,728 |
py
|
Python
|
bokeh/application/handlers/lifecycle.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/application/handlers/lifecycle.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/application/handlers/lifecycle.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Bokeh Application Handler to look for Bokeh server lifecycle callbacks
in a specified Python module.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Any, Callable
# Bokeh imports
from ...document import Document
from ..application import ServerContext, SessionContext
from .handler import Handler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'LifecycleHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class LifecycleHandler(Handler):
''' Load a script which contains server lifecycle callbacks.
.. autoclasstoc::
'''
_on_server_loaded: Callable[[ServerContext], None]
_on_server_unloaded: Callable[[ServerContext], None]
_on_session_created: Callable[[SessionContext], None]
_on_session_destroyed: Callable[[SessionContext], None]
def __init__(self) -> None:
super().__init__()
self._on_server_loaded = _do_nothing
self._on_server_unloaded = _do_nothing
self._on_session_created = _do_nothing
self._on_session_destroyed = _do_nothing
@property
def safe_to_fork(self) -> bool:
return True
# Public methods ----------------------------------------------------------
def modify_document(self, doc: Document) -> None:
''' This handler does not make any modifications to the Document.
Args:
doc (Document) : A Bokeh Document to update in-place
*This handler does not modify the document*
Returns:
None
'''
# we could support a modify_document function, might be weird though.
pass
def on_server_loaded(self, server_context: ServerContext) -> None:
        ''' Execute ``on_server_loaded`` from the configured module (if
        it is defined) when the server is first started.
Args:
server_context (ServerContext) :
'''
return self._on_server_loaded(server_context)
def on_server_unloaded(self, server_context: ServerContext) -> None:
''' Execute ``on_server_unloaded`` from the configured module (if
it is defined) when the server cleanly exits. (Before stopping the
server's ``IOLoop``.)
Args:
server_context (ServerContext) :
.. warning::
In practice this code may not run, since servers are often killed
by a signal.
'''
return self._on_server_unloaded(server_context)
async def on_session_created(self, session_context: SessionContext) -> None:
''' Execute ``on_session_created`` from the configured module (if
it is defined) when a new session is created.
Args:
session_context (SessionContext) :
'''
return self._on_session_created(session_context)
async def on_session_destroyed(self, session_context: SessionContext) -> None:
''' Execute ``on_session_destroyed`` from the configured module (if
it is defined) when a new session is destroyed.
Args:
session_context (SessionContext) :
'''
return self._on_session_destroyed(session_context)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _do_nothing(ignored: Any) -> None:
pass
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
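# A sketch of the kind of lifecycle module whose callbacks a handler like this carries;
# the function names mirror the hooks above and the bodies are placeholders.
def on_server_loaded(server_context):
    print("server started")
def on_server_unloaded(server_context):
    print("server shutting down")
def on_session_created(session_context):
    print("session created")
def on_session_destroyed(session_context):
    print("session destroyed")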
| 33.062937 | 82 | 0.483714 |
4b20ab14f0e271263136fa786763f91f173df4de
| 165 |
py
|
Python
|
URI Online Judge/Python/URI 1044.py
|
AugustoEstevaoMonte/Learning---C-programming
|
e496b301b6cc9dda68b1da6d72a4937b2c5f9aec
|
[
"MIT"
] | null | null | null |
URI Online Judge/Python/URI 1044.py
|
AugustoEstevaoMonte/Learning---C-programming
|
e496b301b6cc9dda68b1da6d72a4937b2c5f9aec
|
[
"MIT"
] | 1 |
2020-08-04T17:08:41.000Z
|
2020-08-04T17:12:48.000Z
|
URI Online Judge/Python/URI 1044.py
|
AugustoEstevaoMonte/Learning---C-programming
|
e496b301b6cc9dda68b1da6d72a4937b2c5f9aec
|
[
"MIT"
] | null | null | null |
x=input().split()
a,b=x
a=int(a)
b=int(b)
if b>a:
resul=b%a
else:
resul=a%b
if resul==0:
print("Sao Multiplos")
else:
print("Nao sao Multiplos")
| 12.692308 | 30 | 0.575758 |
eebffe8bbda47c236f618b9179a3878e915e0866
| 6,445 |
py
|
Python
|
env/lib/python3.8/site-packages/plotly/graph_objs/scatter/selected/_marker.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11,750 |
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/scatter/selected/_marker.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,951 |
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/scatter/selected/_marker.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,623 |
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatter.selected"
_path_str = "scatter.selected.marker"
_valid_props = {"color", "opacity", "size"}
# color
# -----
@property
def color(self):
"""
Sets the marker color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity of selected points.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size of selected points.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
"""
def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter.selected.Marker`
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter.selected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.selected.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
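# A minimal usage sketch via the public plotly API: style the selected points of a
# scatter trace with this Marker class (example data and colors are illustrative).
import plotly.graph_objects as go
fig = go.Figure(
    go.Scatter(
        x=[1, 2, 3, 4],
        y=[10, 11, 12, 13],
        mode="markers",
        selectedpoints=[1, 3],
        selected=go.scatter.Selected(
            marker=go.scatter.selected.Marker(color="crimson", size=12, opacity=1.0)
        ),
    )
)
fig.show()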
| 33.051282 | 82 | 0.545694 |